Columns: text (string, lengths 78 to 104k); score (float64, range 0 to 0.18)
def install_deny_hook(api):
    """
    Install a deny import hook for Qt api.

    Parameters
    ----------
    api : str
        The Qt api whose import should be prevented

    Example
    -------
    >>> install_deny_hook("pyqt4")
    >>> import PyQt4
    Traceback (most recent call last):...
    ImportError: Import of PyQt4 is denied.
    """
    if api == USED_API:
        raise ValueError
    sys.meta_path.insert(0, ImportHookDeny(api))
0.002212
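The `ImportHookDeny` class used above is defined elsewhere in that module; a minimal sketch of what such a PEP 302 meta-path finder could look like (the body below is an assumption, only the class name and the `sys.meta_path` usage come from the snippet):

    class ImportHookDeny(object):
        """Meta-path finder that refuses imports of the denied Qt api (sketch)."""

        def __init__(self, api):
            self.api = api  # assumed lower-case, e.g. "pyqt4"

        def find_module(self, fullname, path=None):
            # Claim the module if its top-level name matches the denied api.
            if fullname.split(".")[0].lower() == self.api:
                return self
            return None

        def load_module(self, fullname):
            raise ImportError("Import of {} is denied.".format(fullname))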
def get_named_range(self, name):
    """ Retrieves a named range by its name """
    url = self.build_url(self._endpoints.get('get_named_range').format(name=name))
    response = self.session.get(url)
    if not response:
        return None
    return self.named_range_constructor(parent=self, **{self._cloud_data_key: response.json()})
0.01105
def _WriteCacheFile(self, cache_filename, scopes):
    """Writes the credential metadata to the cache file.

    This does not save the credentials themselves (CredentialStore class
    optionally handles that after this class is initialized).

    Args:
      cache_filename: Cache filename to check.
      scopes: Scopes for the desired credentials.
    """
    # Credentials metadata dict.
    creds = {'scopes': sorted(list(scopes)),
             'svc_acct_name': self.__service_account_name}
    creds_str = json.dumps(creds)
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cache_file.LockedWrite(creds_str)
    except KeyboardInterrupt:
        raise
    except:  # pylint: disable=bare-except
        # Treat exceptions as a cache miss.
        pass
0.003501
def set_segmentid_range(self, orchestrator_id, segid_min, segid_max):
    """Set segment id range in DCNM."""
    url = self._segmentid_ranges_url
    payload = {'orchestratorId': orchestrator_id,
               'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
    res = self._send_request('POST', url, payload, 'segment-id range')
    if not (res and res.status_code in self._resp_ok):
        LOG.error("Failed to set segment id range for orchestrator "
                  "%(orch)s on DCNM: %(text)s",
                  {'orch': orchestrator_id, 'text': res.text})
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
0.002911
def validate_checksum(filename, md5sum):
    """
    Compares the md5 checksum of a file with an expected value.
    If the calculated and expected checksum values are not equal,
    ValueError is raised.
    If the filename `foo` is not found, will try to read a gzipped
    file named `foo.gz`. In this case, the checksum is calculated
    for the unzipped file.

    Args:
        filename (str): Path for the file to be checksummed.
        md5sum (str):   The expected hex checksum.

    Returns:
        None
    """
    filename = match_filename(filename)
    md5_hash = file_md5(filename=filename)
    if md5_hash != md5sum:
        raise ValueError('md5 checksums are inconsistent: {}'.format(filename))
0.015363
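`match_filename` and `file_md5` are helpers from the same codebase that are not shown in this entry; a minimal sketch of a chunked `file_md5` (name and signature inferred from the call site, body assumed):

    import hashlib

    def file_md5(filename, block_size=65536):
        # Sketch: hash the file in blocks so large files do not fill memory.
        md5 = hashlib.md5()
        with open(filename, 'rb') as f:
            for block in iter(lambda: f.read(block_size), b''):
                md5.update(block)
        return md5.hexdigest()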
def search(self):
    """Parse ``self.query`` as JSON and apply its filter and sort clauses
    to the model query."""
    try:
        filters = json.loads(self.query)
    except ValueError:
        return False
    result = self.model_query
    if 'filter' in filters.keys():
        result = self.parse_filter(filters['filter'])
    if 'sort' in filters.keys():
        result = result.order_by(*self.sort(filters['sort']))
    return result
0.009238
def find_sources(self):
    """Generate SOURCES.txt manifest file"""
    manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
    mm = manifest_maker(self.distribution)
    mm.manifest = manifest_filename
    mm.run()
    self.filelist = mm.filelist
0.007067
def gen_tmp_file(i):
    """
    Input:  {
              (suffix)     - temp file suffix
              (prefix)     - temp file prefix
              (remove_dir) - if 'yes', remove dir
            }

    Output: {
              return    - return code = 0, if successful
                                        > 0, if error
              (error)   - error text if return > 0
              file_name - temp file name
            }
    """
    xs = i.get('suffix', '')
    xp = i.get('prefix', '')
    s = i.get('string', '')

    import tempfile
    fd, fn = tempfile.mkstemp(suffix=xs, prefix=xp)
    os.close(fd)
    os.remove(fn)

    if i.get('remove_dir', '') == 'yes':
        fn = os.path.basename(fn)

    return {'return': 0, 'file_name': fn}
0.020053
def write_magic_file(self, custom_name=None, dir_path=".", append=False,
                     multi_type=False, df=None):
    """
    Write self.df out to tab-delimited file.
    By default will use standard MagIC filenames (specimens.txt, etc.),
    or you can provide a custom_name to write to instead.
    By default will write to custom_name if custom_name is a full path,
    or will write to dir_path + custom_name if custom_name
    is not a full path.

    Parameters
    ----------
    self : MagIC DataFrame
    custom_name : str
        custom file name
    dir_path : str
        dir_path (used if custom_name is not a full path), default "."
    append : bool
        append to existing file, default False
    multi_type : bool
        for creating upload file

    Return
    --------
    fname : str
        output file name
    """
    # don't let custom name start with "./"
    if custom_name:
        if custom_name.startswith('.'):
            custom_name = os.path.split(custom_name)[1]
    # put columns in logical order (by group)
    self.sort_dataframe_cols()
    # if indexing column was put in, remove it
    if "num" in self.df.columns:
        self.df = self.df.drop("num", axis=1)
    # make sure name is a string
    name = self.get_singular_and_plural_dtype(self.dtype)[0]
    if name in self.df.columns:
        self.df[name] = self.df[name].astype(str)
    if df is None:
        df = self.df
    # get full file path
    dir_path = os.path.realpath(dir_path)
    if custom_name:
        fname = pmag.resolve_file_name(custom_name, dir_path)  # os.path.join(dir_path, custom_name)
    elif self.magic_file:
        fname = pmag.resolve_file_name(self.magic_file, dir_path)
    else:
        fname = os.path.join(dir_path, self.dtype + ".txt")
    # see if there's any data
    if not len(df):
        print('-W- No data to write to {}'.format(fname))
        return False
    # add to existing file
    if append:
        print('-I- appending {} data to {}'.format(self.dtype, fname))
        mode = "a"
    # overwrite existing file
    elif os.path.exists(fname):
        print('-I- overwriting {}'.format(fname))
        mode = "w"
    # or create new file
    else:
        print('-I- writing {} records to {}'.format(self.dtype, fname))
        mode = "w"
    f = open(fname, mode)
    if append:
        header = False
        if multi_type:
            header = True
            f.write('tab\t{}\n'.format(self.dtype))
        f.flush()
        df.to_csv(f, sep="\t", header=header, index=False, mode='a')
    else:
        f.write('tab\t{}\n'.format(self.dtype))
        f.flush()
        df.to_csv(f, sep="\t", header=True, index=False, mode='a')
    print('-I- {} records written to {} file'.format(len(df), self.dtype))
    f.close()
    return fname
0.001599
def _on_completions_refreshed(self, new_completer):
    """Swap the completer object in cli with the newly created completer."""
    with self._completer_lock:
        self.completer = new_completer
        # When cli is first launched we call refresh_completions before
        # instantiating the cli object. So it is necessary to check if cli
        # exists before trying to replace the completer object in cli.
        if self.cli:
            self.cli.current_buffer.completer = new_completer

    if self.cli:
        # After refreshing, redraw the CLI to clear the statusbar
        # "Refreshing completions..." indicator
        self.cli.request_redraw()
0.002766
def alias_asset(self, asset_id, alias_id):
    """Adds an ``Id`` to an ``Asset`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Asset`` is determined by the provider.
    The new ``Id`` performs as an alias to the primary ``Id``. If the
    alias is a pointer to another asset, it is reassigned to the given
    asset ``Id``.

    arg:    asset_id (osid.id.Id): the ``Id`` of an ``Asset``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``asset_id`` not found
    raise:  NullArgument - ``asset_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.alias_resources_template
    self._alias_id(primary_id=asset_id, equivalent_id=alias_id)
0.002828
def d3flare_json(metadata, file=None, **options):
    """ Converts the *metadata* dictionary of a container or field into a
    ``flare.json`` formatted string or formatted stream written to the *file*.

    The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_
    graphic library.

    The ``flare.json`` format looks like this:

    .. code-block:: JSON

        {
            "class": "class of the field or container",
            "name":  "name of the field or container",
            "size":  "bit size of the field",
            "value": "value of the field",
            "children": []
        }

    :param dict metadata: metadata generated from a :class:`Structure`,
        :class:`Sequence`, :class:`Array` or any :class:`Field` instance.
    :param file file: file-like object.
    """
    def convert(root):
        dct = OrderedDict()
        item_type = root.get('type')
        dct['class'] = root.get('class')
        dct['name'] = root.get('name')
        if item_type is ItemClass.Field.name:
            dct['size'] = root.get('size')
            dct['value'] = root.get('value')
        children = root.get('member')
        if children:
            # Any containable class with children
            dct['children'] = list()
            if item_type is ItemClass.Pointer.name:
                # Create pointer address field as child
                field = OrderedDict()
                field['class'] = dct['class']
                field['name'] = '*' + dct['name']
                field['size'] = root.get('size')
                field['value'] = root.get('value')
                dct['children'].append(field)
            for child in map(convert, children):
                # Recursive function call map(fnc, args).
                dct['children'].append(child)
        elif item_type is ItemClass.Pointer.name:
            # Null pointer (None pointer)
            dct['size'] = root.get('size')
            dct['value'] = root.get('value')
        return dct

    options['indent'] = options.get('indent', 2)

    if file:
        return json.dump(convert(metadata), file, **options)
    else:
        return json.dumps(convert(metadata), **options)
0.00091
def authenticate(self):
    """
    Authenticate into the UAA instance as the admin user.
    """
    # Make sure we've stored uri for use
    predix.config.set_env_value(self.use_class, 'uri', self._get_uri())

    self.uaac = predix.security.uaa.UserAccountAuthentication()
    self.uaac.authenticate('admin', self._get_admin_secret(),
                           use_cache=False)

    self.is_admin = True
0.007026
def self_if_parameters(func):
    """
    If any parameter is given, the method's bound object is returned after
    executing the function. Otherwise the function's result is returned.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if args or kwargs:
            return self
        else:
            return result
    return wrapper
0.002494
def plot_density(
    data,
    group="posterior",
    data_labels=None,
    var_names=None,
    credible_interval=0.94,
    point_estimate="mean",
    colors="cycle",
    outline=True,
    hpd_markers="",
    shade=0.0,
    bw=4.5,
    figsize=None,
    textsize=None,
):
    """Generate KDE plots for continuous variables and histograms for discrete ones.

    Plots are truncated at their 100*(1-alpha)% credible intervals. Plots are grouped per
    variable and colors assigned to models.

    Parameters
    ----------
    data : Union[Object, Iterator[Object]]
        Any object that can be converted to an az.InferenceData object, or an Iterator
        returning a sequence of such objects.
        Refer to documentation of az.convert_to_dataset for details about such objects.
    group: Optional[str]
        Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
        Alternative values include 'prior' and any other strings used as dataset keys in
        the InferenceData.
    data_labels : Optional[List[str]]
        List with names for the datasets passed as "data." Useful when plotting more than
        one dataset. Must be the same shape as the data parameter. Defaults to None.
    var_names: Optional[List[str]]
        List of variables to plot. If multiple datasets are supplied and var_names is not
        None, will print the same set of variables for each dataset. Defaults to None,
        which results in all the variables being plotted.
    credible_interval : float
        Credible intervals. Should be in the interval (0, 1]. Defaults to 0.94.
    point_estimate : Optional[str]
        Plot point estimate per variable. Values should be 'mean', 'median' or None.
        Defaults to 'mean'.
    colors : Optional[Union[List[str], str]]
        List with valid matplotlib colors, one color per model. Alternatively, a string
        can be passed. If the string is `cycle`, it will automatically choose a color per
        model from matplotlib's cycle. If a single color is passed, e.g. 'k', 'C2' or
        'red' this color will be used for all models. Defaults to `cycle`.
    outline : bool
        Use a line to draw KDEs and histograms. Default to True
    hpd_markers : str
        A valid `matplotlib.markers` like 'v', used to indicate the limits of the hpd
        interval. Defaults to empty string (no marker).
    shade : Optional[float]
        Alpha blending value for the shaded area under the curve, between 0 (no shade)
        and 1 (opaque). Defaults to 0.
    bw : Optional[float]
        Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this
        number the smoother the KDE will be. Defaults to 4.5 which is essentially the
        same as the Scott's rule of thumb (the default rule used by SciPy).
    figsize : Optional[Tuple[int, int]]
        Figure size. If None it will be defined automatically.
    textsize: Optional[float]
        Text size scaling factor for labels, titles and lines. If None it will be
        autoscaled based on figsize.

    Returns
    -------
    ax : Matplotlib axes

    Examples
    --------
    Plot default density plot

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> centered = az.load_arviz_data('centered_eight')
        >>> non_centered = az.load_arviz_data('non_centered_eight')
        >>> az.plot_density([centered, non_centered])

    Plot subset variables by specifying variable name exactly

    .. plot::
        :context: close-figs

        >>> az.plot_density([centered, non_centered], var_names=["mu"])

    Plot a specific `az.InferenceData` group

    .. plot::
        :context: close-figs

        >>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")

    Specify credible interval

    .. plot::
        :context: close-figs

        >>> az.plot_density([centered, non_centered], var_names=["mu"], credible_interval=.5)

    Shade plots and/or remove outlines

    .. plot::
        :context: close-figs

        >>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)

    Specify binwidth for kernel density estimation

    .. plot::
        :context: close-figs

        >>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
    """
    if not isinstance(data, (list, tuple)):
        datasets = [convert_to_dataset(data, group=group)]
    else:
        datasets = [convert_to_dataset(datum, group=group) for datum in data]

    var_names = _var_names(var_names, datasets)

    if point_estimate not in ("mean", "median", None):
        raise ValueError(
            "Point estimate should be 'mean', 'median' or None, not {}".format(point_estimate)
        )

    n_data = len(datasets)

    if data_labels is None:
        if n_data > 1:
            data_labels = ["{}".format(idx) for idx in range(n_data)]
        else:
            data_labels = [""]
    elif len(data_labels) != n_data:
        raise ValueError(
            "The number of names for the models ({}) "
            "does not match the number of models ({})".format(len(data_labels), n_data)
        )

    if colors == "cycle":
        colors = ["C{}".format(idx % 10) for idx in range(n_data)]
    elif isinstance(colors, str):
        colors = [colors for _ in range(n_data)]

    if not 1 >= credible_interval > 0:
        raise ValueError("The value of credible_interval should be in the interval (0, 1]")

    to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
    all_labels = []
    length_plotters = []
    for plotters in to_plot:
        length_plotters.append(len(plotters))
        for var_name, selection, _ in plotters:
            label = make_label(var_name, selection)
            if label not in all_labels:
                all_labels.append(label)
    length_plotters = max(length_plotters)
    rows, cols = default_grid(length_plotters, max_cols=3)

    (figsize, _, titlesize, xt_labelsize, linewidth, markersize) = _scale_fig_size(
        figsize, textsize, rows, cols
    )

    _, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)

    axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}

    for m_idx, plotters in enumerate(to_plot):
        for var_name, selection, values in plotters:
            label = make_label(var_name, selection)
            _d_helper(
                values.flatten(),
                label,
                colors[m_idx],
                bw,
                titlesize,
                xt_labelsize,
                linewidth,
                markersize,
                credible_interval,
                point_estimate,
                hpd_markers,
                outline,
                shade,
                axis_map[label],
            )

    if n_data > 1:
        for m_idx, label in enumerate(data_labels):
            ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
        ax[0].legend(fontsize=xt_labelsize)

    return ax
0.004104
def setup_logging(self):
    """Setup logging module based on known modules in the config file"""
    logging.getLogger('amqp').setLevel(str_to_logging(self.get('logging', 'amqp')))
    logging.getLogger('rdflib').setLevel(str_to_logging(self.get('logging', 'rdflib')))
0.013793
def main():
    """
    NAME
        umich_magic.py

    DESCRIPTION
        converts UMICH .mag format files to magic_measurements format files

    SYNTAX
        umich_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -usr USER: identify user, default is ""
        -f FILE: specify .mag format input file, required
        -fsa SAMPFILE : specify er_samples.txt file relating samples, site and location names, default is none
        -F FILE: specify output file, default is magic_measurements.txt
        -spc NUM : specify number of characters to designate a specimen, default = 0
        -loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
        -ncn NCON: specify naming convention: default is #1 below
        -A: don't average replicate measurements

       Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize your self
            or e-mail [email protected] for help.

    Format of UMICH .mag files:
    Spec Treat CSD Intensity Declination Inclination metadata string

        Spec: specimen name
        Treat: treatment step
            XXX T in Centigrade
            XXX AF in mT
        Intensity assumed to be total moment in 10^3 Am^2 (emu)
        Declination: Declination in specimen coordinate system
        Inclination: Inclination in specimen coordinate system

        metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
            hh in 24 hours.
            dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
            xx.xxx DC field
            UNITS of DC field (microT, mT)
            INST: instrument code, number of axes, number of positions
                  (e.g., G34 is 2G, three axes, measured in four positions)
            NMEAS: number of measurements in a single position (1,3,200...)
    """
    # initialize some stuff
    dir_path = '.'
    infile_type = "mag"
    noave = 0
    methcode, inst = "", ""
    phi, theta, peakfield, labfield = 0, 0, 0, 0
    pTRM, MD, samp_con, Z = 0, 0, '1', 1
    missing = 1
    demag = "N"
    er_location_name = ""
    citation = 'This study'
    args = sys.argv
    methcode = "LP-NO"
    samp_file, ErSamps = '', []
    specnum = 0
    #
    # get command line arguments
    #
    meas_file = "magic_measurements.txt"
    user = ""
    if '-WD' in args:
        ind = args.index("-WD")
        dir_path = args[ind+1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-usr" in args:
        ind = args.index("-usr")
        user = args[ind+1]
    if '-F' in args:
        ind = args.index("-F")
        meas_file = dir_path + '/' + args[ind+1]
    if '-f' in args:
        ind = args.index("-f")
        magfile = dir_path + '/' + args[ind+1]
        try:
            input = open(magfile, 'r')
        except:
            print("bad mag file name")
            sys.exit()
    else:
        print("mag_file field is required option")
        print(main.__doc__)
        sys.exit()
    if "-spc" in args:
        ind = args.index("-spc")
        specnum = int(args[ind+1])
        if specnum != 0:
            specnum = -specnum
    if "-loc" in args:
        ind = args.index("-loc")
        er_location_name = args[ind+1]
    if "-fsa" in args:
        ind = args.index("-fsa")
        samp_file = dir_path + '/' + args[ind+1]
        Samps, file_type = pmag.magic_read(samp_file)
    if "-A" in args:
        noave = 1
    if "-ncn" in args:
        ind = args.index("-ncn")
        samp_con = sys.argv[ind+1]
        if "4" in samp_con:
            if "-" not in samp_con:
                print("option [4] must be in form 4-Z where Z is an integer")
                sys.exit()
            else:
                Z = samp_con.split("-")[1]
                samp_con = "4"
        samp_con = sys.argv[ind+1]
        if "7" in samp_con:
            if "-" not in samp_con:
                print("option [7] must be in form 7-Z where Z is an integer")
                sys.exit()
            else:
                Z = samp_con.split("-")[1]
                samp_con = "7"
    MagRecs, specs = [], []
    version_num = pmag.get_version()
    if infile_type == "mag":
        for line in input.readlines():
            instcode = ""
            if len(line) > 2:
                MagRec = {}
                MagRec['er_location_name'] = er_location_name
                MagRec['magic_software_packages'] = version_num
                MagRec["treatment_temp"] = '%8.3e' % (273)  # room temp in kelvin
                MagRec["measurement_temp"] = '%8.3e' % (273)  # room temp in kelvin
                MagRec["treatment_ac_field"] = '0'
                MagRec["treatment_dc_field"] = '0'
                MagRec["treatment_dc_field_phi"] = '0'
                MagRec["treatment_dc_field_theta"] = '0'
                meas_type = "LT-NO"
                rec = line.split()
                labfield = 0
                code1 = rec[6].split(';')
                date = code1[0].split('/')  # break date into mon/day/year
                yy = int(date[2])
                if yy < 90:
                    yyyy = str(2000 + yy)
                else:
                    yyyy = str(1900 + yy)
                mm = int(date[0])
                if mm < 10:
                    mm = "0" + str(mm)
                else:
                    mm = str(mm)
                dd = int(date[1])
                if dd < 10:
                    dd = "0" + str(dd)
                else:
                    dd = str(dd)
                time = code1[1].split(':')
                hh = int(time[0])
                if hh < 10:
                    hh = "0" + str(hh)
                else:
                    hh = str(hh)
                min = int(time[1])
                if min < 10:
                    min = "0" + str(min)
                else:
                    min = str(min)
                MagRec["measurement_date"] = yyyy + ":" + mm + ":" + dd + ":" + hh + ":" + min + ":00.00"
                MagRec["measurement_time_zone"] = ''
                instcode = ''
                if len(code1) > 1:
                    MagRec["measurement_positions"] = code1[6][2]
                else:
                    # takes care of awkward format with bubba and flo being different
                    MagRec["measurement_positions"] = code1[7]
                if user == "":
                    user = code1[5]
                if code1[2][-1] == 'C':
                    demag = "T"
                if code1[2] == 'mT':
                    demag = "AF"
                treat = rec[1].split('.')
                if len(treat) == 1:
                    treat.append('0')
                if demag == 'T' and treat != 0:
                    meas_type = "LT-T-Z"
                    MagRec["treatment_temp"] = '%8.3e' % (float(treat[0]) + 273.)  # temp in kelvin
                if demag == "AF":
                    meas_type = "LT-AF-Z"
                    MagRec["treatment_ac_field"] = '%8.3e' % (float(treat[0]) * 1e-3)  # AF field in T
                MagRec["treatment_dc_field"] = '0'
                MagRec["er_specimen_name"] = rec[0]
                if rec[0] not in specs:
                    specs.append(rec[0])  # get a list of specimen names
                experiment = rec[0] + ":"
                MagRec["er_site_name"] = ""
                if specnum != 0:
                    MagRec["er_sample_name"] = rec[0][:specnum]
                else:
                    MagRec["er_sample_name"] = rec[0]
                if "-fsa" in args:
                    for samp in Samps:
                        if samp["er_sample_name"] == MagRec["er_sample_name"]:
                            MagRec["er_location_name"] = samp["er_location_name"]
                            MagRec["er_site_name"] = samp["er_site_name"]
                            break
                elif int(samp_con) != 6:
                    site = pmag.parse_site(MagRec['er_sample_name'], samp_con, Z)
                    MagRec["er_site_name"] = site
                if MagRec['er_site_name'] == "":
                    print('No site name found for: ', MagRec['er_specimen_name'], MagRec['er_sample_name'])
                if MagRec["er_location_name"] == "":
                    print('no location name for: ', MagRec["er_specimen_name"])
                if rec[1] == ".00":
                    rec[1] = "0.00"
                MagRec["measurement_csd"] = rec[2]
                MagRec["measurement_magn_moment"] = '%10.3e' % (float(rec[3]) * 1e-3)  # moment in Am^2 (from emu)
                MagRec["measurement_dec"] = rec[4]
                MagRec["measurement_inc"] = rec[5]
                MagRec["magic_instrument_codes"] = instcode
                MagRec["er_analyst_mail_names"] = user
                MagRec["er_citation_names"] = citation
                MagRec["magic_method_codes"] = meas_type
                MagRec["measurement_flag"] = 'g'
                MagRec["er_specimen_name"] = rec[0]
                MagRec["measurement_number"] = '1'
                MagRecs.append(MagRec)
    MagOuts = []
    for spec in specs:
        # gather all demag types for this specimen
        SpecRecs, meths, measnum = [], [], 1
        for rec in MagRecs:
            if rec['er_specimen_name'] == spec:
                rec['measurement_number'] = str(measnum)
                measnum += 1
                if rec['magic_method_codes'] not in meths:
                    meths.append(rec['magic_method_codes'])
                SpecRecs.append(rec)
        expname = spec
        if "LT-AF-Z" in meths:
            expname = expname + ':LP-DIR-AF'
        if "LT-T-Z" in meths:
            expname = expname + ':LP-DIR-T'
        for rec in SpecRecs:
            rec['magic_experiment_name'] = expname
            MagOuts.append(rec)
    pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
    print("results put in ", meas_file)
0.023481
def read_stats(self):
    """
    :return: dictionary {group name {stat name: value}}. See XenaTpld.stats_captions.
    """
    stats_with_captions = OrderedDict()
    for stat_name in self.stats_captions.keys():
        stats_with_captions[stat_name] = self.read_stat(self.stats_captions[stat_name], stat_name)
    return stats_with_captions
0.007833
def all_origins(m):
    '''
    Generate all unique statement origins in the given model
    '''
    seen = set()
    for link in m.match():
        origin = link[ORIGIN]
        if origin not in seen:
            seen.add(origin)
            yield origin
0.003922
def create_qgis_template_output(output_path, layout):
    """Produce QGIS Template output.

    :param output_path: The output path.
    :type output_path: str

    :param layout: QGIS Layout object to get template values from.
    :type layout: qgis.core.QgsLayout

    :return: Generated output path.
    :rtype: str
    """
    # make sure directory is created
    dirname = os.path.dirname(output_path)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    context = QgsReadWriteContext()
    context.setPathResolver(QgsProject.instance().pathResolver())
    layout.saveAsTemplate(output_path, context)
    return output_path
0.001506
def _compute_diplomacy(self):
    """Compute diplomacy."""
    self._diplomacy = {
        'teams': self.teams(),
        'ffa': (len(self.teams()) == (self._player_num + self._computer_num)
                and self._player_num + self._computer_num > 2),
        'TG': len(self.teams()) == 2 and self._player_num + self._computer_num > 2,
        '1v1': self._player_num + self._computer_num == 2,
    }
    self._diplomacy['type'] = 'unknown'
    if self._diplomacy['ffa']:
        self._diplomacy['type'] = 'ffa'
    if self._diplomacy['TG']:
        self._diplomacy['type'] = 'TG'
        size = len(self.teams()[0]['player_numbers'])
        self._diplomacy['team_size'] = '{}v{}'.format(size, size)
    if self._diplomacy['1v1']:
        self._diplomacy['type'] = '1v1'
0.0058
def adjust_all_to_360(dictionary):
    """
    Take a dictionary and check each key/value pair.
    If the key is of type declination/longitude/azimuth/direction,
    adjust it to be within 0-360 as required by the MagIC data model.
    """
    for key in dictionary:
        dictionary[key] = adjust_to_360(dictionary[key], key)
    return dictionary
0.002841
def _rsaes_pkcs1_v1_5_decrypt(self, C):
    """
    Implements RSAES-PKCS1-V1_5-DECRYPT() function described in section
    7.2.2 of RFC 3447.

    Input:
       C: ciphertext to be decrypted, an octet string of length k, where
          k is the length in octets of the RSA modulus n.

    Output:
       an octet string of length at most k - 11; on error, None is returned.
    """
    # 1) Length checking
    cLen = len(C)
    k = self.modulusLen / 8
    if cLen != k or k < 11:
        warning("Key._rsaes_pkcs1_v1_5_decrypt() decryption error "
                "(cLen != k or k < 11)")
        return None

    # 2) RSA decryption
    c = pkcs_os2ip(C)     # 2.a)
    m = self._rsadp(c)    # 2.b)
    EM = pkcs_i2osp(m, k) # 2.c)

    # 3) EME-PKCS1-v1_5 decoding
    # I am aware of the note at the end of 7.2.2 regarding error
    # conditions reporting but the one provided below are for _local_
    # debugging purposes. --arno
    if EM[0] != '\x00':
        warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
                "(first byte is not 0x00)")
        return None

    if EM[1] != '\x02':
        warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
                "(second byte is not 0x02)")
        return None

    tmp = EM[2:].split('\x00', 1)
    if len(tmp) != 2:
        warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
                "(no 0x00 to separate PS from M)")
        return None

    PS, M = tmp
    if len(PS) < 8:
        warning("Key._rsaes_pkcs1_v1_5_decrypt(): decryption error "
                "(PS is less than 8 byte long)")
        return None

    return M
0.002099
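The decoding step above relies on the EME-PKCS1-v1_5 layout EM = 0x00 || 0x02 || PS || 0x00 || M, where PS is at least 8 padding octets. A self-contained, bytes-based sketch of that parse (independent of the Python 2 string code above):

    def parse_eme_pkcs1_v1_5(EM):
        # EM = b'\x00\x02' + PS + b'\x00' + M, with len(PS) >= 8
        if len(EM) < 11 or EM[0:1] != b'\x00' or EM[1:2] != b'\x02':
            return None
        PS, sep, M = EM[2:].partition(b'\x00')
        if not sep or len(PS) < 8:
            return None
        return M

    assert parse_eme_pkcs1_v1_5(b'\x00\x02' + b'\xaa' * 8 + b'\x00' + b'hi') == b'hi'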
def create(fs, channels):
    """Allocates and initializes a decoder state"""
    result_code = ctypes.c_int()

    result = _create(fs, channels, ctypes.byref(result_code))
    if result_code.value != 0:
        raise OpusError(result_code.value)

    return result
0.00369
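The `_create` binding is declared elsewhere in that module; a plausible ctypes declaration for it (the library lookup and symbol wiring below are assumptions, only `_create`'s call shape comes from the snippet):

    import ctypes
    import ctypes.util

    # Sketch: bind opus_decoder_create(Fs, channels, *error) from libopus.
    libopus = ctypes.CDLL(ctypes.util.find_library('opus'))
    _create = libopus.opus_decoder_create
    _create.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int))
    _create.restype = ctypes.c_void_p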
def _encrypt_message(self, msg, nonce, timestamp=None):
    """Encrypt and pack a message that the official account sends back to the user.

    :param msg: The message to send to the user, an XML-format string
    :param nonce: Random string; may be self-generated or taken from the nonce URL parameter
    :param timestamp: Timestamp; may be self-generated or taken from the timestamp URL
                      parameter. If None, the current time is used.
    :return: The encrypted ciphertext ready to be returned to the user: an XML-format
             string containing msg_signature, timestamp, nonce and encrypt
    """
    xml = """<xml>
<Encrypt><![CDATA[{encrypt}]]></Encrypt>
<MsgSignature><![CDATA[{signature}]]></MsgSignature>
<TimeStamp>{timestamp}</TimeStamp>
<Nonce><![CDATA[{nonce}]]></Nonce>
</xml>"""
    nonce = to_binary(nonce)
    timestamp = to_binary(timestamp) or to_binary(int(time.time()))
    encrypt = self.__pc.encrypt(to_text(msg), self.__id)
    # Generate the security signature
    signature = get_sha1_signature(self.__token, timestamp, nonce, encrypt)
    return to_text(xml.format(
        encrypt=to_text(encrypt),
        signature=to_text(signature),
        timestamp=to_text(timestamp),
        nonce=to_text(nonce)
    ))
0.002994
def generate_certificate(
        ctx, slot, management_key, pin, public_key, subject, valid_days):
    """
    Generate a self-signed X.509 certificate.

    A self-signed certificate is generated and written to one of the slots on
    the YubiKey. A private key needs to exist in the slot.

    \b
    SLOT            PIV slot where private key is stored.
    PUBLIC-KEY      File containing a public key. Use '-' to use stdin.
    """
    controller = ctx.obj['controller']
    _ensure_authenticated(
        ctx, controller, pin, management_key, require_pin_and_key=True)

    data = public_key.read()

    public_key = serialization.load_pem_public_key(
        data, default_backend())

    now = datetime.datetime.now()
    valid_to = now + datetime.timedelta(days=valid_days)

    try:
        controller.generate_self_signed_certificate(
            slot, public_key, subject, now, valid_to,
            touch_callback=prompt_for_touch)
    except APDUError as e:
        logger.error('Failed to generate certificate for slot %s', slot,
                     exc_info=e)
        ctx.fail('Certificate generation failed.')
0.000887
def get_share(self, sharename):
    """
    Get a specific share. Does not require authentication.

    Input:
        * A sharename

    Output:
        * A :py:mod:`pygett.shares.GettShare` object

    Example::

        share = client.get_share("4ddfds")
    """
    response = GettRequest().get("/shares/%s" % sharename)

    if response.http_status == 200:
        return GettShare(self.user, **response.response)
0.004283
def dir_list(self, tgt_env):
    '''
    Get a list of directories for the target environment using pygit2
    '''
    def _traverse(tree, blobs, prefix):
        '''
        Traverse through a pygit2 Tree object recursively, accumulating all
        the empty directories within it in the "blobs" list
        '''
        for entry in iter(tree):
            if entry.oid not in self.repo:
                # Entry is a submodule, skip it
                continue
            blob = self.repo[entry.oid]
            if not isinstance(blob, pygit2.Tree):
                continue
            blobs.append(
                salt.utils.path.join(prefix, entry.name, use_posixpath=True)
            )
            if blob:
                _traverse(
                    blob, blobs, salt.utils.path.join(
                        prefix, entry.name, use_posixpath=True)
                )

    ret = set()
    tree = self.get_tree(tgt_env)
    if not tree:
        return ret
    if self.root(tgt_env):
        try:
            oid = tree[self.root(tgt_env)].oid
            tree = self.repo[oid]
        except KeyError:
            return ret
        if not isinstance(tree, pygit2.Tree):
            return ret
        relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
    else:
        relpath = lambda path: path
    blobs = []
    if tree:
        _traverse(tree, blobs, self.root(tgt_env))
    add_mountpoint = lambda path: salt.utils.path.join(
        self.mountpoint(tgt_env), path, use_posixpath=True)
    for blob in blobs:
        ret.add(add_mountpoint(relpath(blob)))
    if self.mountpoint(tgt_env):
        ret.add(self.mountpoint(tgt_env))
    return ret
0.003207
def items_detail_get(self, num_iids, fields=[], **kwargs):
    '''taobao.taobaoke.items.detail.get
    Query detailed information about Taobaoke promoted items.'''
    request = TOPRequest('taobao.taobaoke.items.detail.get')
    request['num_iids'] = num_iids
    if not fields:
        taobaokeItem = TaobaokeItem()
        fields = taobaokeItem.fields
    request['fields'] = fields
    for k, v in kwargs.iteritems():
        if k not in ('nick', 'outer_code', 'pid') and v == None:
            continue
        request[k] = v
    self.create(self.execute(request),
                fields=['taobaoke_item_details', 'total_results'],
                models={'taobaoke_item_details': TaobaokeItemDetail})
    return self.taobaoke_item_details
0.011004
def update(self, new_data: Dict[Text, Dict[Text, Text]]):
    """
    Receive an update from a loader.

    :param new_data: New translation data from the loader
    """
    for locale, data in new_data.items():
        if locale not in self.dict:
            self.dict[locale] = {}

        self.dict[locale].update(data)
0.005634
def RB_bias(data, pars, ita=None, acf=None):
    """
    Calculate the expected bias on each of the parameters in the model pars.
    Only parameters that are allowed to vary will have a bias.
    Calculation follows the description of Refregier & Brown 1998 (cite).

    Parameters
    ----------
    data : 2d-array
        data that was fit

    pars : lmfit.Parameters
        The model

    ita : 2d-array
        The ita matrix (optional).

    acf : 2d-array
        The acf for the data.

    Returns
    -------
    bias : array
        The bias on each of the parameters
    """
    log.info("data {0}".format(data.shape))
    nparams = np.sum([pars[k].vary for k in pars.keys() if k != 'components'])
    # masked pixels
    xm, ym = np.where(np.isfinite(data))
    # all pixels
    x, y = np.indices(data.shape)
    # Create the jacobian as an AxN array accounting for the masked pixels
    j = np.array(np.vsplit(lmfit_jacobian(pars, xm, ym).T, nparams)).reshape(nparams, -1)
    h = hessian(pars, x, y)
    # mask the hessian to be AxAxN array
    h = h[:, :, xm, ym]
    Hij = np.einsum('ik,jk', j, j)
    Dij = np.linalg.inv(Hij)
    Bijk = np.einsum('ip,jkp', j, h)
    Eilkm = np.einsum('il,km', Dij, Dij)
    Cimn_1 = -1 * np.einsum('krj,ir,km,jn', Bijk, Dij, Dij, Dij)
    Cimn_2 = -1. / 2 * np.einsum('rkj,ir,km,jn', Bijk, Dij, Dij, Dij)
    Cimn = Cimn_1 + Cimn_2
    if ita is None:
        # N is the noise (data-model)
        N = data - ntwodgaussian_lmfit(pars)(x, y)
        if acf is None:
            acf = nan_acf(N)
        ita = make_ita(N, acf=acf)
        log.info('acf.shape {0}'.format(acf.shape))
        log.info('acf[0] {0}'.format(acf[0]))
        log.info('ita.shape {0}'.format(ita.shape))
        log.info('ita[0] {0}'.format(ita[0]))

    # Included for completeness but not required
    # now mask/ravel the noise
    # N = N[np.isfinite(N)].ravel()
    # Pi = np.einsum('ip,p', j, N)
    # Qij = np.einsum('ijp,p', h, N)

    Vij = np.einsum('ip,jq,pq', j, j, ita)
    Uijk = np.einsum('ip,jkq,pq', j, h, ita)

    bias_1 = np.einsum('imn, mn', Cimn, Vij)
    bias_2 = np.einsum('ilkm, mlk', Eilkm, Uijk)
    bias = bias_1 + bias_2
    log.info('bias {0}'.format(bias))
    return bias
0.00134
def handle_args():
    """
    Default values are defined here.
    """
    default_database_name = os.environ.get(
        'VOEVENTDB_DBNAME', dbconfig.testdb_corpus_url.database)
    default_logfile_path = os.path.expanduser("~/voeventdb_packet_ingest.log")
    parser = argparse.ArgumentParser(
        prog=os.path.basename(__file__),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.description = """
    Ingest a packet from stdin and attempt to ingest into a voeventdb database.

    Usage: cat test.xml | voeventdb_ingest_packet.py -d mydb -l /tmp/my.log
    """
    parser.add_argument('-d', '--dbname', nargs='?',
                        default=str(default_database_name),
                        help='Database name')
    parser.add_argument('-l', '--logfile_path', nargs='?',
                        default=default_logfile_path,
                        )
    return parser.parse_args()
0.001055
def calls_sorted(self):
    """ calls sorted in z """
    def _z(call):
        if isinstance(call.z.value, np.ndarray):
            return np.mean(call.z.value.flatten())
        elif isinstance(call.z.value, float) or isinstance(call.z.value, int):
            return call.z.value
        else:
            # put it at the back
            return -np.inf

    calls = self._calls
    zs = np.array([_z(c) for c in calls])
    sorted_inds = zs.argsort()
    # TODO: ugh, this is ugly.  Test to find the optimal way to sort
    # while still ending up with a list
    return _call.make_callgroup(np.array(calls)[sorted_inds].tolist())
0.004225
def shrink(self, index, target, body=None, params=None):
    """
    The shrink index API allows you to shrink an existing index into a new
    index with fewer primary shards. The number of primary shards in the
    target index must be a factor of the shards in the source index. For
    example an index with 8 primary shards can be shrunk into 4, 2 or 1
    primary shards or an index with 15 primary shards can be shrunk into
    5, 3 or 1. If the number of shards in the index is a prime number it
    can only be shrunk into a single primary shard. Before shrinking, a
    (primary or replica) copy of every shard in the index must be present
    on the same node.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_

    :arg index: The name of the source index to shrink
    :arg target: The name of the target index to shrink into
    :arg body: The configuration for the target index (`settings` and
        `aliases`)
    :arg master_timeout: Specify timeout for connection to master
    :arg request_timeout: Explicit operation timeout
    :arg wait_for_active_shards: Set the number of active shards to wait
        for on the shrunken index before the operation returns.
    """
    for param in (index, target):
        if param in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        "PUT", _make_path(index, "_shrink", target), params=params, body=body
    )
0.002478
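A usage sketch for the method above (client construction and index names are illustrative, not from the source):

    from elasticsearch import Elasticsearch

    es = Elasticsearch()
    # Shrink a hypothetical 8-shard index down to 1 primary shard.
    es.indices.shrink(index="logs-2019", target="logs-2019-shrunk",
                      body={"settings": {"index.number_of_shards": 1}})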
def get_result(self):
    """
    Get the result of this transfer.
    """
    while self._result is None:
        if len(self.daplink._commands_to_read) > 0:
            self.daplink._read_packet()
        else:
            assert not self.daplink._crnt_cmd.get_empty()
            self.daplink.flush()
    if self._error is not None:
        # Pylint is confused and thinks self._error is None
        # since that is what it is initialized to.
        # Suppress warnings for this.
        # pylint: disable=raising-bad-type
        raise self._error
    assert self._result is not None
    return self._result
0.002933
def hashstr(data, hashlen=HASH_LEN, alphabet=ALPHABET):
    """
    python -c "import utool as ut; print(ut.hashstr('abcd'))"

    Args:
        data (hashable):
        hashlen (int): (default = 16)
        alphabet (list): list of characters:

    Returns:
        str: hashstr

    CommandLine:
        python -m utool.util_hash --test-hashstr
        python3 -m utool.util_hash --test-hashstr
        python3 -m utool.util_hash --test-hashstr:2
        python -m utool.util_hash hashstr:3
        python3 -m utool.util_hash hashstr:3

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> data = 'foobar'
        >>> hashlen = 16
        >>> alphabet = ALPHABET_41
        >>> text = hashstr(data, hashlen, alphabet)
        >>> result = ('text = %s' % (str(text),))
        >>> print(result)
        text = mi5yum60mbxhyp+x

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> data = ''
        >>> hashlen = 16
        >>> alphabet = ALPHABET_41
        >>> text = hashstr(data, hashlen, alphabet)
        >>> result = ('text = %s' % (str(text),))
        >>> print(result)
        text = 0000000000000000

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import numpy as np
        >>> data = np.array([1, 2, 3])
        >>> hashlen = 16
        >>> alphabet = ALPHABET_41
        >>> text = hashstr(data, hashlen, alphabet)
        >>> result = ('text = %s' % (str(text),))
        >>> print(result)
        text = z5lqw0bzt4dmb9yy

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import numpy as np
        >>> from uuid import UUID
        >>> data = (UUID('7cd0197b-1394-9d16-b1eb-0d8d7a60aedc'), UUID('c76b54a5-adb6-7f16-f0fb-190ab99409f8'))
        >>> hashlen = 16
        >>> alphabet = ALPHABET_41
        >>> text = hashstr_arr(data, 'label')
        >>> result = ('text = %s' % (str(text),))
        >>> print(result)

    Example3:
        >>> # DISABLE_DOCTEST
        >>> # UNSTABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import numpy as np
        >>> data = np.array(['a', 'b'], dtype=object)
        >>> text = hashstr(data, alphabet=ALPHABET_27)
        >>> result = ('text = %s' % (str(text),))
        >>> print(result)

    Ignore:
        data = np.array(['a', 'b'], dtype=object)
        data.tobytes()
        data = np.array(['a', 'b'])
        data = ['a', 'b']
        data = np.array([1, 2, 3])
        import hashlib
        from six.moves import cPickle as pickle
        pickle.dumps(data, protocol=2)
        python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())"
        python3 -c "import hashlib, numpy; print(hashlib.sha1('ab'.encode('utf8')).hexdigest())"
        python2 -c "import hashlib, numpy; print(hashlib.sha1('ab').hexdigest())"
        python3 -c "import hashlib, numpy; print(hashlib.sha1(b'ab').hexdigest())"
        python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())"
        python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([1, 2])).hexdigest())"
        # TODO: numpy arrays of strings must be encoded to bytes first in python3
        python2 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'])).hexdigest())"
        python3 -c "import hashlib, numpy; print(hashlib.sha1(numpy.array([b'a', b'b'])).hexdigest())"
        python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())"
        python -c "import hashlib, numpy; print(hashlib.sha1(numpy.array(['a', 'b'], dtype=object)).hexdigest())"
    """
    if util_type.HAVE_NUMPY and isinstance(data, np.ndarray):
        if data.dtype.kind == 'O':
            msg = '[ut] hashing ndarrays with dtype=object is unstable'
            warnings.warn(msg, RuntimeWarning)
            # but tobytes is ok, but differs between python 2 and 3 for objects
            data = data.dumps()
            # data = data.tobytes()
    if isinstance(data, tuple):
        # should instead do
        if False:
            hasher = hashlib.sha512()
            items = data
            for item in items:
                if isinstance(item, uuid.UUID):
                    hasher.update(item.bytes)
                else:
                    hasher.update(item)
            text = hasher.hexdigest()
            hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet))
            # Truncate
            text = hashstr2[:hashlen]
            return text
        else:
            msg = '[ut] hashing tuples with repr is not a good idea. FIXME'
            # warnings.warn(msg, RuntimeWarning)
            data = repr(data)  # Hack?
    # convert unicode into raw bytes
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')
    if isinstance(data, stringlike) and len(data) == 0:
        # Make a special hash for empty data
        text = (alphabet[0] * hashlen)
    else:
        # Get a 128 character hex string
        text = hashlib.sha512(data).hexdigest()
        # Shorten length of string (by increasing base)
        hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet))
        # Truncate
        text = hashstr2[:hashlen]
    return text
0.000556
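`convert_hexstr_to_bigbase` is another utool helper not shown in this entry; a minimal sketch of the base conversion implied by the call sites above (interface inferred, body assumed):

    def convert_hexstr_to_bigbase(hexstr, alphabet, bigbase):
        # Sketch: re-encode the hex digest as digits of the larger alphabet.
        x = int(hexstr, 16)
        digits = []
        while x > 0:
            x, r = divmod(x, bigbase)
            digits.append(alphabet[r])
        return ''.join(digits)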
def get_quantity_info(self, quantity, key=None, default=None):
    """
    Get information of a certain quantity.
    If *key* is `None`, return the full dict for that quantity.
    """
    d = self._get_quantity_info_dict(quantity, default if key is None else dict())

    if key is None:
        return d
    return d.get(key, default)
0.008108
def sub(self, repl):
    """
    Return Regex with an attached parse action to transform the parsed
    result as if called using `re.sub(expr, repl, string)
    <https://docs.python.org/3/library/re.html#re.sub>`_.

    Example::

        make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
        print(make_html.transformString("h1:main title:"))
        # prints "<h1>main title</h1>"
    """
    if self.asGroupList:
        warnings.warn("cannot use sub() with Regex(asGroupList=True)",
                      SyntaxWarning, stacklevel=2)
        raise SyntaxError()

    if self.asMatch and callable(repl):
        warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
                      SyntaxWarning, stacklevel=2)
        raise SyntaxError()

    if self.asMatch:
        def pa(tokens):
            return tokens[0].expand(repl)
    else:
        def pa(tokens):
            return self.re.sub(repl, tokens[0])
    return self.addParseAction(pa)
0.006524
def from_pandas(cls, index):
    """Create baloo Index from pandas Index.

    Parameters
    ----------
    index : pandas.base.Index

    Returns
    -------
    Index
    """
    from pandas import Index as PandasIndex
    check_type(index, PandasIndex)

    return Index(index.values, index.dtype, index.name)
0.005
def _connect(self):
    """Connect to FreeSWITCH ESL Interface."""
    try:
        self._eslconn = ESL.ESLconnection(self._eslhost, str(self._eslport),
                                          self._eslpass)
    except:
        pass
    if not self._eslconn.connected():
        raise Exception(
            "Connection to FreeSWITCH ESL Interface on host %s and port %d failed."
            % (self._eslhost, self._eslport)
        )
0.011278
def async_callback(self, callback, *args, **kwargs):
    """Obsolete - catches exceptions from the wrapped function.

    This function is unnecessary since Tornado 1.1.
    """
    if callback is None:
        return None
    if args or kwargs:
        callback = functools.partial(callback, *args, **kwargs)
    # FIXME: what about the exception wrapper?
    return callback
0.007282
def _get_query_params(query):
    """
    >>> _get_query_params({'query': {'a': 1, 'c': 3, 'b': 5}})
    'a=1 b=5 c=3'
    """
    query_params = OrderedDict(sorted(query['query'].items()))
    return ' '.join(['{}={}'.format(k, v) for k, v in query_params.items()])
0.003759
def save_initial_state(self):
    """Save initial cursors and initial active widget."""
    paths = self.paths
    self.initial_widget = self.get_widget()
    self.initial_cursors = {}

    for i, editor in enumerate(self.widgets):
        if editor is self.initial_widget:
            self.initial_path = paths[i]
        # This try is needed to make the fileswitcher work with
        # plugins that do not have a textCursor.
        try:
            self.initial_cursors[paths[i]] = editor.textCursor()
        except AttributeError:
            pass
0.004934
def WaitUntilComplete(self, poll_freq=2, timeout=None):
    """Poll until all request objects have completed.

    If status is 'notStarted' or 'executing' continue polling.
    If status is 'succeeded' then success.
    Else log as error.

    poll_freq option is in seconds.

    Returns an Int: the number of unsuccessful requests. This behavior is
    subject to change.

    >>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
    0
    """
    start_time = time.time()
    while len(self.requests):
        cur_requests = []
        for request in self.requests:
            status = request.Status()
            if status in ('notStarted', 'executing', 'resumed', 'queued', 'running'):
                cur_requests.append(request)
            elif status == 'succeeded':
                self.success_requests.append(request)
            elif status in ("failed", "unknown"):
                self.error_requests.append(request)

        self.requests = cur_requests

        if len(self.requests) > 0 and clc.v2.time_utils.TimeoutExpired(start_time, timeout):
            raise clc.RequestTimeoutException(
                'Timeout waiting for Requests: {0}'.format(self.requests[0].id),
                self.requests[0].Status())

        time.sleep(poll_freq)  # alternately - sleep for the delta between start time and 2s

    # Is this the best approach?  Non-zero indicates some error.  Exception
    # seems the wrong approach for a partial failure
    return(len(self.error_requests))
0.032823
def namedb_state_mutation_sanity_check(opcode, op_data):
    """
    Make sure all mutate fields for this operation are present.
    Return True if so.
    Raise exception if not.
    """
    # sanity check: each mutate field in the operation must be defined in
    # op_data, even if it's null.
    missing = []
    mutate_fields = op_get_mutate_fields(opcode)
    for field in mutate_fields:
        if field not in op_data.keys():
            missing.append(field)

    assert len(missing) == 0, \
        ("BUG: operation '%s' is missing the following fields: %s" % (opcode, ",".join(missing)))

    return True
0.014901
def _load_from_native_memory(self, addr, data_type=None, data_size=None,
                             no_of_elements=1, return_as_list=False):
    """
    Load from native memory.

    :param addr:            Native load address.
    :param data_type:       Java type of elements.
                            If set, all loaded elements are casted to this type.
    :param data_size:       Size of each element.
                            If not set, size is determined based on the given type.
    :param no_of_elements:  Number of elements to load.
    :param return_as_list:  Whether to wrap a single element in a list.
    :return:                The value or a list of loaded element(s).
    """
    # check if addr is symbolic
    if addr is not None and self.state.solver.symbolic(addr):
        raise NotImplementedError('Symbolic addresses are not supported.')
    # if data size is not set, derive it from the type
    if not data_size:
        if data_type:
            data_size = ArchSoot.sizeof[data_type]//8
        else:
            raise ValueError("Cannot determine the data size w/o a type.")

    native_memory_endness = self.state.arch.memory_endness

    # load elements
    values = []
    for i in range(no_of_elements):
        value = self.state.memory.load(addr + i*data_size,
                                       size=data_size,
                                       endness=native_memory_endness)
        if data_type:
            value = self.state.project.simos.cast_primitive(self.state,
                                                            value=value,
                                                            to_type=data_type)
        values.append(value)

    # return element(s)
    if no_of_elements == 1 and not return_as_list:
        return values[0]
    else:
        return values
0.004313
def form_valid(self, form, forms):
    """
    Called if all forms are valid. Creates a Recipe instance along with
    associated Ingredients and Instructions and then redirects to a
    success page.
    """
    if self.object:
        form.save()
        for (formobj, linkerfield) in forms:
            if form != formobj:
                formobj.save()
    else:
        self.object = form.save()
        for (formobj, linkerfield) in forms:
            if form != formobj:
                setattr(formobj.instance, linkerfield, self.object)
                formobj.save()
    return HttpResponseRedirect(self.get_success_url())
0.004386
def intersect(self, r):
    """Restrict self to common area with rectangle r."""
    if not len(r) == 4:
        raise ValueError("bad sequ. length")
    self.x0, self.y0, self.x1, self.y1 = TOOLS._intersect_rect(self, r)
    return self
0.007782
def _bind(username, password, anonymous=False, opts=None):
    '''
    Authenticate via an LDAP bind
    '''
    # Get config params; create connection dictionary
    basedn = _config('basedn', opts=opts)
    scope = _config('scope', opts=opts)
    connargs = {}
    # config params (auth.ldap.*)
    params = {
        'mandatory': ['uri', 'server', 'port', 'starttls', 'tls',
                      'no_verify', 'anonymous', 'accountattributename',
                      'activedirectory'],
        'additional': ['binddn', 'bindpw', 'filter', 'groupclass',
                       'auth_by_group_membership_only'],
    }

    paramvalues = {}

    for param in params['mandatory']:
        paramvalues[param] = _config(param, opts=opts)

    for param in params['additional']:
        paramvalues[param] = _config(param, mandatory=False, opts=opts)

    paramvalues['anonymous'] = anonymous
    if paramvalues['binddn']:
        # the binddn can also be composited, e.g.
        #   - {{ username }}@domain.com
        #   - cn={{ username }},ou=users,dc=company,dc=tld
        # so make sure to render it first before using it
        paramvalues['binddn'] = _render_template(paramvalues['binddn'], username)
        paramvalues['binddn'] = ldap.filter.escape_filter_chars(paramvalues['binddn'])

    if paramvalues['filter']:
        escaped_username = ldap.filter.escape_filter_chars(username)
        paramvalues['filter'] = _render_template(paramvalues['filter'], escaped_username)

    # Only add binddn/bindpw to the connargs when they're set, as they're not
    # mandatory for initializing the LDAP object, but if they're provided
    # initially, a bind attempt will be done during the initialization to
    # validate them
    if paramvalues['binddn']:
        connargs['binddn'] = paramvalues['binddn']
        if paramvalues['bindpw']:
            params['mandatory'].append('bindpw')

    for name in params['mandatory']:
        connargs[name] = paramvalues[name]

    if not paramvalues['anonymous']:
        if paramvalues['binddn'] and paramvalues['bindpw']:
            # search for the user's DN to be used for the actual authentication
            _ldap = _LDAPConnection(**connargs).ldap
            log.debug(
                'Running LDAP user dn search with filter:%s, dn:%s, '
                'scope:%s', paramvalues['filter'], basedn, scope
            )
            result = _ldap.search_s(basedn, int(scope), paramvalues['filter'])
            if not result:
                log.warning('Unable to find user %s', username)
                return False
            elif len(result) > 1:
                # Active Directory returns something odd. Though we do not
                # chase referrals (ldap.set_option(ldap.OPT_REFERRALS, 0) above)
                # it still appears to return several entries for other potential
                # sources for a match. All these sources have None for the
                # CN (ldap array return items are tuples: (cn, ldap entry))
                # But the actual CNs are at the front of the list.
                # So with some list comprehension magic, extract the first tuple
                # entry from all the results, create a list from those,
                # and count the ones that are not None. If that total is more than one
                # we need to error out because the ldap filter isn't narrow enough.
                cns = [tup[0] for tup in result]
                total_not_none = sum(1 for c in cns if c is not None)
                if total_not_none > 1:
                    log.error('LDAP lookup found multiple results for user %s', username)
                    return False
                elif total_not_none == 0:
                    log.error('LDAP lookup--unable to find CN matching user %s', username)
                    return False

            connargs['binddn'] = result[0][0]
        if paramvalues['binddn'] and not paramvalues['bindpw']:
            connargs['binddn'] = paramvalues['binddn']
    elif paramvalues['binddn'] and not paramvalues['bindpw']:
        connargs['binddn'] = paramvalues['binddn']

    # Update connection dictionary with the user's password
    connargs['bindpw'] = password

    # Attempt bind with user dn and password
    if paramvalues['anonymous']:
        log.debug('Attempting anonymous LDAP bind')
    else:
        log.debug('Attempting LDAP bind with user dn: %s', connargs['binddn'])
    try:
        ldap_conn = _LDAPConnection(**connargs).ldap
    except Exception:
        connargs.pop('bindpw', None)  # Don't log the password
        log.error('Failed to authenticate user dn via LDAP: %s', connargs)
        log.debug('Error authenticating user dn via LDAP:', exc_info=True)
        return False
    log.debug('Successfully authenticated user dn via LDAP: %s', connargs['binddn'])
    return ldap_conn
0.002468
def textfsm_extractor(cls, template_name, raw_text):
    """
    Applies a TextFSM template over a raw text and return the matching table.

    Main usage of this method will be to extract data from a non-structured
    output from a network device and return the values in a table format.

    :param cls: Instance of the driver class
    :param template_name: Specifies the name of the template to be used
    :param raw_text: Text output as the devices prompts on the CLI
    :return: table-like list of entries
    """
    textfsm_data = list()
    fsm_handler = None
    for c in cls.__class__.mro():
        if c is object:
            continue
        current_dir = os.path.dirname(
            os.path.abspath(sys.modules[c.__module__].__file__)
        )
        template_dir_path = "{current_dir}/utils/textfsm_templates".format(
            current_dir=current_dir
        )
        template_path = "{template_dir_path}/{template_name}.tpl".format(
            template_dir_path=template_dir_path, template_name=template_name
        )

        try:
            with open(template_path) as f:
                fsm_handler = textfsm.TextFSM(f)

                for obj in fsm_handler.ParseText(raw_text):
                    entry = {}
                    for index, entry_value in enumerate(obj):
                        entry[fsm_handler.header[index].lower()] = entry_value
                    textfsm_data.append(entry)

                return textfsm_data
        except IOError:  # Template not present in this class
            continue  # Continue up the MRO
        except textfsm.TextFSMTemplateError as tfte:
            raise napalm.base.exceptions.TemplateRenderException(
                "Wrong format of TextFSM template {template_name}: {error}".format(
                    template_name=template_name, error=py23_compat.text_type(tfte)
                )
            )

    raise napalm.base.exceptions.TemplateNotImplemented(
        "TextFSM template {template_name}.tpl is not defined under {path}".format(
            template_name=template_name, path=template_dir_path
        )
    )
0.002365
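For context, the `.tpl` files the loop above searches for are TextFSM templates, which pair `Value` definitions with state rules; a minimal, hypothetical example:

    Value INTERFACE (\S+)
    Value STATUS (up|down)

    Start
      ^${INTERFACE} is ${STATUS} -> Record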
def _check_medimg(image, make_it_3d=True):
    """Check that image is a proper img. Turn filenames into objects.

    Parameters
    ----------
    image: img-like object or str
        Can either be:
        - a file path to a medical image file, e.g. NifTI, .mhd/raw, .mha
        - any object with get_data() method and affine & header attributes,
          e.g., nibabel.Nifti1Image.
        - a Numpy array, which will be wrapped by a nibabel.Nifti2Image
          class with an `eye` affine.

        If niimg is a string, consider it as a path to Nifti image and call
        nibabel.load on it.
        If it is an object, check if get_data() and get_affine() methods are
        present, raise TypeError otherwise.

    make_it_3d: boolean, optional
        If True, check if the image is a 3D image and raise an error if not.

    Returns
    -------
    result: nifti-like
        result can be nibabel.Nifti1Image or the input, as-is. It is
        guaranteed that the returned object has get_data() and get_affine()
        methods.
    """
    if isinstance(image, string_types):
        # a filename, load it
        img = open_volume_file(image)

        if make_it_3d:
            img = _make_it_3d(img)

        return img

    elif isinstance(image, np.ndarray):
        return nib.Nifti2Image(image, affine=np.eye(image.ndim + 1))

    elif isinstance(image, nib.Nifti1Image) or is_img(image):
        return image

    else:
        raise TypeError('Data given cannot be converted to a medical'
                        ' image: this object -"{}"- does not have'
                        ' get_data or get_affine methods'.format(type(image)))
0.001845
def _generateForTokenSecurity(self, username, password, referer=None,
                              tokenUrl=None, expiration=None,
                              proxy_url=None, proxy_port=None):
    """ generates a token for a feature service """
    if referer is None:
        referer = self._referer_url
    if tokenUrl is None:
        tokenUrl = self._token_url

    query_dict = {'username': self._username,
                  'password': self._password,
                  'expiration': str(_defaultTokenExpiration),
                  'referer': referer,
                  'f': 'json'}
    if expiration is not None:
        query_dict['expiration'] = str(expiration)
    self._token_created_on = datetime.datetime.now()
    token = self._post(url=tokenUrl, param_dict=query_dict,
                       securityHandler=None,
                       proxy_port=self._proxy_port,
                       proxy_url=self._proxy_url)
    if 'error' in token:
        self._token = None
        return token
    self._token_expires_on = datetime.datetime.fromtimestamp(token['expires'] / 1000) - \
        datetime.timedelta(seconds=10)
    if "token" not in token:
        self._token = None
        return None
    else:
        httpPrefix = self._url
        if token['ssl'] == True:
            httpPrefix = self._surl
        self._token = token['token']
        return token['token'], httpPrefix
0.009748
def _QA_data_stock_to_fq(bfq_data, xdxr_data, fqtype):
    """Apply price adjustment (fuquan) to stock data using xdxr records from the database."""
    info = xdxr_data.query('category==1')
    bfq_data = bfq_data.assign(if_trade=1)

    if len(info) > 0:
        data = pd.concat(
            [
                bfq_data,
                info.loc[bfq_data.index[0]:bfq_data.index[-1], ['category']]
            ],
            axis=1
        )

        data['if_trade'].fillna(value=0, inplace=True)
        data = data.fillna(method='ffill')

        data = pd.concat(
            [
                data,
                info.loc[bfq_data.index[0]:bfq_data.index[-1],
                         ['fenhong', 'peigu', 'peigujia', 'songzhuangu']]
            ],
            axis=1
        )
    else:
        data = pd.concat(
            [
                bfq_data,
                info.loc[:, ['category', 'fenhong', 'peigu', 'peigujia',
                             'songzhuangu']]
            ],
            axis=1
        )
    data = data.fillna(0)
    data['preclose'] = (
        data['close'].shift(1) * 10 - data['fenhong'] +
        data['peigu'] * data['peigujia']
    ) / (10 + data['peigu'] + data['songzhuangu'])

    if fqtype in ['01', 'qfq']:
        data['adj'] = (data['preclose'].shift(-1) /
                       data['close']).fillna(1)[::-1].cumprod()
    else:
        data['adj'] = (data['close'] /
                       data['preclose'].shift(-1)).cumprod().shift(1).fillna(1)

    for col in ['open', 'high', 'low', 'close', 'preclose']:
        data[col] = data[col] * data['adj']

    data['volume'] = data['volume'] / \
        data['adj'] if 'volume' in data.columns else data['vol'] / data['adj']
    try:
        data['high_limit'] = data['high_limit'] * data['adj']
        data['low_limit'] = data['low_limit'] * data['adj']
    except:
        pass
    return data.query('if_trade==1 and open != 0').drop(
        ['fenhong', 'peigu', 'peigujia', 'songzhuangu', 'if_trade',
         'category'],
        axis=1,
        errors='ignore'
    )
0.000897
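The preclose formula above can be checked by hand; a sketch with a 1.0 CNY dividend per 10 shares and no rights issue (illustrative numbers, not real xdxr data):

prev_close = 10.0
fenhong, peigu, peigujia, songzhuangu = 1.0, 0.0, 0.0, 0.0
preclose = (prev_close * 10 - fenhong + peigu * peigujia) \
    / (10 + peigu + songzhuangu)
print(preclose)  # 9.9 -- the dividend-adjusted reference price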
def _extract_from_object(self, selector):
    """Extracts all values from `self.obj` object addressed with a `selector`.

    Selector can be a ``slice``, or a singular value extractor in form of a
    valid dictionary key (hashable object).

    Object (operated on) can be anything with an itemgetter or attrgetter,
    including, but not limited to, `dict` and `list`.
    Itemgetter is preferred over attrgetter, except when called as `.key`.

    If `selector` is a singular value extractor (like a string, integer,
    etc), a single value (for a given key) is returned if key exists, an
    empty list if not.

    If `selector` is a ``slice``, each key from that range is extracted;
    falling back, again, to an empty list.

    """
    if isinstance(selector, slice):
        # we must expand the slice manually, in order to be able to apply
        # it, for example, to mapping types or general objects
        # (e.g. slice `4::2` will filter all even numerical keys/attrs >=4)
        start = selector.start or 0
        step = selector.step or 1

        if selector.stop is None:
            if hasattr(self.obj, "keys"):
                # filter keys by slice
                keys = \
                    [k for k in self.obj.keys() if isinstance(k, baseinteger) \
                        and k >= start and (k - start) % step == 0]
            elif hasattr(self.obj, "__len__"):
                # object we slice should have a length (__len__ method),
                keys = xrange(start, len(self.obj), step)
            else:
                # otherwise, we don't know how to slice, so just skip it,
                # instead of failing
                keys = []
        else:
            keys = xrange(start, selector.stop, step)
    else:
        keys = [selector]

    res = []
    for key in keys:
        self._append(self.obj, key, res)
    return res
0.004854
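How the slice expansion above behaves on a mapping can be seen with plain Python 3 types (int standing in for baseinteger):

keys = {1: 'a', 4: 'b', 5: 'c', 6: 'd', 8: 'e'}
start, step = 4, 2  # i.e. the slice 4::2
picked = [k for k in keys
          if isinstance(k, int) and k >= start and (k - start) % step == 0]
print(sorted(picked))  # [4, 6, 8]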
def write_manifest(self):
    """Write the file list in 'self.filelist' (presumably as filled in
    by 'add_defaults()' and 'read_template()') to the manifest file
    named by 'self.manifest'.
    """
    # The manifest must be UTF-8 encodable. See #303.
    if sys.version_info >= (3,):
        files = []
        for file in self.filelist.files:
            try:
                file.encode("utf-8")
            except UnicodeEncodeError:
                log.warn("'%s' not UTF-8 encodable -- skipping" % file)
            else:
                files.append(file)
        self.filelist.files = files

    files = self.filelist.files
    if os.sep != '/':
        files = [f.replace(os.sep, '/') for f in files]
    self.execute(write_file, (self.manifest, files),
                 "writing manifest file '%s'" % self.manifest)
0.005507
def _tilt(c, direction:uniform_int, magnitude:uniform=0, invert=False):
    "Tilt `c` field with random `direction` and `magnitude`."
    orig_pts = [[-1,-1], [-1,1], [1,-1], [1,1]]
    if direction == 0:   targ_pts = [[-1,-1], [-1,1], [1,-1-magnitude], [1,1+magnitude]]
    elif direction == 1: targ_pts = [[-1,-1-magnitude], [-1,1+magnitude], [1,-1], [1,1]]
    elif direction == 2: targ_pts = [[-1,-1], [-1-magnitude,1], [1,-1], [1+magnitude,1]]
    elif direction == 3: targ_pts = [[-1-magnitude,-1], [-1,1], [1+magnitude,-1], [1,1]]
    # use the local orig_pts defined above; _orig_pts was never defined
    coeffs = _find_coeffs(targ_pts, orig_pts) if invert else _find_coeffs(orig_pts, targ_pts)
    return _apply_perspective(c, coeffs)
0.050445
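_find_coeffs is not shown in this snippet; one common formulation (an assumption about its behavior, not necessarily the library's own code) solves an 8x8 system for the perspective coefficients mapping source corners to target corners:

import numpy as np

def find_coeffs(source_pts, target_pts):
    # rows encode X = (a*x + b*y + c)/(g*x + h*y + 1) and the analogous Y
    rows = []
    for (x, y), (X, Y) in zip(source_pts, target_pts):
        rows.append([x, y, 1, 0, 0, 0, -X * x, -X * y])
        rows.append([0, 0, 0, x, y, 1, -Y * x, -Y * y])
    A = np.array(rows, dtype=np.float64)
    b = np.array(target_pts, dtype=np.float64).reshape(8)
    return np.linalg.solve(A, b)  # (a, b, c, d, e, f, g, h)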
def show_welcome_message(self):
    """Show the welcome message."""
    # import here only so that it is AFTER i18n set up
    from safe.gui.tools.options_dialog import OptionsDialog

    # Do not show by default
    show_message = False

    previous_version = StrictVersion(setting('previous_version'))
    current_version = StrictVersion(inasafe_version)

    # Set previous_version to the current inasafe_version
    set_setting('previous_version', inasafe_version)

    if setting('always_show_welcome_message', expected_type=bool):
        # Show if the setting says so
        show_message = True
    elif previous_version < current_version:
        # Always show if the user installed a new version
        show_message = True

    # Allow to disable welcome message when running automated tests
    if os.environ.get('INASAFE_DISABLE_WELCOME_MESSAGE', False):
        show_message = False

    if show_message:
        dialog = OptionsDialog(
            iface=self.iface,
            parent=self.iface.mainWindow())
        dialog.show_welcome_dialog()
        if dialog.exec_():  # modal
            self.dock_widget.read_settings()
0.001608
def setWidth(self, typeID, width): """setWidth(string, double) -> None Sets the width in m of vehicles of this type. """ self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width)
0.007692
def sweep_to_proto_dict(sweep: Sweep, repetitions: int = 1) -> Dict:
    """Converts sweep into an equivalent protobuf representation."""
    msg = {}  # type: Dict
    if sweep != UnitSweep:
        sweep = _to_zip_product(sweep)
        msg['sweep'] = {
            'factors': [_sweep_zip_to_proto_dict(cast(Zip, factor)) for factor
                        in sweep.factors]
        }
    msg['repetitions'] = repetitions
    return msg
0.006818
def fetch_access_token(self, url, verifier=None, **request_kwargs):
    """Fetch an access token.

    This is the final step in the OAuth 1 workflow. An access token is
    obtained using all previously obtained credentials, including the
    verifier from the authorization step.

    Note that a previously set verifier will be reset for your
    convenience, or else signature creation will be incorrect on
    consecutive requests.

    >>> access_token_url = 'https://api.twitter.com/oauth/access_token'
    >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
    >>> oauth_session = OAuth1Session('client-key', client_secret='secret')
    >>> oauth_session.parse_authorization_response(redirect_response)
    {
        'oauth_token': 'kjerht2309u',
        'oauth_token_secret': 'lsdajfh923874',
        'oauth_verifier': 'w34o8967345',
    }
    >>> oauth_session.fetch_access_token(access_token_url)
    {
        'oauth_token': 'sdf0o9823sjdfsdf',
        'oauth_token_secret': '2kjshdfp92i34asdasd',
    }
    """
    if verifier:
        self._client.client.verifier = verifier
    if not getattr(self._client.client, "verifier", None):
        raise VerifierMissing("No client verifier has been set.")
    token = self._fetch_token(url, **request_kwargs)
    log.debug("Resetting verifier attribute, should not be used anymore.")
    self._client.client.verifier = None
    return token
0.001873
def double(self): """ :return: A PrimePoint object that is twice this point """ # X9.62 B.3: p = self.curve.p a = self.curve.a l_ = ((3 * self.x * self.x + a) * inverse_mod(2 * self.y, p)) % p x3 = (l_ * l_ - 2 * self.x) % p y3 = (l_ * (self.x - x3) - self.y) % p return PrimePoint(self.curve, x3, y3)
0.005025
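The doubling formula above can be verified on a toy curve, y^2 = x^3 + 2x + 2 over GF(17), where doubling P = (5, 1) gives (6, 3); inverse_mod is stood in for by Python 3.8's three-argument pow:

p, a = 17, 2
x, y = 5, 1                              # P = (5, 1) lies on the curve
l_ = ((3 * x * x + a) * pow(2 * y, -1, p)) % p
x3 = (l_ * l_ - 2 * x) % p
y3 = (l_ * (x - x3) - y) % p
print((x3, y3))                          # (6, 3) == 2P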
def calculate(self, T, method):
    r'''Method to calculate low-pressure liquid viscosity at temperature
    `T` with a given method.

    This method has no exception handling; see `T_dependent_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature at which to calculate viscosity, [K]
    method : str
        Name of the method to use

    Returns
    -------
    mu : float
        Viscosity of the liquid at T and a low pressure, [Pa*s]
    '''
    if method == DUTT_PRASAD:
        A, B, C = self.DUTT_PRASAD_coeffs
        mu = ViswanathNatarajan3(T, A, B, C)
    elif method == VISWANATH_NATARAJAN_3:
        A, B, C = self.VISWANATH_NATARAJAN_3_coeffs
        mu = ViswanathNatarajan3(T, A, B, C)
    elif method == VISWANATH_NATARAJAN_2:
        A, B = self.VISWANATH_NATARAJAN_2_coeffs
        mu = ViswanathNatarajan2(T, A, B)
    elif method == VISWANATH_NATARAJAN_2E:
        C, D = self.VISWANATH_NATARAJAN_2E_coeffs
        mu = ViswanathNatarajan2Exponential(T, C, D)
    elif method == DIPPR_PERRY_8E:
        mu = EQ101(T, *self.Perrys2_313_coeffs)
    elif method == COOLPROP:
        mu = CoolProp_T_dependent_property(T, self.CASRN, 'V', 'l')
    elif method == LETSOU_STIEL:
        mu = Letsou_Stiel(T, self.MW, self.Tc, self.Pc, self.omega)
    elif method == PRZEDZIECKI_SRIDHAR:
        Vml = self.Vml(T) if hasattr(self.Vml, '__call__') else self.Vml
        mu = Przedziecki_Sridhar(T, self.Tm, self.Tc, self.Pc, self.Vc, Vml,
                                 self.omega, self.MW)
    elif method == VDI_PPDS:
        A, B, C, D, E = self.VDI_PPDS_coeffs
        term = (C - T)/(T - D)
        if term < 0:
            term1 = -((T - C)/(T - D))**(1/3.)
        else:
            term1 = term**(1/3.)
        term2 = term*term1
        mu = E*exp(A*term1 + B*term2)
    elif method in self.tabular_data:
        mu = self.interpolate(T, method)
    return mu
0.001862
def change_object_name(self, old_name, new_name): """ Change object name """ h = self._get_object_handle(old_name) if old_name in self._object_handles: self._object_handles.pop(old_name) lua_code = "simSetObjectName({}, '{}')".format(h, new_name) self._inject_lua_code(lua_code)
0.006061
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course = self.course_factory.get_course(courseid) username = self.user_manager.session_username() error = False change = False msg = "" data = web.input() if self.user_manager.has_staff_rights_on_course(course): raise web.notfound() elif not self.user_manager.course_is_open_to_user(course, lti=False): return self.template_helper.get_renderer().course_unavailable() elif "register_group" in data: change = True if course.can_students_choose_group() and course.use_classrooms(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username}) if int(data["register_group"]) >= 0 and (len(aggregation["groups"]) > int(data["register_group"])): group = aggregation["groups"][int(data["register_group"])] if group["size"] > len(group["students"]): for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) aggregation["groups"][int(data["register_group"])]["students"].append(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s registered to group %s/%s/%s", username, courseid, aggregation["description"], data["register_group"]) else: error = True msg = _("Couldn't register to the specified group.") elif course.can_students_choose_group(): aggregation = self.database.aggregations.find_one( {"courseid": course.get_id(), "students": username}) if aggregation is not None: aggregation["students"].remove(username) for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) # Add student in the classroom and unique group self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"students": username}}) new_aggregation = self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"groups.0.students": username}}) if new_aggregation is None: error = True msg = _("Couldn't register to the specified group.") else: self._logger.info("User %s registered to team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You are not allowed to change group.") elif "unregister_group" in data: change = True if course.can_students_choose_group(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username, "groups.students": username}) if aggregation is not None: for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s unregistered from group/team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You're not registered in a group.") else: error = True msg = _("You are not allowed to change group.") tasks = course.get_tasks() last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": courseid, "taskid": {"$in": list(tasks.keys())}}) for submission in last_submissions: submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language()) aggregation = 
self.user_manager.get_course_user_aggregation(course) aggregations = self.user_manager.get_course_aggregations(course) users = self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)) if course.use_classrooms(): mygroup = None for index, group in enumerate(aggregation["groups"]): if self.user_manager.session_username() in group["students"]: mygroup = group mygroup["index"] = index + 1 return self.template_helper.get_renderer().classroom(course, last_submissions, aggregation, users, mygroup, msg, error, change) else: return self.template_helper.get_renderer().team(course, last_submissions, aggregations, users, aggregation, msg, error)
0.00493
def _validate_ssh_minion_opts(opts):
    '''
    Ensure we're not using any invalid ssh_minion_opts. We want to make sure
    that the ssh_minion_opts does not override any pillar or fileserver options
    inherited from the master config. To add other items, modify the if
    statement in the for loop below.
    '''
    ssh_minion_opts = opts.get('ssh_minion_opts', {})
    if not isinstance(ssh_minion_opts, dict):
        log.error('Invalidly-formatted ssh_minion_opts')
        opts.pop('ssh_minion_opts')
        # nothing left to validate once the malformed value is removed
        return

    for opt_name in list(ssh_minion_opts):
        if re.match('^[a-z0-9]+fs_', opt_name, flags=re.IGNORECASE) \
                or ('pillar' in opt_name and not 'ssh_merge_pillar' == opt_name) \
                or opt_name in ('fileserver_backend',):
            log.warning(
                '\'%s\' is not a valid ssh_minion_opts parameter, ignoring',
                opt_name
            )
            ssh_minion_opts.pop(opt_name)
0.002112
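The filter condition above in isolation; fileserver-style and pillar-style keys are dropped while ssh_merge_pillar passes:

import re

for opt in ('gitfs_remotes', 'pillar_roots', 'ssh_merge_pillar', 'timeout'):
    bad = bool(re.match('^[a-z0-9]+fs_', opt, flags=re.IGNORECASE)) \
        or ('pillar' in opt and not 'ssh_merge_pillar' == opt) \
        or opt in ('fileserver_backend',)
    print(opt, '->', 'dropped' if bad else 'kept')
# gitfs_remotes -> dropped, pillar_roots -> dropped,
# ssh_merge_pillar -> kept, timeout -> kept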
def coerce(self, value): """Subclasses should override this method for type coercion. Default version will simply return the argument. If the argument is not valid, a SerializeException is raised. For primitives like booleans, ints, floats, and strings, use this default version to avoid unintended type conversions.""" if not self.is_valid(value): raise ex.SerializeException('{} is not a valid value for ' 'type {}'.format(value, self.__class__.__name__)) return value
0.005172
def inheritFromContext(self, ignore=()):
    """
    Doesn't store exactly the same items as Nodebox for ease of implementation;
    it has enough to get the Nodebox Dendrite example working.
    """
    for canvas_attr, grob_attr in STATES.items():
        if canvas_attr in ignore:
            continue
        setattr(self, grob_attr, getattr(self._bot._canvas, canvas_attr))
0.007317
def solveAndNotify(self, request): """Notifies the owner of the current request (so, the user doing the exercise) that they've solved the exercise, and mark it as solved in the database. """ remote = request.transport.remote withThisIdentifier = Exercise.identifier == self.exerciseIdentifier exercise = self.store.findUnique(Exercise, withThisIdentifier) solveAndNotify(remote, exercise)
0.004415
def add_bundle(name, scripts=(), files=(), scriptsdir=SCRIPTSDIR,
               filesdir=FILESDIR):
    """High level, simplified interface for creating a bundle which takes the
    bundle name, a list of script file names in a common scripts directory,
    and a list of absolute target file paths, of which the basename is also
    located in a common files directory. It converts those lists into maps
    and then calls new_bundle() to actually create the Bundle and add it
    to BUNDLEMAP"""
    scriptmap = makemap(scripts, join(PATH, scriptsdir))
    filemap = dict(zip(files, [join(PATH, filesdir, os.path.basename(f))
                               for f in files]))
    new_bundle(name, scriptmap, filemap)
0.004451
def can_solve(cls, resource): """Tells if the solver is able to resolve the given resource. Arguments --------- resource : subclass of ``dataql.resources.Resource`` The resource to check if it is solvable by the current solver class Returns ------- boolean ``True`` if the current solver class can solve the given resource, ``False`` otherwise. Example ------- >>> AttributeSolver.solvable_resources (<class 'dataql.resources.Field'>,) >>> AttributeSolver.can_solve(Field('foo')) True >>> AttributeSolver.can_solve(Object('bar')) False """ for solvable_resource in cls.solvable_resources: if isinstance(resource, solvable_resource): return True return False
0.003492
def looks_like_a_filename(kernel_source):
    """ attempt to detect whether source code or a filename was passed """
    logging.debug('looks_like_a_filename called')
    result = False
    if isinstance(kernel_source, str):
        result = True
        # test if not too long
        if len(kernel_source) > 250:
            result = False
        # test if it contains no special characters
        for c in "();{}\\":
            if c in kernel_source:
                result = False
        # just a safeguard for stuff that looks like code
        for s in ["__global__ ", "__kernel ", "void ", "float "]:
            if s in kernel_source:
                result = False
        # string must contain substring ".c", ".opencl", or ".F"
        result = result and any(s in kernel_source for s in (".c", ".opencl", ".F"))
    logging.debug('kernel_source is a filename: %s', result)
    return result
0.006608
def encode_endian(text, encoding, errors="strict", le=True): """Like text.encode(encoding) but always returns little endian/big endian BOMs instead of the system one. Args: text (text) encoding (str) errors (str) le (boolean): if little endian Returns: bytes Raises: UnicodeEncodeError LookupError """ encoding = codecs.lookup(encoding).name if encoding == "utf-16": if le: return codecs.BOM_UTF16_LE + text.encode("utf-16-le", errors) else: return codecs.BOM_UTF16_BE + text.encode("utf-16-be", errors) elif encoding == "utf-32": if le: return codecs.BOM_UTF32_LE + text.encode("utf-32-le", errors) else: return codecs.BOM_UTF32_BE + text.encode("utf-32-be", errors) else: return text.encode(encoding, errors)
0.001115
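Usage sketch for the helper above: the returned bytes always start with an explicit BOM, independent of the platform's native byte order.

import codecs

out = encode_endian(u"hi", "utf-16", le=True)
assert out == codecs.BOM_UTF16_LE + u"hi".encode("utf-16-le")
out = encode_endian(u"hi", "utf-16", le=False)
assert out == codecs.BOM_UTF16_BE + u"hi".encode("utf-16-be")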
def get(self, name, default=None): """Get a configuration value and expand environment variables.""" value = self.params.get(name, default) if isinstance(value, str): value = os.path.expandvars(value) return value
0.007782
def listing_searchable_text(instance):
    """Fulltext search for the audit metadata
    """
    # get all snapshots
    snapshots = get_snapshots(instance)

    # extract all snapshot values, because we are not interested in the
    # fieldnames (keys)
    values = map(lambda s: s.values(), snapshots)

    # prepare a set of unified catalog data
    catalog_data = set()

    # values to skip
    skip_values = ["None", "true", "True", "false", "False"]

    # internal uid -> title cache
    uid_title_cache = {}

    # helper function to recursively unpack the snapshot values
    def append(value):
        if isinstance(value, (list, tuple)):
            map(append, value)
        elif isinstance(value, (dict)):
            map(append, value.items())
        elif isinstance(value, basestring):
            # convert unicode to UTF8
            if isinstance(value, unicode):
                value = api.safe_unicode(value).encode("utf8")
            # skip single short values
            if len(value) < 2:
                return
            # skip non-meaningful values
            if value in skip_values:
                return
            # skip ISO dates
            if re.match(DATE_RX, value):
                return
            # fetch the title
            if re.match(UID_RX, value):
                if value in uid_title_cache:
                    value = uid_title_cache[value]
                else:
                    title_or_id = get_title_or_id_from_uid(value)
                    uid_title_cache[value] = title_or_id
                    value = title_or_id
            catalog_data.add(value)

    # extract all meaningful values from each snapshot
    for value in itertools.chain.from_iterable(values):
        append(value)

    return " ".join(catalog_data)
0.00057
def _get_ntgpadnt(self, ver, add_ns):
    """Create the namedtuple class used to hold each annotation"""
    hdrs = self.gpad_columns[ver]
    if add_ns:
        hdrs = hdrs + ['NS']
    return cx.namedtuple("ntgpadobj", hdrs)
0.008475
def home(request, hproPk): """ Route the request to runURI if defined otherwise go to plugIt """ if settings.PIAPI_STANDALONE: return main(request, '', hproPk) (plugIt, baseURI, hproject) = getPlugItObject(hproPk) if hproject.runURI: return HttpResponseRedirect(hproject.runURI) else: # Check if a custom url key is used if hasattr(hproject, 'plugItCustomUrlKey') and hproject.plugItCustomUrlKey: return HttpResponseRedirect(reverse('plugIt.views.main', args=(hproject.plugItCustomUrlKey, ''))) return main(request, '', hproPk)
0.004983
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible token, the supply is set by the Issuer, who issues funds."
    # allocate new issue as result of a new cash entry
    ctx.accounts[ctx.msg_sender] += amount
    ctx.issued_amounts[ctx.msg_sender] += amount
    # Store hash(rtgs)
    ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
    return OK
0.004854
def _get_method_doc(self): """ Return method documentations. """ ret = {} for method_name in self.methods: method = getattr(self, method_name, None) if method: ret[method_name] = method.__doc__ return ret
0.007246
def alert_stream(self, reset_event, kill_event):
    """Open event stream."""
    _LOGGING.debug('Stream Thread Started: %s, %s', self.name, self.cam_id)
    start_event = False
    parse_string = ""
    fail_count = 0

    url = '%s/ISAPI/Event/notification/alertStream' % self.root_url

    # pylint: disable=too-many-nested-blocks
    while True:

        try:
            stream = self.hik_request.get(url, stream=True,
                                          timeout=(CONNECT_TIMEOUT,
                                                   READ_TIMEOUT))
            if stream.status_code == requests.codes.not_found:
                # Try alternate URL for stream
                url = '%s/Event/notification/alertStream' % self.root_url
                stream = self.hik_request.get(url, stream=True)

            if stream.status_code != requests.codes.ok:
                raise ValueError('Connection unsuccessful.')
            else:
                _LOGGING.debug('%s Connection Successful.', self.name)
                fail_count = 0
                self.watchdog.start()

            for line in stream.iter_lines():
                # _LOGGING.debug('Processing line from %s', self.name)
                # filter out keep-alive new lines
                if line:
                    str_line = line.decode("utf-8", "ignore")
                    # New events start with --boundary
                    if str_line.find('<EventNotificationAlert') != -1:
                        # Start of event message
                        start_event = True
                        parse_string += str_line
                    elif str_line.find('</EventNotificationAlert>') != -1:
                        # Message end found
                        parse_string += str_line
                        start_event = False
                        if parse_string:
                            tree = ET.fromstring(parse_string)
                            self.process_stream(tree)
                            self.update_stale()
                            parse_string = ""
                    else:
                        if start_event:
                            parse_string += str_line

                if kill_event.is_set():
                    # We were asked to stop the thread so lets do so.
                    break
                elif reset_event.is_set():
                    # We need to reset the connection.
                    raise ValueError('Watchdog failed.')

            if kill_event.is_set():
                # We were asked to stop the thread so lets do so.
                _LOGGING.debug('Stopping event stream thread for %s',
                               self.name)
                self.watchdog.stop()
                self.hik_request.close()
                return
            elif reset_event.is_set():
                # We need to reset the connection.
                raise ValueError('Watchdog failed.')

        except (ValueError,
                requests.exceptions.ConnectionError,
                requests.exceptions.ChunkedEncodingError) as err:
            fail_count += 1
            reset_event.clear()
            _LOGGING.warning('%s Connection Failed (count=%d). Waiting %ss. Err: %s',
                             self.name, fail_count, (fail_count * 5) + 5, err)
            parse_string = ""
            self.watchdog.stop()
            self.hik_request.close()
            time.sleep(5)
            self.update_stale()
            time.sleep(fail_count * 5)
            continue
0.001045
def stop_recording(self): """ Stops writing video to file. """ if not self._recording: raise Exception("Cannot stop a video recording when it's not recording!") self._cmd_q.put(('stop',)) self._recording = False
0.011765
def validlines(self): """Return all lines within which Prosodic understood all words.""" return [ln for ln in self.lines() if (not ln.isBroken() and not ln.ignoreMe)]
0.023392
def auth(self, user, pwd): """ Perform a login with the given Skype username and its password. This emulates a login to Skype for Web on ``api.skype.com``. Args: user (str): username of the connecting account pwd (str): password of the connecting account Returns: (str, datetime.datetime) tuple: Skype token, and associated expiry if known Raises: .SkypeAuthException: if the login request is rejected .SkypeApiException: if the login form can't be processed """ # Wrap up the credentials ready to send. pwdHash = base64.b64encode(hashlib.md5((user + "\nskyper\n" + pwd).encode("utf-8")).digest()).decode("utf-8") json = self.conn("POST", "{0}/login/skypetoken".format(SkypeConnection.API_USER), json={"username": user, "passwordHash": pwdHash, "scopes": "client"}).json() if "skypetoken" not in json: raise SkypeAuthException("Couldn't retrieve Skype token from response") expiry = None if "expiresIn" in json: expiry = datetime.fromtimestamp(int(time.time()) + int(json["expiresIn"])) return json["skypetoken"], expiry
0.007229
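The credential wrapping above, shown standalone (illustrative credentials; only the hash construction is reproduced, not the network exchange):

import base64
import hashlib

user, pwd = "alice", "hunter2"
pwdHash = base64.b64encode(
    hashlib.md5((user + "\nskyper\n" + pwd).encode("utf-8")).digest()
).decode("utf-8")
print(len(pwdHash))  # 24 -- base64 of the 16-byte MD5 digest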
def batch_filter(self, zs, Rs=None, dts=None, UT=None, saver=None):
    """ Performs the UKF filter over the list of measurements in `zs`.

    Parameters
    ----------
    zs : list-like
        list of measurements at each time step `self._dt`. Missing
        measurements must be represented by `None`.

    Rs : None, np.array or list-like, default=None
        optional list of values to use for the measurement error
        covariance R.

        If Rs is None then self.R is used for all epochs.

        If it is a list of matrices or a 3D array where
        len(Rs) == len(zs), then it is treated as a list of R values, one
        per epoch. This allows you to have varying R per epoch.

    dts : None, scalar or list-like, default=None
        optional value or list of delta time to be passed into predict.

        If dts is None then self.dt is used for all epochs.

        If it is a list where len(dts) == len(zs), then it is treated as a
        list of dt values, one per epoch. This allows you to have varying
        epoch durations.

    UT : function(sigmas, Wm, Wc, noise_cov), optional
        Optional function to compute the unscented transform for the sigma
        points passed through hx. Typically the default function will
        work - you can use x_mean_fn and z_mean_fn to alter the behavior
        of the unscented transform.

    saver : filterpy.common.Saver, optional
        filterpy.common.Saver object. If provided, saver.save() will be
        called after every epoch

    Returns
    -------
    means: ndarray((n,dim_x,1))
        array of the state for each time step after the update. Each entry
        is an np.array. In other words `means[k,:]` is the state at step
        `k`.

    covariance: ndarray((n,dim_x,dim_x))
        array of the covariances for each time step after the update.
        In other words `covariance[k,:,:]` is the covariance at step `k`.

    Examples
    --------
    .. code-block:: Python

        # this example demonstrates tracking a measurement where the time
        # between measurements varies, as stored in dts. The output is then
        # smoothed with an RTS smoother.

        zs = [t + random.randn()*4 for t in range (40)]

        (mu, cov, _, _) = ukf.batch_filter(zs, dts=dts)
        (xs, Ps, Ks) = ukf.rts_smoother(mu, cov)
    """
    #pylint: disable=too-many-arguments

    try:
        z = zs[0]
    except TypeError:
        raise TypeError('zs must be list-like')

    if self._dim_z == 1:
        if not (isscalar(z) or (z.ndim == 1 and len(z) == 1)):
            raise TypeError('zs must be a list of scalars or 1D, 1 element arrays')
    else:
        if len(z) != self._dim_z:
            raise TypeError(
                'each element in zs must be a 1D array of length {}'.format(self._dim_z))

    z_n = np.size(zs, 0)
    if Rs is None:
        Rs = [self.R] * z_n

    if dts is None:
        dts = [self._dt] * z_n

    # mean estimates from Kalman Filter
    if self.x.ndim == 1:
        means = zeros((z_n, self._dim_x))
    else:
        means = zeros((z_n, self._dim_x, 1))

    # state covariances from Kalman Filter
    covariances = zeros((z_n, self._dim_x, self._dim_x))

    for i, (z, r, dt) in enumerate(zip(zs, Rs, dts)):
        self.predict(dt=dt, UT=UT)
        self.update(z, r, UT=UT)
        means[i, :] = self.x
        covariances[i, :, :] = self.P

        if saver is not None:
            saver.save()

    return (means, covariances)
0.001578
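A minimal batch_filter call, assuming the filterpy package and its MerweScaledSigmaPoints API; a 1-D constant-velocity model tracked from noisy position readings:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints, UnscentedKalmanFilter

def fx(x, dt):                       # state transition: position += dt * velocity
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):                           # we only measure position
    return np.array([x[0]])

points = MerweScaledSigmaPoints(n=2, alpha=1e-3, beta=2., kappa=0.)
ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=1., fx=fx, hx=hx,
                            points=points)
ukf.x = np.array([0., 1.])
ukf.P *= 10.
ukf.R *= 4.
zs = [t + np.random.randn() * 2 for t in range(20)]
means, covariances = ukf.batch_filter(zs)
print(means[-1])                     # state estimate after the last update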
def messageReceived(self, value, sender, target): """ An AMP-formatted message was received. Dispatch to the appropriate command responder, i.e. a method on this object exposed with L{commandMethod.expose}. @see IMessageReceiver.messageReceived """ if value.type != AMP_MESSAGE_TYPE: raise UnknownMessageType() inputBox = self._boxFromData(value.data) thunk = commandMethod.responderForName(self, inputBox[COMMAND]) placeholder = _ProtocolPlaceholder(sender, target) arguments = thunk.command.parseArguments(inputBox, placeholder) try: result = thunk(**arguments) except tuple(thunk.command.errors.keys()), knownError: errorCode = thunk.command.errors[knownError.__class__] raise RevertAndRespond( Value(AMP_ANSWER_TYPE, Box(_error_code=errorCode, _error_description=str(knownError)).serialize())) else: response = thunk.command.makeResponse(result, None) return Value(AMP_ANSWER_TYPE, response.serialize())
0.00258
def getManagers(self): """Return all managers of responsible departments """ manager_ids = [] manager_list = [] for department in self.getDepartments(): manager = department.getManager() if manager is None: continue manager_id = manager.getId() if manager_id not in manager_ids: manager_ids.append(manager_id) manager_list.append(manager) return manager_list
0.003984
def _group_by_area(self, datasets): """Group datasets by their area.""" def _area_id(area_def): return area_def.name + str(area_def.area_extent) + str(area_def.shape) # get all of the datasets stored by area area_datasets = {} for x in datasets: area_id = _area_id(x.attrs['area']) area, ds_list = area_datasets.setdefault(area_id, (x.attrs['area'], [])) ds_list.append(x) return area_datasets
0.008163
def _add_resources(data, runtime): """Merge input resources with current CWL runtime parameters. """ if "config" not in data: data["config"] = {} # Convert input resources, which may be a JSON string resources = data.get("resources", {}) or {} if isinstance(resources, six.string_types) and resources.startswith(("{", "[")): resources = json.loads(resources) data["resources"] = resources assert isinstance(resources, dict), (resources, data) data["config"]["resources"] = resources # Add in memory and core usage from CWL memory = int(float(runtime["ram"]) / float(runtime["cores"])) data["config"]["resources"].update({"default": {"cores": int(runtime["cores"]), "memory": "%sM" % memory, "jvm_opts": ["-Xms%sm" % min(1000, memory // 2), "-Xmx%sm" % memory]}}) data["config"]["algorithm"]["num_cores"] = int(runtime["cores"]) return data
0.00552
def connected(G, method_name, **kwargs): """ Performs analysis methods from networkx.connected on each graph in the collection. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method : string Name of method in networkx.connected. **kwargs : kwargs Keyword arguments, passed directly to method. Returns ------- results : dict Keys are graph indices, values are output of method for that graph. Raises ------ ValueError If name is not in networkx.connected, or if no such method exists. """ warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.", DeprecationWarning) return G.analyze(['connected', method_name], **kwargs)
0.00113
def addSwitch(self, name=None): ''' Add a new switch to the topology. ''' if name is None: while True: name = 's' + str(self.__snum) self.__snum += 1 if name not in self.__nxgraph: break self.__addNode(name, Switch) return name
0.005634
def revision(cwd, rev='HEAD', short=False, user=None, password=None, ignore_retcode=False, output_encoding=None): ''' Returns the SHA1 hash of a given identifier (hash, branch, tag, HEAD, etc.) cwd The path to the git checkout rev : HEAD The revision short : False If ``True``, return an abbreviated SHA1 git hash user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 CLI Example: .. code-block:: bash salt myminion git.revision /path/to/repo mybranch ''' cwd = _expand_path(cwd, user) command = ['git', 'rev-parse'] if short: command.append('--short') command.append(rev) return _git_run(command, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding)['stdout']
0.000537
def free(self): """Release the results and connection lock from the TornadoSession object. This **must** be called after you finish processing the results from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or :py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>` or the connection will not be able to be reused by other asynchronous requests. """ self._freed = True self._cleanup(self.cursor, self._fd)
0.003945
def p_expr_BAND_expr(p): """ expr : expr BAND expr """ p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y)
0.007246
def _process_sasl_success(self, stream, element):
    """Process incoming <sasl:success/> element.

    [initiating entity only]
    """
    if not self.authenticator:
        logger.debug("Unexpected SASL response")
        return False

    content = element.text
    if content:
        data = a2b_base64(content.encode("us-ascii"))
    else:
        data = None
    ret = self.authenticator.finish(data)
    if isinstance(ret, sasl.Success):
        logger.debug("SASL authentication succeeded")
        authzid = ret.properties.get("authzid")
        if authzid:
            me = JID(authzid)
        elif "username" in ret.properties:
            # FIXME: other rules for server
            me = JID(ret.properties["username"], stream.peer.domain)
        else:
            me = None
        stream.set_authenticated(me, True)
    else:
        logger.debug("SASL authentication failed")
        raise SASLAuthenticationFailed("Additional success data"
                                       " processing failed")
    return True
0.002584
def get(self, sid): """ Constructs a TaskChannelContext :param sid: The sid :returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext :rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelContext """ return TaskChannelContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, )
0.012821
def list_open_remote_share(self): """List all pending remote shares :returns: array of pending remote shares :raises: HTTPResponseError in case an HTTP error status was returned """ res = self._make_ocs_request( 'GET', self.OCS_SERVICE_SHARE, 'remote_shares/pending' ) if res.status_code == 200: tree = ET.fromstring(res.content) self._check_ocs_status(tree) shares = [] for element in tree.find('data').iter('element'): share_attr = {} for child in element: key = child.tag value = child.text share_attr[key] = value shares.append(share_attr) return shares raise HTTPResponseError(res)
0.002336
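The XML walk above, demonstrated on a hand-written payload shaped like the OCS response (the tag names are illustrative):

import xml.etree.ElementTree as ET

xml = ('<ocs><data><element><id>7</id><name>docs</name></element>'
       '</data></ocs>')
tree = ET.fromstring(xml)
shares = [{child.tag: child.text for child in element}
          for element in tree.find('data').iter('element')]
print(shares)  # [{'id': '7', 'name': 'docs'}]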
def serialize_symbol(ctx, document, el, root): "Serialize special symbols." span = etree.SubElement(root, 'span') span.text = el.value() fire_hooks(ctx, document, el, span, ctx.get_hook('symbol')) return root
0.004329
def format_listall_output(format_file, format_item_dir, format, rule,
                          option_prefix=None, template=None,
                          skip_options=False):
    """
    Prepare listall output template

    :param format_file:
    :param format_item_dir:
    :param format:
    :param rule:
    :param option_prefix:
    :param template:
    :param skip_options:
    :return:
    """
    # Set the list of keys if printing from a file spec
    # _LINE_(whatever)_EOL_
    # _ITEM_(resource)_METI_
    # _KEY_(path_to_value)
    if format_file and os.path.isfile(format_file):
        if not template:
            with open(format_file, 'rt') as f:
                template = f.read()
        # Optional files
        if not skip_options:
            re_option = re.compile(r'(%_OPTION_\((.*?)\)_NOITPO_)')
            optional_files = re_option.findall(template)
            for optional_file in optional_files:
                if optional_file[1].startswith(option_prefix + '-'):
                    with open(os.path.join(format_item_dir, optional_file[1].strip()), 'rt') as f:
                        template = template.replace(optional_file[0].strip(), f.read())
        # Include files if needed
        re_file = re.compile(r'(_FILE_\((.*?)\)_ELIF_)')
        while True:
            requested_files = re_file.findall(template)
            available_files = os.listdir(format_item_dir) if format_item_dir else []
            for requested_file in requested_files:
                if requested_file[1].strip() in available_files:
                    with open(os.path.join(format_item_dir, requested_file[1].strip()), 'rt') as f:
                        template = template.replace(requested_file[0].strip(), f.read())
            # Find items and keys to be printed
            re_line = re.compile(r'(_ITEM_\((.*?)\)_METI_)')
            re_key = re.compile(r'_KEY_\(*(.*?)\)', re.DOTALL|re.MULTILINE) # Remove the multiline ?
            lines = re_line.findall(template)
            for (i, line) in enumerate(lines):
                lines[i] = line + (re_key.findall(line[1]),)
            requested_files = re_file.findall(template)
            if len(requested_files) == 0:
                break
    elif format and format[0] == 'csv':
        keys = rule.keys
        line = ', '.join('_KEY_(%s)' % k for k in keys)
        lines = [ (line, line, keys) ]
        template = line
    return (lines, template)
0.007509
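The _ITEM_/_KEY_ scanning above, demonstrated on a one-line template (field names are made up):

import re

template = '_ITEM_(instance _KEY_(InstanceId) in _KEY_(Region))_METI_'
re_line = re.compile(r'(_ITEM_\((.*?)\)_METI_)')
re_key = re.compile(r'_KEY_\(*(.*?)\)', re.DOTALL | re.MULTILINE)
lines = re_line.findall(template)
print([re_key.findall(body) for _, body in lines])
# [['InstanceId', 'Region']]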
def pathFromHere_walk(self, astr_startPath='/'):
    """
    Return a list of paths from "here" in the stree, using the
    internal cd() to walk the path space.

    :return: a list of paths from "here"
    """
    self.l_lwd = []
    self.treeWalk(startPath=astr_startPath, f=self.lwd)
    return self.l_lwd
0.01847
def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE): """Using a schema, deserialize a stream of consecutive Avro values. :param str schema: json string representing the Avro schema :param file-like stream: a buffered stream of binary input :param int buffer_size: size of bytes to read from the stream each time :return: yields a sequence of python data structures deserialized from the stream """ reader = _lancaster.Reader(schema) buf = stream.read(buffer_size) remainder = b'' while len(buf) > 0: values, n = reader.read_seq(buf) yield from values remainder = buf[n:] buf = stream.read(buffer_size) if len(buf) > 0 and len(remainder) > 0: ba = bytearray() ba.extend(remainder) ba.extend(buf) buf = memoryview(ba).tobytes() if len(remainder) > 0: raise EOFError('{} bytes remaining but could not continue reading ' 'from stream'.format(len(remainder)))
0.000956
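The remainder-stitching pattern above, shown with a plain fixed-width record "codec" in place of the private _lancaster reader:

import io

stream = io.BytesIO(b'aaaabbbbcccc')   # three 4-byte records
remainder, records = b'', []
while True:
    chunk = stream.read(5)             # deliberately misaligned reads
    buf = remainder + chunk
    n = len(buf) - (len(buf) % 4)      # bytes consumed by whole records
    records += [buf[i:i + 4] for i in range(0, n, 4)]
    remainder = buf[n:]
    if not chunk:
        break
if remainder:
    raise EOFError('%d bytes remaining' % len(remainder))
print(records)                         # [b'aaaa', b'bbbb', b'cccc']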
def create(self): """Create the required directory structure and admin metadata.""" self._storage_broker.create_structure() self._storage_broker.put_admin_metadata(self._admin_metadata)
0.009569