Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def do_default(value, default_value=u'', boolean=False):
    """If the value is undefined it will return the passed default value,
    otherwise the value of the variable:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``. If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    if (boolean and not value) or isinstance(value, Undefined):
        return default_value
    return value
0.001403
def has_source_contents(self, src_id):
    """Checks if some sources exist."""
    return bool(rustcall(_lib.lsm_view_has_source_contents,
                         self._get_ptr(), src_id))
0.00995
def remove_duplicates(items):
    """
    Returns a list without duplicates, keeping elements order

    :param items: A list of items
    :return: The list without duplicates, in the same order
    """
    if items is None:
        return items

    new_list = []
    for item in items:
        if item not in new_list:
            new_list.append(item)
    return new_list
0.002674
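A quick usage sketch of remove_duplicates as defined above; the calls and expected results are illustrative, not part of the original snippet.

# Exercising remove_duplicates from the snippet above.
assert remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]  # first occurrences win, order kept
assert remove_duplicates([]) == []
assert remove_duplicates(None) is None  # None passes through unchanged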
def patch(import_path, rvalue=UNDEFINED, side_effect=UNDEFINED, ignore=UNDEFINED,
          callback=UNDEFINED, ctxt=UNDEFINED, subsequent_rvalue=UNDEFINED):
    """
    Patches an attribute of a module referenced on import_path with a decorated
    version that will use the caliendo cache if rvalue is None. Otherwise it
    will patch the attribute of the module to return rvalue when called.

    This class provides a context in which to use the patched module. After the
    decorated method is called patch_in_place unpatches the patched module with
    the original method.

    :param str import_path: The import path of the method to patch.
    :param mixed rvalue: The return value of the patched method.
    :param mixed side_effect: The side effect to execute. Either a callable
        with the same parameters as the target, or an exception.
    :param caliendo.Ignore ignore: Arguments to ignore. The first element
        should be a list of positional arguments. The second should be a list
        of keys for keyword arguments.
    :param function callback: A pickleable callback to execute when the patched
        method is called and the cache is hit. (has to have been cached the
        first time).
    :param caliendo.hooks.Context ctxt: The context this patch should be
        executed under. Generally reserved for internal use. The vast majority
        of use cases should leave this parameter alone.
    :param mixed subsequent_rvalue: If passed; this will be the return value
        each time this method is run regardless of what is returned when it is
        initially cached. Caching for this method will be skipped. This is
        useful when the method returns something unpickleable but we still
        need to stub it out.
    """
    def patch_test(unpatched_test):
        """
        Patches a callable dependency of an unpatched test with a callable
        corresponding to patch_with.

        :param unpatched_test: The unpatched test for which we're patching
            dependencies
        :type unpatched_test: instance method of a test suite
        :param patch_with: A callable to patch the callable dependency with.
            Should match the function signature of the callable we're patching.
        :type patch_with: callable

        :returns: The patched test
        :rtype: instance method
        """
        if ctxt == UNDEFINED:
            context = get_context(unpatched_test)
        else:
            context = ctxt

        context.enter()
        patched_test = get_patched_test(import_path=import_path,
                                        unpatched_test=unpatched_test,
                                        rvalue=rvalue,
                                        side_effect=side_effect,
                                        context=context,
                                        ignore=ignore,
                                        callback=callback,
                                        subsequent_rvalue=subsequent_rvalue)
        patched_test.__context = context
        patched_test.__name__ = context.name
        return patched_test
    return patch_test
0.003955
def ambiguous_date_to_date_range(mydate, fmt="%Y-%m-%d", min_max_year=None):
    """parse an ambiguous date such as 2017-XX-XX to [2017, 2017.999]

    Parameters
    ----------
    mydate : str
        date string to be parsed
    fmt : str
        format descriptor. default is %Y-%m-%d
    min_max_year : None, optional
        if date is completely unknown, use this as bounds.

    Returns
    -------
    tuple
        upper and lower bounds on the date. return (None, None) if errors
    """
    import sys  # needed for the error message written to stderr below
    from datetime import datetime
    sep = fmt.split('%')[1][-1]
    min_date, max_date = {}, {}
    today = datetime.today().date()

    for val, field in zip(mydate.split(sep), fmt.split(sep + '%')):
        f = 'year' if 'y' in field.lower() else ('day' if 'd' in field.lower() else 'month')
        if 'XX' in val:
            if f == 'year':
                if min_max_year:
                    min_date[f] = min_max_year[0]
                    if len(min_max_year) > 1:
                        max_date[f] = min_max_year[1]
                    elif len(min_max_year) == 1:
                        max_date[f] = 4000  # will be replaced by 'today' below.
                else:
                    return None, None
            elif f == 'month':
                min_date[f] = 1
                max_date[f] = 12
            elif f == 'day':
                min_date[f] = 1
                max_date[f] = 31
        else:
            try:
                min_date[f] = int(val)
                max_date[f] = int(val)
            except ValueError:
                print("Can't parse date string: " + mydate, file=sys.stderr)
                return None, None

    max_date['day'] = min(max_date['day'],
                          31 if max_date['month'] in [1, 3, 5, 7, 8, 10, 12]
                          else 28 if max_date['month'] == 2
                          else 30)
    lower_bound = datetime(year=min_date['year'], month=min_date['month'],
                           day=min_date['day']).date()
    upper_bound = datetime(year=max_date['year'], month=max_date['month'],
                           day=max_date['day']).date()
    return (lower_bound, upper_bound if upper_bound < today else today)
0.015437
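An illustrative call to ambiguous_date_to_date_range as defined above, showing how fully ambiguous month and day fields collapse to year bounds; the printed values assume the run happens after 2017.

lo, hi = ambiguous_date_to_date_range("2017-XX-XX")
print(lo)  # datetime.date(2017, 1, 1)
print(hi)  # datetime.date(2017, 12, 31), capped at today's date if in the future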
def responds(self):
    """
    :returns: The frequency with which the user associated with this profile
              responds to messages.
    """
    contacted_text = self._contacted_xpb.\
        get_text_(self.profile_tree).lower()
    if 'contacted' not in contacted_text:
        return contacted_text.strip().replace('replies ', '')
0.01039
def column_exists(self, tablename: str, column: str) -> bool:
    """Does the column exist?"""
    sql = """
        SELECT COUNT(*) FROM information_schema.columns
        WHERE table_name=? AND column_name=? AND table_schema={}
    """.format(self.get_current_schema_expr())
    row = self.fetchone(sql, tablename, column)
    return row[0] >= 1
0.004651
def stop_running_tasks(self):
    """ Terminate all the running tasks

    :return: None
    """
    for task in self.__running_registry:
        task.stop()
    self.__running_registry.clear()
0.044944
def make_plot(self):
    """Make the horizon plot.
    """
    self.get_contour_values()
    # sets levels of main contour plot
    colors1 = ['blue', 'green', 'red', 'purple', 'orange',
               'gold', 'magenta']

    # set contour value. Default is SNR_CUT.
    self.snr_contour_value = (self.SNR_CUT if self.snr_contour_value is None
                              else self.snr_contour_value)

    # plot contours
    for j in range(len(self.zvals)):
        hz = self.axis.contour(self.xvals[j], self.yvals[j],
                               self.zvals[j],
                               np.array([self.snr_contour_value]),
                               colors=colors1[j], linewidths=1.,
                               linestyles='solid')

        # plot invisible lines for purpose of creating a legend
        if self.legend_labels != []:
            # plot a curve off of the grid with same color for legend label.
            self.axis.plot([0.1, 0.2], [0.1, 0.2],
                           color=colors1[j],
                           label=self.legend_labels[j])

    if self.add_legend:
        self.axis.legend(**self.legend_kwargs)

    return
0.005146
def is_field_remote(model, field_name):
    """Check whether a given model field is a remote field.

    A remote field is the inverse of a one-to-many or a
    many-to-many relationship.

    Arguments:
        model: a Django model
        field_name: the name of a field

    Returns:
        True if `field_name` is a remote field, False otherwise.
    """
    if not hasattr(model, '_meta'):
        # ephemeral model with no metaclass
        return False

    model_field = get_model_field(model, field_name)
    return isinstance(model_field, (ManyToManyField, RelatedObject))
0.001712
def trunk_update(request, trunk_id, old_trunk, new_trunk):
    """Handle update to a trunk in (at most) three neutron calls.

    The JavaScript side should know only about the old and new state of a
    trunk. However it should not know anything about how the old and new
    are meant to be diffed and sent to neutron. We handle that here.

    This code was adapted from Heat, see:
    https://review.opendev.org/442496

    Call #1) Update all changed properties but 'sub_ports'.
        PUT /v2.0/trunks/TRUNK_ID
        openstack network trunk set

    Call #2) Delete subports not needed anymore.
        PUT /v2.0/trunks/TRUNK_ID/remove_subports
        openstack network trunk unset --subport

    Call #3) Create new subports.
        PUT /v2.0/trunks/TRUNK_ID/add_subports
        openstack network trunk set --subport

    A single neutron port cannot be two subports at the same time (ie.
    have two segmentation (type, ID)s on the same trunk or to belong to
    two trunks). Therefore we have to delete old subports before creating
    new ones to avoid conflicts.
    """
    LOG.debug("trunk_update(): trunk_id=%s", trunk_id)

    # NOTE(bence romsics): We want to do set operations on the subports,
    # however we receive subports represented as dicts. In Python
    # mutable objects like dicts are not hashable so they cannot be
    # inserted into sets. So we convert subport dicts to (immutable)
    # frozensets in order to do the set operations.
    def dict2frozenset(d):
        """Convert a dict to a frozenset.

        Create an immutable equivalent of a dict, so it's hashable
        therefore can be used as an element of a set or a key of another
        dictionary.
        """
        return frozenset(d.items())

    # cf. neutron_lib/api/definitions/trunk.py
    updatable_props = ('admin_state_up', 'description', 'name')
    prop_diff = {
        k: new_trunk[k]
        for k in updatable_props
        if old_trunk[k] != new_trunk[k]}

    subports_old = {dict2frozenset(d): d
                    for d in old_trunk.get('sub_ports', [])}
    subports_new = {dict2frozenset(d): d
                    for d in new_trunk.get('sub_ports', [])}

    old_set = set(subports_old.keys())
    new_set = set(subports_new.keys())

    delete = old_set - new_set
    create = new_set - old_set

    dicts_delete = [subports_old[fs] for fs in delete]
    dicts_create = [subports_new[fs] for fs in create]

    trunk = old_trunk
    if prop_diff:
        LOG.debug('trunk_update(): update properties of trunk %s: %s',
                  trunk_id, prop_diff)
        body = _prepare_body_update_trunk(prop_diff)
        trunk = neutronclient(request).update_trunk(
            trunk_id, body=body).get('trunk')

    if dicts_delete:
        LOG.debug('trunk_update(): delete subports of trunk %s: %s',
                  trunk_id, dicts_delete)
        body = _prepare_body_remove_subports(dicts_delete)
        trunk = neutronclient(request).trunk_remove_subports(
            trunk_id, body=body)

    if dicts_create:
        LOG.debug('trunk_update(): create subports of trunk %s: %s',
                  trunk_id, dicts_create)
        body = _prepare_body_add_subports(dicts_create)
        trunk = neutronclient(request).trunk_add_subports(
            trunk_id, body=body)

    return Trunk(trunk)
0.0003
def create_exception_for_response(
        cls,
        response_code,
        messages,
        response_id
):
    """
    :type response_code: int
    :type messages: list[str]
    :type response_id: str

    :return: The exception according to the status code.
    :rtype: ApiException
    """
    error_message = cls._generate_message_error(
        response_code,
        messages,
        response_id
    )

    if response_code == cls._HTTP_RESPONSE_CODE_BAD_REQUEST:
        return BadRequestException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_UNAUTHORIZED:
        return UnauthorizedException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_FORBIDDEN:
        return ForbiddenException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_NOT_FOUND:
        return NotFoundException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_METHOD_NOT_ALLOWED:
        return MethodNotAllowedException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_TOO_MANY_REQUESTS:
        return TooManyRequestsException(
            error_message, response_code, response_id)
    if response_code == cls._HTTP_RESPONSE_CODE_INTERNAL_SERVER_ERROR:
        return PleaseContactBunqException(
            error_message, response_code, response_id)

    return UnknownApiErrorException(
        error_message, response_code, response_id)
0.00142
def createLists(self):
    '''Generate the checklists. Note that:
    0,1 = off/on for auto-ticked items
    2,3 = off/on for manually ticked items'''
    self.beforeAssemblyList = {
        'Confirm batteries charged': 2,
        'No physical damage to airframe': 2,
        'All electronics present and connected': 2,
        'Bottle loaded': 2,
        'Ground station operational': 2
    }

    self.beforeEngineList = {
        'Avionics Power ON': 2,
        'Pixhawk Booted': 0,
        'Odroid Booted': 2,
        'Cameras calibrated and capturing': 2,
        'GPS lock': 0,
        'Airspeed check': 2,
        'Barometer check': 2,
        'Compass check': 2,
        'Flight mode MANUAL': 0,
        'Avionics Power': 0,
        'Servo Power': 0,
        'IMU Check': 0,
        'Aircraft Params Loaded': 2,
        'Waypoints Loaded': 0,
        'Servo and clevis check': 2,
        'Geofence loaded': 2,
        'Ignition circuit and battery check': 2,
        'Check stabilisation in FBWA mode': 2
    }

    self.beforeTakeoffList = {
        'Engine throttle responsive': 2,
        'Runway clear': 2,
        'Radio links > 6db margin': 0,
        'Antenna tracker check': 2,
        'GCS stable': 2,
    }

    self.beforeCruiseList = {
        'Airspeed > 10 m/s': 0,
        'Altitude > 30 m': 0,
        '< 100 degrees to 1st Waypoint': 2,
        'Airspeed and climb rate nominal': 2
    }

    self.bottleDropList = {
        'Joe found': 2,
        'Joe waypoint laid in': 2,
        '< 100m to Joe waypoint': 2,
        'Bottle drop mechanism activated': 2
    }

    self.beforeLandingList = {
        'Runway clear': 2,
        'APM set to FBWA mode': 2,
        '< 100m from airfield home': 2
    }

    self.beforeShutdownList = {
        'Taxi to parking': 2,
        'Engine cutoff': 2,
        'Data downloaded': 2,
        'Ignition power off': 2,
        'Avionics power off': 2
    }
0.046296
def is_valid(self, field_name: str, value, kg: dict) -> Optional[dict]:
    """
    Check if this value is valid for the given name property according to
    input knowledge graph and ontology. If is valid, then return a dict with
    key @id or @value for ObjectProperty or DatatypeProperty. No schema
    checked by this function.

    :param field_name: name of the property, if prefix is omitted, then use
        default namespace
    :param value: the value that try to add
    :param kg: the knowledge graph that perform adding action
    :return: None if the value isn't valid for the property, otherwise
        return {key: value}, key is @id for ObjectProperty and @value for
        DatatypeProperty.
    """
    # property
    uri = self.__is_valid_uri_resolve(field_name, kg.get("@context"))
    property_ = self.get_entity(uri)
    if not isinstance(property_, OntologyProperty):
        logging.warning("Property is not OntologyProperty, ignoring it: %s", uri)
        return None
    if not self.__is_valid_domain(property_, kg):
        logging.warning("Property does not have valid domain, ignoring it: %s", uri)
        return None

    # check if is valid range
    # first determine the input value type
    if isinstance(property_, OntologyDatatypeProperty):
        types = self.__is_valid_determine_value_type(value)
    else:
        if isinstance(value, dict):
            try:
                types = map(self.get_entity, value['@type'])
            except KeyError:
                return None  # input entity without type
        elif self.__is_schema_org_datatype(property_):
            if self.expanded_jsonld:
                return {'@value': self.__serialize_type(value)}
            else:
                return value
        else:
            return {'@id': self.__serialize_type(value)}

    # check if is a valid range
    if any(property_.is_legal_object(type_) for type_ in types):
        if isinstance(property_, OntologyObjectProperty):
            return value
        elif self.expanded_jsonld:
            return {'@value': self.__serialize_type(value)}
        else:
            return self.__serialize_type(value)

    return None
0.00342
def main():
    """
    Upload a vcl file to a fastly service, cloning the current version if
    necessary. The uploaded vcl is set as main unless --include is given.
    All existing vcl files will be deleted first if --delete is given.
    """
    parser = OptionParser(description=
        "Upload a vcl file (set as main) to a given fastly service. "
        "All arguments are required.")
    parser.add_option("-k", "--key", dest="apikey", help="fastly api key")
    parser.add_option("-u", "--user", dest="user", help="fastly user name")
    parser.add_option("-p", "--password", dest="password", help="fastly password")
    parser.add_option("-f", "--file", dest="filename", help="vcl file to upload")
    parser.add_option("-s", "--service", dest="service_name", help="service to update")
    parser.add_option("-d", "--delete_vcl", action="store_true",
                      dest="delete_vcl", default=False,
                      help="delete existing vcl files from service "
                           "before uploading")
    parser.add_option("-i", "--include", action="store_true",
                      dest="include_vcl", default=False,
                      help="do not set uploaded vcl as main, "
                           "to be included only")

    (options, args) = parser.parse_args()
    for val in options.__dict__.values():
        if val is None:
            print "Missing required options:"
            parser.print_help()
            sys.exit(1)

    vcl_name = options.filename.split('/').pop()
    service_name = options.service_name

    vcl_file = open(options.filename, 'r')
    vcl_content = vcl_file.read()

    # Need to fully authenticate to access all features.
    client = fastly.connect(options.apikey)
    client.login(options.user, options.password)

    service = client.get_service_by_name(service_name)

    versions = client.list_versions(service.id)
    latest = versions.pop()
    if latest.locked is True or latest.active is True:
        print "\n[ Cloning version %d ]\n" % (latest.number)
        latest = client.clone_version(service.id, latest.number)

    if options.delete_vcl:
        vcls = client.list_vcls(service.id, latest.number)
        for vcl in vcls:
            print "\n[ Deleting vcl file %s from version %d ]\n" %\
                (vcl.name, latest.number)
            client.delete_vcl(service.id, latest.number, vcl.name)

    if vcl_name in latest.vcls:
        print "\n[ Updating vcl file %s on service %s version %d ]\n"\
            % (vcl_name, service_name, latest.number)
        client.update_vcl(service.id, latest.number, vcl_name,
                          content=vcl_content)
    else:
        print "\n[ Uploading new vcl file %s on service %s version %d ]\n"\
            % (vcl_name, service_name, latest.number)
        client.upload_vcl(service.id, latest.number, vcl_name, vcl_content)

    if options.include_vcl is False:
        print "\n[ Setting vcl %s as main ]\n" % (vcl_name)
        client.set_main_vcl(service.id, latest.number, vcl_name)

    client.activate_version(service.id, latest.number)
    print "\n[ Activating configuration version %d ]\n" % (latest.number)
0.001225
def add(self, name):
    '''
    Start a new section.

    :param name:
    :return:
    '''
    if self.__current_section:
        self._flush_content()
    self.discard_current(name)
0.009302
def create_user(self, user_name, initial_password):
    """Create a new user with an initial password via provisioning API.
    It is not an error, if the user already existed before.
    If you get back an error 999, then the provisioning API is not enabled.

    :param user_name: name of user to be created
    :param initial_password: password for user being created
    :returns: True on success
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'POST',
        self.OCS_SERVICE_CLOUD,
        'users',
        data={'password': initial_password, 'userid': user_name}
    )

    # We get 200 when the user was just created.
    if res.status_code == 200:
        tree = ET.fromstring(res.content)
        self._check_ocs_status(tree, [100])
        return True

    raise HTTPResponseError(res)
0.002094
def get_default_version(env):
    """Returns the default version string to use for MSVS.

    If no version was requested by the user through the MSVS environment
    variable, query all the available visual studios through
    get_installed_visual_studios, and take the highest one.

    Return
    ------
    version: str
        the default version.
    """
    if 'MSVS' not in env or not SCons.Util.is_Dict(env['MSVS']):
        # get all versions, and remember them for speed later
        versions = [vs.version for vs in get_installed_visual_studios()]
        env['MSVS'] = {'VERSIONS': versions}
    else:
        versions = env['MSVS'].get('VERSIONS', [])

    if 'MSVS_VERSION' not in env:
        if versions:
            env['MSVS_VERSION'] = versions[0]  # use highest version by default
        else:
            debug('get_default_version: WARNING: no installed versions found, '
                  'using first in SupportedVSList (%s)' % SupportedVSList[0].version)
            env['MSVS_VERSION'] = SupportedVSList[0].version

    env['MSVS']['VERSION'] = env['MSVS_VERSION']

    return env['MSVS_VERSION']
0.005348
def mangle_agreement(correct_sentence):
    """Given a correct sentence, return a sentence or sentences with a
    subject verb agreement error"""
    #
    # Examples
    #
    # Back in the 1800s, people were much shorter and much stronger.
    # This sentence begins with the introductory phrase, 'back in the 1800s'
    # which means that it should have the past tense verb. Any other verb
    # would be incorrect.
    #
    # Jack and jill went up the hill.
    # This sentence is different; 'go' would also be correct. If it began with
    # 'Yesterday', a single-word introductory phrase requiring no comma, only
    # 'went' would be acceptable.
    #
    # The man in the checkered shirt danced his warrior dance to show that
    # he was the most dominant male in the room.
    # This sentence has multiple verbs. If the sentence ended at the word
    # dance, changing 'danced' to 'dances' would be acceptable, but since the
    # sentence continues we cannot make this change -- 'was' agrees with
    # 'danced' but not with 'dances'. This is a shifty tense error, a classic
    # subject verb agreement error.
    #
    # Our Method
    #
    # Right now, we will assume that any change in verb form of a single verb
    # in a sentence is incorrect. As demonstrated above, this is not always
    # true. We hope that since any model created off of this data will use a
    # confidence interval to determine likelihood of a subject-verb agreement
    # error, that some number can be found for which the model excels.
    #
    # It would also be possible to use a rule based learner to evaluate single
    # verb sentences, and only evaluating more complex sentences with the
    # tensorflow model.
    bad_sents = []
    doc = nlp(correct_sentence)
    verbs = [(i, v) for (i, v) in enumerate(doc) if v.tag_.startswith('VB')]
    for i, v in verbs:
        for alt_verb in lexeme(doc[i].text):
            if alt_verb == doc[i].text:
                continue  # Same as the original, skip it
            if (tenses(alt_verb) == tenses(v.text)
                    or (alt_verb.startswith(v.text)
                        and alt_verb.endswith("n't"))):
                continue  # Negated version of the original, skip it
            new_sent = str(doc[:i]) + " {} ".format(alt_verb) + str(doc[i+1:])
            new_sent = new_sent.replace(' ,', ',')  # fix space before comma
            bad_sents.append(new_sent)
    return bad_sents
0.00613
def install_missing(name, version=None, source=None):
    '''
    Instructs Chocolatey to install a package if it doesn't already exist.

    .. versionchanged:: 2014.7.0
        If the minion has Chocolatey >= 0.9.8.24 installed, this function
        calls :mod:`chocolatey.install <salt.modules.chocolatey.install>`
        instead, as ``installmissing`` is deprecated as of that version and
        will be removed in Chocolatey 1.0.

    name
        The name of the package to be installed. Only accepts a single
        argument.

    version
        Install a specific version of the package. Defaults to latest version
        available.

    source
        Chocolatey repository (directory, share or remote URL feed) the
        package comes from. Defaults to the official Chocolatey feed.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_missing <package name>
        salt '*' chocolatey.install_missing <package name> version=<package version>
    '''
    choc_path = _find_chocolatey(__context__, __salt__)
    if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.8.24'):
        log.warning('installmissing is deprecated, using install')
        return install(name, version=version)

    # chocolatey helpfully only supports a single package argument
    cmd = [choc_path, 'installmissing', name]
    if version:
        cmd.extend(['--version', version])
    if source:
        cmd.extend(['--source', source])
    # Shouldn't need this as this code should never run on v0.9.9 and newer
    cmd.extend(_yes(__context__))
    result = __salt__['cmd.run_all'](cmd, python_shell=False)

    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Running chocolatey failed: {0}'.format(result['stdout'])
        )

    return result['stdout']
0.001657
def has_permission(user, permission_name):
    """Check if a user has a given permission."""
    if user and user.is_superuser:
        return True
    return permission_name in available_perm_names(user)
0.004878
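A minimal sketch exercising has_permission above. The user class and the available_perm_names helper here are stand-ins invented for the demo; the real codebase supplies its own.

class FakeUser:  # hypothetical stand-in for a Django-style user
    is_superuser = False

def available_perm_names(user):  # hypothetical stand-in
    return {"reports.view", "reports.edit"}

u = FakeUser()
print(has_permission(u, "reports.view"))    # True: named permission granted
print(has_permission(u, "reports.delete"))  # False: not granted
u.is_superuser = True
print(has_permission(u, "anything"))        # True: superusers bypass the check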
def make_form_or_formset_fields_not_required(form_or_formset):
    """Take a Form or FormSet and set all fields to not required."""
    if isinstance(form_or_formset, BaseFormSet):
        for single_form in form_or_formset:
            make_form_fields_not_required(single_form)
    else:
        make_form_fields_not_required(form_or_formset)
0.002907
def add_warning(self,
                exception: BELParserWarning,
                context: Optional[Mapping[str, Any]] = None,
                ) -> None:
    """Add a warning to the internal warning log in the graph, with optional
    context information.

    :param exception: The exception that occurred
    :param context: The context from the parser when the exception occurred
    """
    self.warnings.append((
        self.path,
        exception,
        {} if context is None else context,
    ))
0.010889
def _get_xml_value(value):
    """Convert an individual value to an XML string.

    Calls itself recursively for dictionaries and lists.

    Uses some heuristics to convert the data to XML:
    - In dictionaries, the keys become the tag name.
    - In lists the tag name is 'child' with an order-attribute giving the
      list index.
    - All other values are included as is.

    All values are escaped to fit into the XML document.

    :param value: The value to convert to XML.
    :type value: Any valid Python value
    :rtype: string
    """
    retval = []
    if isinstance(value, dict):
        for key, value in value.iteritems():
            retval.append('<' + xml_escape(str(key)) + '>')
            retval.append(_get_xml_value(value))
            retval.append('</' + xml_escape(str(key)) + '>')
    elif isinstance(value, list):
        for key, value in enumerate(value):
            retval.append('<child order="' + xml_escape(str(key)) + '">')
            retval.append(_get_xml_value(value))
            retval.append('</child>')
    elif isinstance(value, bool):
        retval.append(xml_escape(str(value).lower()))
    elif isinstance(value, unicode):
        retval.append(xml_escape(value.encode('utf-8')))
    else:
        retval.append(xml_escape(str(value)))
    return "".join(retval)
0.00075
def _construct_api_path(self, version):
    """Returns valid base API path based on version given

    The base API path for the URL is different depending on UniFi server
    version. Default returns correct path for latest known stable working
    versions.
    """
    V2_PATH = 'api/'
    V3_PATH = 'api/s/' + self.site_id + '/'

    if version == 'v2':
        return V2_PATH
    if version == 'v3':
        return V3_PATH
    if version == 'v4':
        return V3_PATH
    if version == 'v5':
        return V3_PATH
    else:
        return V2_PATH
0.00639
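A tiny harness (fake_self is a hypothetical stand-in for the controller object) showing how the path above varies by version: v2 has no site segment, while v3 through v5 are site-scoped.

from types import SimpleNamespace

fake_self = SimpleNamespace(site_id='default')
print(_construct_api_path(fake_self, 'v2'))       # 'api/'
print(_construct_api_path(fake_self, 'v5'))       # 'api/s/default/'
print(_construct_api_path(fake_self, 'unknown'))  # falls back to 'api/'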
def depth_july_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_july_average_ground_temperature`

    Args:
        value (float): value for IDD Field
            `depth_july_average_ground_temperature`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `depth_july_average_ground_temperature`'.format(value))

    self._depth_july_average_ground_temperature = value
0.004768
def _init_level_set(init_level_set, image_shape):
    """Auxiliary function for initializing level sets with a string.

    If `init_level_set` is not a string, it is returned as is.
    """
    if isinstance(init_level_set, str):
        if init_level_set == 'checkerboard':
            res = checkerboard_level_set(image_shape)
        elif init_level_set == 'circle':
            res = circle_level_set(image_shape)
        else:
            raise ValueError("`init_level_set` not in "
                             "['checkerboard', 'circle']")
    else:
        res = init_level_set
    return res
0.001664
def create_option_from_value(tag, value):
    """
    Set DHCP option with human friendly value
    """
    dhcp_option.parser()
    fake_opt = dhcp_option(tag=tag)
    for c in dhcp_option.subclasses:
        if c.criteria(fake_opt):
            if hasattr(c, '_parse_from_value'):
                return c(tag=tag, value=c._parse_from_value(value))
            else:
                raise ValueError('Invalid DHCP option ' + str(tag) +
                                 ": " + repr(value))
    else:
        # No subclass matched: store the raw bytes on the generic option.
        fake_opt._setextra(_tobytes(value))
        return fake_opt
0.014787
def _add_unitary_single(self, gate, qubit):
    """Apply an arbitrary 1-qubit unitary matrix.

    Args:
        gate (matrix_like): a single qubit gate matrix
        qubit (int): the qubit to apply gate to
    """
    # Compute einsum index string for 1-qubit matrix multiplication
    indexes = einsum_vecmul_index([qubit], self._number_of_qubits)
    # Convert to complex rank-2 tensor
    gate_tensor = np.array(gate, dtype=complex)
    # Apply matrix multiplication
    self._statevector = np.einsum(indexes, gate_tensor, self._statevector,
                                  dtype=complex, casting='no')
0.002729
def sanitize_label(text):
    """Remove characters not accepted in labels key

    This replaces any non-word characters (alphanumeric or underscore), with
    an underscore. It also ensures that the first character is a letter by
    prepending with 'key' if necessary, and trims the text to 100 characters.
    """
    if not text:
        return text
    text = re.sub('\\W+', '_', text)
    if text[0] in string.digits:
        text = "key_" + text
    elif text[0] == '_':
        text = "key" + text
    return text[:100]
0.001898
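Illustrative calls to sanitize_label above (they require the same re and string imports the snippet assumes):

import re
import string

print(sanitize_label("9 lives!"))      # 'key_9_lives_' -- digit start gets 'key_'
print(sanitize_label("_private"))      # 'key_private'  -- underscore start gets 'key'
print(len(sanitize_label("a" * 200)))  # 100 -- output trimmed to 100 characters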
def transform_predict(self, X, y):
    """Apply transforms to the data, and predict with the final estimator.
    Unlike predict, this also returns the transformed target

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.
    y : array-like
        target

    Returns
    -------
    yt : array-like
        Transformed target
    yp : array-like
        Predicted transformed target
    """
    Xt, yt, _ = self._transform(X, y)
    yp = self._final_estimator.predict(Xt)
    return yt, yp
0.002976
def delete_session_entity_type(
        self,
        name,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None):
    """
    Deletes the specified session entity type.

    Example:
        >>> import dialogflow_v2
        >>>
        >>> client = dialogflow_v2.SessionEntityTypesClient()
        >>>
        >>> name = client.session_entity_type_path('[PROJECT]', '[SESSION]', '[ENTITY_TYPE]')
        >>>
        >>> client.delete_session_entity_type(name)

    Args:
        name (str): Required. The name of the entity type to delete. Format:
            ``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if 'delete_session_entity_type' not in self._inner_api_calls:
        self._inner_api_calls[
            'delete_session_entity_type'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.delete_session_entity_type,
                default_retry=self._method_configs['DeleteSessionEntityType'].retry,
                default_timeout=self._method_configs['DeleteSessionEntityType'].timeout,
                client_info=self._client_info,
            )

    request = session_entity_type_pb2.DeleteSessionEntityTypeRequest(name=name)
    self._inner_api_calls['delete_session_entity_type'](
        request, retry=retry, timeout=timeout, metadata=metadata)
0.002335
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
    """Clones the object, overriding data and parameters.

    Args:
        data: New data replacing the existing data
        shared_data (bool, optional): Whether to use existing data
        new_type (optional): Type to cast object to
        *args: Additional arguments to pass to constructor
        **overrides: New keyword arguments to pass to constructor

    Returns:
        Cloned object
    """
    if 'datatype' not in overrides:
        datatypes = [self.interface.datatype] + self.datatype
        overrides['datatype'] = list(util.unique_iterator(datatypes))
    return super(Dataset, self).clone(data, shared_data, new_type,
                                      *args, **overrides)
0.005006
def playlist_song_add(
        self, song, playlist, *,
        after=None, before=None, index=None, position=None):
    """Add a song to a playlist.

    Note:
        * Provide no optional arguments to add to end.
        * Provide playlist song dicts for ``after`` and/or ``before``.
        * Provide a zero-based ``index``.
        * Provide a one-based ``position``.

        Songs are inserted *at* given index or position.
        It's also possible to add to the end by using ``len(songs)``
        for index or ``len(songs) + 1`` for position.

    Parameters:
        song (dict): A song dict.
        playlist (dict): A playlist dict.
        after (dict, Optional): A playlist song dict ``songs`` will follow.
        before (dict, Optional): A playlist song dict ``songs`` will precede.
        index (int, Optional): The zero-based index position to insert ``song``.
        position (int, Optional): The one-based position to insert ``song``.

    Returns:
        dict: Playlist dict including songs.
    """
    prev, next_ = get_ple_prev_next(
        self.playlist_songs(playlist),
        after=after,
        before=before,
        index=index,
        position=position
    )

    if 'storeId' in song:
        song_id = song['storeId']
    elif 'trackId' in song:
        song_id = song['trackId']
    else:
        song_id = song['id']

    mutation = mc_calls.PlaylistEntriesBatch.create(
        song_id, playlist['id'],
        preceding_entry_id=prev.get('id'),
        following_entry_id=next_.get('id')
    )
    self._call(mc_calls.PlaylistEntriesBatch, mutation)

    return self.playlist(playlist['id'], include_songs=True)
0.033179
def best_policy(mdp, U):
    """Given an MDP and a utility function U, determine the best policy,
    as a mapping from state to action. (Equation 17.4)"""
    pi = {}
    for s in mdp.states:
        pi[s] = argmax(mdp.actions(s),
                       lambda a: expected_utility(a, s, U, mdp))
    return pi
0.006993
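A self-contained toy run of best_policy above. The argmax and expected_utility helpers and the two-state MDP are minimal stand-ins for the AIMA utilities the snippet assumes.

def argmax(seq, fn):  # stand-in for the AIMA helper
    return max(seq, key=fn)

def expected_utility(a, s, U, mdp):  # stand-in: expectation over the transition model
    return sum(p * U[s2] for (p, s2) in mdp.T(s, a))

class TwoStateMDP:  # hypothetical MDP: 'move' toggles between A and B
    states = ['A', 'B']
    def actions(self, s):
        return ['stay', 'move']
    def T(self, s, a):
        if a == 'stay':
            return [(1.0, s)]
        return [(1.0, 'B' if s == 'A' else 'A')]

U = {'A': 0.0, 'B': 1.0}
print(best_policy(TwoStateMDP(), U))  # {'A': 'move', 'B': 'stay'}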
def conn_is_open(conn):
    """Tests sqlite3 connection, returns T/F"""
    if conn is None:
        return False
    try:
        get_table_names(conn)
        return True
        # Idea taken from
        # http://stackoverflow.com/questions/1981392/how-to-tell-if-python-sqlite-database-connection-or-cursor-is-closed
        # conn.execute("select id from molecule limit 1")
        # return True
    except sqlite3.ProgrammingError as e:
        # print(e)
        return False
0.003774
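A runnable check of conn_is_open above, under one assumption: get_table_names (which the snippet calls) is stood in with a minimal sqlite_master query.

import sqlite3

def get_table_names(conn):  # stand-in for the project's real helper
    return [r[0] for r in
            conn.execute("SELECT name FROM sqlite_master WHERE type='table'")]

conn = sqlite3.connect(':memory:')
print(conn_is_open(conn))   # True
conn.close()
print(conn_is_open(conn))   # False -- a closed connection raises ProgrammingError
print(conn_is_open(None))   # False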
def database_caller_creator(self, name=None):
    '''creates a sqlite3 db
    returns the related connection object
    which will be later used to spawn the cursor
    '''
    try:
        if name:
            database = name + '.db'
        else:
            database = 'sqlite_' + str_generator(self) + '.db'
        conn = sqlite3.connect(database)
        logger.warning('Database created and opened successfully: %s'
                       % database, extra=d)
    except Exception:
        logger.error('Failed to connect or create database / sqlite3', extra=d)
        raise DbConnException
    return conn
0.006126
def is_probably_packed(pe):
    """Returns True if there is a high likelihood that a file is packed
    or contains compressed data.

    The sections of the PE file will be analyzed, if enough sections
    look like containing compressed data and the data makes up for
    more than 20% of the total file size, the function will return True.
    """
    # Calculate the length of the data up to the end of the last section in
    # the file. Overlay data won't be taken into account.
    total_pe_data_length = len(pe.trim())
    # Assume that the file is packed when no data is available
    if not total_pe_data_length:
        return True
    has_significant_amount_of_compressed_data = False

    # If some of the sections have high entropy and they make for more than
    # 20% of the file's size it's assumed that it could be an installer or a
    # packed file
    total_compressed_data = 0
    for section in pe.sections:
        s_entropy = section.get_entropy()
        s_length = len(section.get_data())
        # The value of 7.4 is empirical, based on looking at a few files
        # packed by different packers
        if s_entropy > 7.4:
            total_compressed_data += s_length

    if ((1.0 * total_compressed_data) / total_pe_data_length) > .2:
        has_significant_amount_of_compressed_data = True

    return has_significant_amount_of_compressed_data
0.006508
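The heuristic above hinges on section entropy; here is an illustrative, self-contained sketch of the Shannon-entropy measure that the section.get_entropy call is based on, showing why > 7.4 bits/byte flags compressed-looking data.

import math
import os
from collections import Counter

def byte_entropy(data):
    # Shannon entropy in bits per byte: 0.0 (uniform) up to 8.0 (random).
    if not data:
        return 0.0
    n = len(data)
    return -sum(c / n * math.log2(c / n) for c in Counter(data).values())

print(byte_entropy(b"A" * 4096))       # 0.0 -- maximally redundant
print(byte_entropy(os.urandom(4096)))  # ~8.0 -- looks compressed/packed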
def new_page(self, page_number, new_chapter, **kwargs):
    """Called by :meth:`render` with the :class:`Chain`s that need more
    :class:`Container`s.

    This method should create a new :class:`Page` which contains a container
    associated with `chain`.
    """
    right_template = self.document.get_page_template(self, 'right')
    left_template = self.document.get_page_template(self, 'left')
    page_template = right_template if page_number % 2 else left_template
    return page_template.page(self, page_number, self.chain,
                              new_chapter, **kwargs)
0.004918
def _start_machine(machine, session):
    '''
    Helper to try and start machines

    @param machine:
    @type machine: IMachine
    @param session:
    @type session: ISession
    @return:
    @rtype: IProgress or None
    '''
    try:
        return machine.launchVMProcess(session, '', '')
    except Exception as e:
        log.debug(e.message, exc_info=True)
        return None
0.002591
def parse_dunder_all(self):
    """Parse the __all__ definition in a module."""
    assert self.current.value == '__all__'
    self.consume(tk.NAME)
    # More than one __all__ definition means we ignore all __all__.
    if self.dunder_all is not None or self.dunder_all_error is not None:
        self.dunder_all = None
        self.dunder_all_error = 'Could not evaluate contents of __all__. '
        return
    if self.current.value != '=':
        self.dunder_all_error = 'Could not evaluate contents of __all__. '
        return
    self.consume(tk.OP)

    is_surrounded = False
    if self.current.value in '([':
        is_surrounded = True
        self.consume(tk.OP)

    dunder_all_content = "("
    while True:
        if is_surrounded and self.current.value in ")]":
            break
        if self.current.kind in (tk.NEWLINE, tk.ENDMARKER):
            break
        if self.current.kind in (tk.NL, tk.COMMENT):
            pass
        elif (self.current.kind == tk.STRING or self.current.value == ','):
            dunder_all_content += self.current.value
        else:
            self.dunder_all_error = 'Could not evaluate contents of __all__.'
            return
        self.stream.move()
    if is_surrounded:
        self.consume(tk.OP)
    if not is_surrounded and ',' not in dunder_all_content:
        self.dunder_all_error = (
            'Unexpected token kind in __all__: {!r}. '
            .format(self.current.kind))
        return
    dunder_all_content += ")"

    try:
        self.dunder_all = eval(dunder_all_content, {})
    except BaseException as e:
        self.dunder_all_error = (
            'Could not evaluate contents of __all__.'
            '\bThe value was {}. The exception was:\n{}'
            .format(dunder_all_content, e))

    while (self.current.kind not in self.stream.LOGICAL_NEWLINES and
           self.current.kind != tk.ENDMARKER):
        if self.current.kind != tk.COMMENT:
            self.dunder_all = None
            self.dunder_all_error = 'Could not evaluate contents of __all__. '
            return
        self.stream.move()  # advance past trailing comments so the loop terminates
0.002622
def collate(binder, ruleset=None, includes=None):
    """Given a ``Binder`` as ``binder``, collate the content into a new set
    of models.

    Returns the collated binder.
    """
    html_formatter = SingleHTMLFormatter(binder, includes)
    raw_html = io.BytesIO(bytes(html_formatter))
    collated_html = io.BytesIO()

    if ruleset is None:
        # No ruleset found, so no cooking necessary.
        return binder

    easybake(ruleset, raw_html, collated_html)
    collated_html.seek(0)
    collated_binder = reconstitute(collated_html)

    return collated_binder
0.001736
def write_done(self, addr):
    """Callback when data is received from the Crazyflie"""
    if not addr == self._current_addr:
        logger.warning(
            'Address did not match when adding data to read request!')
        return

    if len(self._data) > 0:
        self._current_addr += self._addr_add
        self._write_new_chunk()
        return False
    else:
        logger.debug('This write request is done')
        return True
0.004065
def get_many(self, type: Type[T], query: Mapping[str, Any],
             context: PipelineContext = None) -> Iterable[T]:
    """Gets a query from the data source, which contains a request for
    multiple objects.

    Args:
        query: The query being requested (contains a request for multiple
            objects).
        context: The context for the extraction (mutable).

    Returns:
        The requested objects.
    """
    pass
0.011236
def join(self, channel_name):
    """ https://api.slack.com/methods/channels.join
    """
    self.params.update({
        'name': channel_name,
    })
    return FromUrl('https://slack.com/api/channels.join',
                   self._requests)(data=self.params).post()
0.010791
def init2(
        self,
        input_tube,      # Read task from the input tube.
        output_tubes,    # Send result on all the output tubes.
        num_workers,     # Total number of workers in the stage.
        disable_result,  # Whether to override any result with None.
        do_stop_task,    # Whether to call doTask() on "stop" request.
):
    """Create *num_workers* worker objects with *input_tube* and an iterable
    of *output_tubes*. The worker reads a task from *input_tube* and writes
    the result to *output_tubes*."""

    super(OrderedWorker, self).__init__()

    self._tube_task_input = input_tube
    self._tubes_result_output = output_tubes
    self._num_workers = num_workers

    # Serializes reading from input tube.
    self._lock_prev_input = None
    self._lock_next_input = None

    # Serializes writing to output tube.
    self._lock_prev_output = None
    self._lock_next_output = None

    self._disable_result = disable_result
    self._do_stop_task = do_stop_task
0.005566
def get_month(self):
    """
    Return the month from the database in the format expected by the URL.
    """
    year = super(BuildableMonthArchiveView, self).get_year()
    month = super(BuildableMonthArchiveView, self).get_month()
    fmt = self.get_month_format()
    return date(int(year), int(month), 1).strftime(fmt)
0.005682
def query_handler(cls, identifier, role=None):
    '''
    Lookup the handler for the given identifier (descriptor_type) and role.
    In case it was not found return the default.

    Logic goes as follows:
    - First try to find exact match for identifier and role,
    - Try to find match for identifier and role=None,
    - Return default handler.
    '''
    key = cls._key_for(identifier, role)
    handler = cls._handlers.get(key, None)
    if handler is None:
        default_for_identifier = cls._key_for(identifier, None)
        handler = cls._handlers.get(default_for_identifier,
                                    cls._handlers['_default'])
    return handler
0.002714
def snapshot(self):
    """Snapshot current state."""
    self._snapshot = {
        'name': self.name,
        'volume': self.volume,
        'muted': self.muted,
        'latency': self.latency
    }
    _LOGGER.info('took snapshot of current state of %s', self.friendly_name)
0.009677
def connectProcess(connection, processProtocol, commandLine='', env={},
                   usePTY=None, childFDs=None, *args, **kwargs):
    """Opens a SSHSession channel and connects a ProcessProtocol to it

    @param connection: the SSH Connection to open the session channel on
    @param processProtocol: the ProcessProtocol instance to connect to the process
    @param commandLine: the command line to execute the process
    @param env: optional environment variables to set for the process
    @param usePTY: if set, request a PTY for the process
    @param childFDs: custom child file descriptors for the process
    """
    processOpenDeferred = defer.Deferred()
    process = SSHProcess(processProtocol, commandLine, env, usePTY,
                         childFDs, *args, **kwargs)
    process.processOpen = processOpenDeferred.callback
    process.openFailed = processOpenDeferred.errback

    connection.openChannel(process)

    return processOpenDeferred
0.002064
def _interrupt_read(self):
    """
    Read data from device.
    """
    data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)
    LOGGER.debug('Read data: %r', data)
    return data
0.009174
def create(self, opts):
    """Create a conf stanza."""
    argv = opts.args
    count = len(argv)

    # unflagged arguments are conf, stanza, key. In this order
    # however, we must have a conf and stanza.
    cpres = True if count > 0 else False
    spres = True if count > 1 else False
    kpres = True if count > 2 else False
    if kpres:
        kvpair = argv[2].split("=")
        if len(kvpair) != 2:
            error("Creating a k/v pair requires key and value", 2)
        else:
            key, value = kvpair
    if not cpres and not spres:
        error("Conf name and stanza name is required for create", 2)

    name = argv[0]
    stan = argv[1]
    conf = self.service.confs[name]

    if not kpres:
        # create stanza
        conf.create(stan)
        return

    # create key/value pair under existing stanza
    stanza = conf[stan]
    stanza.submit({key: value})
0.004
def delete_variants(adapter, vcf_obj, case_obj, case_id=None):
    """Delete variants for a case in the database

    Args:
        adapter(loqusdb.plugins.Adapter)
        vcf_obj(iterable(dict))
        ind_positions(dict)
        case_id(str)

    Returns:
        nr_deleted (int): Number of deleted variants
    """
    case_id = case_id or case_obj['case_id']
    nr_deleted = 0
    start_deleting = datetime.now()
    chrom_time = datetime.now()
    current_chrom = None
    new_chrom = None

    for variant in vcf_obj:
        formated_variant = build_variant(
            variant=variant,
            case_obj=case_obj,
            case_id=case_id,
        )

        if not formated_variant:
            continue

        new_chrom = formated_variant.get('chrom')
        adapter.delete_variant(formated_variant)
        nr_deleted += 1

        if not current_chrom:
            LOG.info("Start deleting chromosome {}".format(new_chrom))
            current_chrom = new_chrom
            chrom_time = datetime.now()
            continue

        if new_chrom != current_chrom:
            LOG.info("Chromosome {0} done".format(current_chrom))
            LOG.info("Time to delete chromosome {0}: {1}".format(
                current_chrom, datetime.now() - chrom_time))
            LOG.info("Start deleting chromosome {0}".format(new_chrom))
            current_chrom = new_chrom

    return nr_deleted
0.006177
def client_authentication_required(self, request, *args, **kwargs):
    """Determine if client authentication is required for current request.

    According to the rfc6749, client authentication is required in the
    following cases:

    Resource Owner Password Credentials Grant: see `Section 4.3.2`_.
    Authorization Code Grant: see `Section 4.1.3`_.
    Refresh Token Grant: see `Section 6`_.

    .. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
    .. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
    .. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
    """
    def is_confidential(client):
        if hasattr(client, 'is_confidential'):
            return client.is_confidential
        client_type = getattr(client, 'client_type', None)
        if client_type:
            return client_type == 'confidential'
        return True

    grant_types = ('password', 'authorization_code', 'refresh_token')
    client_id, _ = self._get_client_creds_from_request(request)
    if client_id and request.grant_type in grant_types:
        client = self._clientgetter(client_id)
        if client:
            return is_confidential(client)
    return False
0.001527
def save(self):
    """
    save or update endpoint to Ariane server
    :return:
    """
    LOGGER.debug("Endpoint.save")
    if self.parent_node is not None:
        if self.parent_node.id is None:
            self.parent_node.save()
        self.parent_node_id = self.parent_node.id

    post_payload = {}
    consolidated_twin_endpoints_id = []
    consolidated_properties = {}
    consolidated_endpoint_properties = []

    if self.id is not None:
        post_payload['endpointID'] = self.id

    if self.url is not None:
        post_payload['endpointURL'] = self.url

    if self.parent_node_id is not None:
        post_payload['endpointParentNodeID'] = self.parent_node_id

    if self.twin_endpoints_id is not None:
        consolidated_twin_endpoints_id = copy.deepcopy(self.twin_endpoints_id)
    if self.twin_endpoints_2_rm is not None:
        for twin_node_2_rm in self.twin_endpoints_2_rm:
            if twin_node_2_rm.id is None:
                twin_node_2_rm.sync()
            consolidated_twin_endpoints_id.remove(twin_node_2_rm.id)
    if self.twin_endpoints_2_add is not None:
        for twin_endpoint_2_add in self.twin_endpoints_2_add:
            if twin_endpoint_2_add.id is None:
                twin_endpoint_2_add.save()
            consolidated_twin_endpoints_id.append(twin_endpoint_2_add.id)
    post_payload['endpointTwinEndpointsID'] = consolidated_twin_endpoints_id

    if self.properties is not None:
        consolidated_properties = copy.deepcopy(self.properties)
    if self.properties_2_rm is not None:
        for n_property_name in self.properties_2_rm:
            consolidated_properties.pop(n_property_name, 0)
    if self.properties_2_add is not None:
        for n_property_tuple in self.properties_2_add:
            consolidated_properties[n_property_tuple[0]] = n_property_tuple[1]
    if consolidated_properties.__len__() > 0:
        for key, value in consolidated_properties.items():
            consolidated_endpoint_properties.append(
                DriverTools.property_params(key, value))
    post_payload['endpointProperties'] = consolidated_endpoint_properties

    params = SessionService.complete_transactional_req(
        {'payload': json.dumps(post_payload)})
    if MappingService.driver_type != DriverFactory.DRIVER_REST:
        params['OPERATION'] = 'createEndpoint'
        args = {'properties': params}
    else:
        args = {
            'http_operation': 'POST',
            'operation_path': '',
            'parameters': params
        }

    response = EndpointService.requester.call(args)
    if MappingService.driver_type != DriverFactory.DRIVER_REST:
        response = response.get()

    if response.rc != 0:
        LOGGER.warning('Endpoint.save - Problem while saving endpoint ' +
                       self.url + '. Reason: ' +
                       str(response.response_content) + ' - ' +
                       str(response.error_message) +
                       " (" + str(response.rc) + ")")
        if response.rc == 500 and \
                ArianeMappingOverloadError.ERROR_MSG in response.error_message:
            raise ArianeMappingOverloadError(
                "Endpoint.save", ArianeMappingOverloadError.ERROR_MSG)
        # traceback.print_stack()
    else:
        self.id = response.response_content['endpointID']
        if self.twin_endpoints_2_add is not None:
            for twin_endpoint_2_add in self.twin_endpoints_2_add:
                twin_endpoint_2_add.sync()
        if self.twin_endpoints_2_rm is not None:
            for twin_node_2_rm in self.twin_endpoints_2_rm:
                twin_node_2_rm.sync()
        if self.parent_node is not None:
            self.parent_node.sync()
        self.sync(json_obj=response.response_content)

    self.twin_endpoints_2_add.clear()
    self.twin_endpoints_2_rm.clear()
    self.properties_2_add.clear()
    self.properties_2_rm.clear()
0.002641
def is_protein_or_chemical(agent):
    '''Return True if the agent is a protein/protein family or chemical.'''
    # Default is True if agent is None
    if agent is None:
        return True
    dbs = set(['UP', 'HGNC', 'CHEBI', 'PFAM-DEF', 'IP', 'INDRA',
               'PUBCHEM', 'CHEMBL'])
    agent_refs = set(agent.db_refs.keys())
    if agent_refs.intersection(dbs):
        return True
    return False
0.002439
def sni2route(self, sni: SchemaNodeId, sctx: SchemaContext) -> SchemaRoute:
    """Translate schema node identifier to a schema route.

    Args:
        sni: Schema node identifier (absolute or relative).
        sctx: Schema context.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If a prefix specified in `sni` is not declared.
    """
    nlist = sni.split("/")
    res = []
    for qn in (nlist[1:] if sni[0] == "/" else nlist):
        res.append(self.translate_node_id(qn, sctx))
    return res
0.003241
def c(self):
    """ continue """
    i, node = self._get_next_eval()
    if node.name in self._bpset:
        if self.state == RUNNING:
            return self._break()
    self.state = RUNNING
    self._eval(node)
    # increment to next node
    self.step = i + 1
    if self.step < len(self._exe_order):
        return self.c()
    else:
        return self._finish()
0.066667
def finishLearning(self):
    """
    Perform an internal optimization step that speeds up inference if we know
    learning will not be performed anymore. This call may, for example,
    remove all potential inputs to each column.
    """
    if self._tfdr is None:
        raise RuntimeError("Temporal memory has not been initialized")

    if hasattr(self._tfdr, 'finishLearning'):
        self.resetSequenceStates()
        self._tfdr.finishLearning()
0.008909
def _format_value(self, value):
    """
    Return formatted string
    """
    value, unit = self.py3.format_units(value, unit=self.unit,
                                        si=self.si_units)
    return self.py3.safe_format(self.format_value,
                                {"value": value, "unit": unit})
0.015444
def wait_all_futures(self, futures, timeout=None, event_timeout=None):
    # type: (Union[List[Future], Future, None], float, float) -> None
    """Services all futures until the list 'futures' are all done then
    returns. Calls relevant subscription callbacks as they come off the
    queue and raises an exception on abort

    Args:
        futures: a `Future` or list of all futures that the caller wants to
            wait for
        timeout: maximum total time in seconds to wait for responses, wait
            forever if None
        event_timeout: maximum time in seconds to wait between each response
            event, wait forever if None
    """
    if timeout is None:
        end = None
    else:
        end = time.time() + timeout

    if not isinstance(futures, list):
        if futures:
            futures = [futures]
        else:
            futures = []

    filtered_futures = []

    for f in futures:
        if f.done():
            if f.exception() is not None:
                raise f.exception()
        else:
            filtered_futures.append(f)

    while filtered_futures:
        if event_timeout is not None:
            until = time.time() + event_timeout
            if end is not None:
                until = min(until, end)
        else:
            until = end

        self._service_futures(filtered_futures, until)
0.002654
def mainswitch_state(sequence_number, state):
    """Create a mainswitch.state message"""
    return (MessageWriter()
            .string("mainswitch.state")
            .uint64(sequence_number)
            .bool(state)
            .get())
0.015544
def _parse_mode(client, command, actor, args):
    """Parse a mode changes, update states, and dispatch MODE events."""
    chantypes = client.server.features.get("CHANTYPES", "#")
    channel, _, args = args.partition(" ")
    args = args.lstrip(":")

    if channel[0] not in chantypes:
        # Personal modes
        for modes in args.split():
            op, modes = modes[0], modes[1:]
            for mode in modes:
                if op == "+":
                    client.user.modes.add(mode)
                else:
                    client.user.modes.discard(mode)
                client.dispatch_event("MODE", actor, client.user,
                                      op, mode, None)
        return

    # channel-specific modes
    chan = client.server.get_channel(channel)
    user_modes = set(client._get_prefixes().itervalues())
    chanmodes = client._get_chanmodes()
    list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
    argument_modes = list_modes | always_arg_modes | set_arg_modes

    tokens = args.split()
    while tokens:
        modes, tokens = tokens[0], tokens[1:]
        op, modes = modes[0], modes[1:]
        for mode in modes:
            argument = None
            if mode in (user_modes | argument_modes):
                argument, tokens = tokens[0], tokens[1:]

            if mode in user_modes:
                user = client.server.get_channel(channel).members[argument]
                if op == "+":
                    user.modes.add(mode)
                else:
                    user.modes.discard(mode)

            if op == "+":
                if mode in (always_arg_modes | set_arg_modes):
                    chan.modes[mode] = argument
                elif mode in toggle_modes:
                    chan.modes[mode] = True
            else:
                if mode in (always_arg_modes | set_arg_modes | toggle_modes):
                    if mode in chan.modes:
                        del chan.modes[mode]
            # list-type modes (bans+exceptions, invite masks) aren't stored,
            # but do generate MODE events.

            client.dispatch_event("MODE", actor, chan, op, mode, argument)
0.000934
def sendDtmfTone(self, tones):
    """ Send one or more DTMF tones to the remote party
    (only allowed for an answered call)

    Note: this is highly device-dependent, and might not work

    :param tones: A str containing one or more DTMF tones to play,
        e.g. "3" or "\*123#"

    :raise CommandError: if the command failed/is not supported
    :raise InvalidStateException: if the call has not been answered, or is
        ended while the command is still executing
    """
    if self.answered:
        dtmfCommandBase = self.DTMF_COMMAND_BASE.format(cid=self.id)
        toneLen = len(tones)
        if len(tones) > 1:
            cmd = ('AT{0}{1};{0}' + ';{0}'.join(tones[1:])).format(
                dtmfCommandBase, tones[0])
        else:
            cmd = 'AT{0}{1}'.format(dtmfCommandBase, tones)
        try:
            self._gsmModem.write(cmd, timeout=(5 + toneLen))
        except CmeError as e:
            if e.code == 30:
                # No network service - can happen if call is ended during DTMF
                # transmission (but also if DTMF is sent immediately after
                # call is answered)
                raise InterruptedException('No network service', e)
            elif e.code == 3:
                # Operation not allowed - can happen if call is ended during
                # DTMF transmission
                raise InterruptedException('Operation not allowed', e)
            else:
                raise e
    else:
        raise InvalidStateException(
            'Call is not active (it has not yet been answered, or it has ended).')
0.009721
def has_obsgroup_id(self, group_id):
    """
    Check for the presence of the given group_id

    :param string group_id: The group ID
    :return: True if we have a :class:`meteorpi_model.ObservationGroup`
        with this Id, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_obs_groups WHERE publicId = %s',
                     (group_id,))
    return len(self.con.fetchall()) > 0
0.009368
def _get_dimension_scales(self, dimension, preserve_domain=False):
    """
    Return the list of scales corresponding to a given dimension.

    The preserve_domain optional argument specifies whether one should
    filter out the scales for which preserve_domain is set to True.
    """
    if preserve_domain:
        return [
            self.scales[k] for k in self.scales if (
                k in self.scales_metadata and
                self.scales_metadata[k].get('dimension') == dimension and
                not self.preserve_domain.get(k)
            )
        ]
    else:
        return [
            self.scales[k] for k in self.scales if (
                k in self.scales_metadata and
                self.scales_metadata[k].get('dimension') == dimension
            )
        ]
0.002288
def clean(self, value):
    """
    Call the form is_valid to ensure every value supplied is valid
    """
    if not value:
        raise ValidationError(
            'Error found in Form Field: Nothing to validate')

    data = dict((bf.name, value[i]) for i, bf in enumerate(self.form))
    self.form = form = self.form.__class__(data)
    if not form.is_valid():
        error_dict = list(form.errors.items())
        raise ValidationError([
            ValidationError(mark_safe('{} {}'.format(k.title(), v)), code=k)
            for k, v in error_dict])

    # This call will ensure compress is called as expected.
    return super(FormField, self).clean(value)
0.002732
def find(self, name: str) -> Optional[ConnectedConsulLockInformation]:
    """
    Finds the lock with the key name that matches that given.

    :param name: the lock key to match
    :return: the found lock
    """
    lock = self.consul_client.kv.get(name)[1]
    if lock is None:
        return None
    lock_information = json.loads(lock["Value"],
                                  cls=ConsulLockInformationJSONDecoder)
    return ConnectedConsulLockInformation(
        self, lock_information.key, lock_information.session_id,
        lock_information.created, lock_information.seconds_to_lock,
        lock_information.metadata)
0.00624
def get_waveset(model):
    """Get optimal wavelengths for sampling a given model.

    Parameters
    ----------
    model : `~astropy.modeling.Model`
        Model.

    Returns
    -------
    waveset : array-like or `None`
        Optimal wavelengths. `None` if undefined.

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid model.
    """
    if not isinstance(model, Model):
        raise SynphotError('{0} is not a model.'.format(model))

    if isinstance(model, _CompoundModel):
        waveset = model._tree.evaluate(WAVESET_OPERATORS, getter=None)
    else:
        waveset = _get_sampleset(model)

    return waveset
0.001534
def get_feeds_url(blog_page, root_page):
    """
    Get the feed URLs for a blog page instance.
    It uses one URL or the other depending on whether blog_page is the root page.
    """
    if root_page == blog_page:
        return reverse('blog_page_feed')
    else:
        blog_path = strip_prefix_and_ending_slash(blog_page.specific.last_url_part)
        return reverse('blog_page_feed_slug', kwargs={'blog_path': blog_path})
0.004751
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'): ''' Waits for a task to be completed. task The task to wait for. instance_name The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on. task_type The type of task being performed. Useful information for debugging purposes. sleep_seconds The number of seconds to wait before querying the task again. Defaults to ``1`` second. log_level The level at which to log task information. Default is ``debug``, but ``info`` is also supported. ''' time_counter = 0 start_time = time.time() log.trace('task = %s, task_type = %s', task, task.__class__.__name__) try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) while task_info.state == 'running' or task_info.state == 'queued': if time_counter % sleep_seconds == 0: msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) time.sleep(1.0 - ((time.time() - start_time) % 1.0)) time_counter += 1 try: task_info = task.info except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) if task_info.state == 'success': msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format( instance_name, task_type, time_counter) if log_level == 'info': log.info(msg) else: log.debug(msg) # task is in a successful state return task_info.result else: # task is in an error state try: raise task_info.error except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.FileNotFound as exc: log.exception(exc) raise salt.exceptions.VMwareFileNotFoundError(exc.msg) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.fault.SystemError as exc: log.exception(exc) raise salt.exceptions.VMwareSystemError(exc.msg) except vmodl.fault.InvalidArgument as exc: log.exception(exc) exc_message = exc.msg if exc.faultMessage: exc_message = '{0} ({1})'.format(exc_message, exc.faultMessage[0].message) raise salt.exceptions.VMwareApiError(exc_message)
0.000752
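A hedged sketch of how wait_for_task above is typically driven; `vm` is assumed to be a pyVmomi vim.VirtualMachine obtained elsewhere, and PowerOnVM_Task is a standard vSphere API call. The instance name is a placeholder.

task = vm.PowerOnVM_Task()
result = wait_for_task(
    task,
    instance_name='web-01',   # hypothetical VM name
    task_type='power on',
    sleep_seconds=2,          # log a progress message every 2 seconds
    log_level='info',
)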
def train_crf(ctx, input, output, clusters): """Train CRF CEM recognizer.""" click.echo('chemdataextractor.crf.train') sentences = [] for line in input: sentence = [] for t in line.split(): token, tag, iob = t.rsplit('/', 2) sentence.append(((token, tag), iob)) if sentence: sentences.append(sentence) tagger = CrfCemTagger(clusters=clusters) tagger.train(sentences, output)
0.002179
async def clear_reactions(self): """|coro| Removes all the reactions from the message. You need the :attr:`~Permissions.manage_messages` permission to use this. Raises -------- HTTPException Removing the reactions failed. Forbidden You do not have the proper permissions to remove all the reactions. """ await self._state.http.clear_reactions(self.channel.id, self.id)
0.006424
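A hedged usage sketch for clear_reactions above as a discord.py command; `bot` is assumed to be a commands.Bot instance defined elsewhere, and the command name is illustrative.

@bot.command()
async def wipe(ctx, message_id: int):
    # Fetch the target message in the current channel and strip all reactions;
    # requires the Manage Messages permission.
    message = await ctx.channel.fetch_message(message_id)
    await message.clear_reactions()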
def _prepare_for_submission(self, tempfolder, inputdict): """ Create input files. :param tempfolder: aiida.common.folders.Folder subclass where the plugin should put all its files. :param inputdict: dictionary of the input nodes as they would be returned by get_inputs_dict """ parameters, code, structure, surface_sample = \ self._validate_inputs(inputdict) # Prepare CalcInfo to be returned to aiida calcinfo = CalcInfo() calcinfo.uuid = self.uuid calcinfo.local_copy_list = [ [structure.get_file_abs_path(), structure.filename], [surface_sample.get_file_abs_path(), surface_sample.filename], ] calcinfo.remote_copy_list = [] calcinfo.retrieve_list = parameters.output_files codeinfo = CodeInfo() # will call ./code.py in.json out.json codeinfo.cmdline_params = parameters.cmdline_params( structure_file_name=structure.filename, surface_sample_file_name=surface_sample.filename) codeinfo.code_uuid = code.uuid calcinfo.codes_info = [codeinfo] return calcinfo
0.002459
def get_validate_upload_form_kwargs(self): """ Return the keyword arguments for instantiating the form for validating the upload. """ kwargs = { 'storage': self.get_storage(), 'upload_to': self.get_upload_to(), 'content_type_prefix': self.get_content_type_prefix(), 'process_to': self.get_process_to(), 'processed_key_generator': self.get_processed_key_generator(), } # ``data`` may be provided by a POST from the JavaScript if using a # DropZone form, or as querystrings on a redirect GET request from # Amazon if not. data = { 'bucket_name': self._get_bucket_name(), 'key_name': self._get_key_name(), 'etag': self._get_etag(), } kwargs.update({'data': data}) return kwargs
0.002281
def delete_file(self, target, path): """Delete a file from a device :param target: The device(s) to be targeted with this request :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances :param path: The path on the target to the file to delete. :return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo` if the operation failed on that device :raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting """ command_block = FileSystemServiceCommandBlock() command_block.add_command(DeleteCommand(path)) root = _parse_command_response(self._sci_api.send_sci("file_system", target, command_block.get_command_string())) out_dict = {} for device in root.findall('./file_system/device'): device_id = device.get('id') error = device.find('./error') if error is not None: out_dict[device_id] = _parse_error_tree(error) else: out_dict[device_id] = DeleteCommand.parse_response(device.find('./commands/rm')) return out_dict
0.00565
def tool_classpath_from_products(products, key, scope): """Get a classpath for the tool previously registered under key in the given scope. :param products: The products of the current pants run. :type products: :class:`pants.goal.products.Products` :param string key: The key the tool configuration was registered under. :param string scope: The scope the tool configuration was registered under. :returns: A list of paths. :rtype: list """ callback_product_map = products.get_data('jvm_build_tools_classpath_callbacks') or {} callback = callback_product_map.get(scope, {}).get(key) if not callback: raise TaskError('No bootstrap callback registered for {key} in {scope}' .format(key=key, scope=scope)) return callback()
0.005013
def duration(input_filepath): ''' Show duration in seconds (0 if unavailable). Parameters ---------- input_filepath : str Path to audio file. Returns ------- duration : float Duration of audio file in seconds. If unavailable or empty, returns 0. ''' validate_input_file(input_filepath) output = soxi(input_filepath, 'D') if output == '0': logger.warning("Duration unavailable for %s", input_filepath) return float(output)
0.001969
def get_period_seconds(period):
    """
    return the number of seconds in the specified period

    >>> get_period_seconds('day')
    86400
    >>> get_period_seconds(86400)
    86400
    >>> get_period_seconds(datetime.timedelta(hours=24))
    86400
    >>> get_period_seconds('day + os.system("rm -Rf *")')
    Traceback (most recent call last):
    ...
    ValueError: period not in (second, minute, hour, day, month, year)
    """
    if isinstance(period, six.string_types):
        try:
            name = 'seconds_per_' + period.lower()
            result = globals()[name]
        except KeyError:
            msg = "period not in (second, minute, hour, day, month, year)"
            raise ValueError(msg)
    elif isinstance(period, numbers.Number):
        result = period
    elif isinstance(period, datetime.timedelta):
        result = period.days * get_period_seconds('day') + period.seconds
    else:
        raise TypeError('period must be a string, a number, or a timedelta')
    return result
0.031891
def _style_text(text):
    """
    Apply some HTML highlighting to the contents.
    This can't be done in the template.
    """
    # Escape text and apply some formatting.
    # To have really good highlighting, pprint would have to be re-implemented.
    text = escape(text)
    text = text.replace(' &lt;iterator object&gt;', " <small>&lt;<var>this object can be used in a 'for' loop</var>&gt;</small>")
    text = text.replace(' &lt;dynamic item&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>')
    text = text.replace(' &lt;dynamic attribute&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>')
    # Raw strings avoid invalid-escape warnings for the \g<N> group references.
    text = RE_PROXY.sub(r'\g<1><small>&lt;<var>proxy object</var>&gt;</small>', text)
    text = RE_FUNCTION.sub(r'\g<1><small>&lt;<var>object method</var>&gt;</small>', text)
    text = RE_GENERATOR.sub(r"\g<1><small>&lt;<var>generator, use 'for' to traverse it</var>&gt;</small>", text)
    text = RE_OBJECT_ADDRESS.sub(r'\g<1><small>&lt;<var>\g<2> object</var>&gt;</small>', text)
    text = RE_MANAGER.sub(r'\g<1><small>&lt;<var>manager, use <kbd>.all</kbd> to traverse it</var>&gt;</small>', text)
    text = RE_CLASS_REPR.sub(r'\g<1><small>&lt;<var>\g<2> class</var>&gt;</small>', text)

    # Since Django's WSGIRequest does a pprint like format for its __repr__, make that styling consistent
    text = RE_REQUEST_FIELDNAME.sub(r'\g<1>:\n <strong style="color: #222;">\g<2></strong>: ', text)
    text = RE_REQUEST_CLEANUP1.sub(r'\g<1>', text)
    text = RE_REQUEST_CLEANUP2.sub(')', text)

    return mark_safe(text)
0.014603
@contextmanager  # from contextlib; required for the 'with' usage shown below
def tictoc(name='tictoc'):
    """
    with tictoc('any string or not'):
        print('cool~~~')

    cool~~~
    2015-12-30 14:39:28,458 [INFO] tictoc Elapsed: 7.10487365723e-05 secs
    :param name: str
    """
    t = time.time()
    yield
    logg.info('%s Elapsed: %s secs' % (name, time.time() - t))
0.003289
def wrapModel(self, model): """ Converts application-provided model objects to L{IResource} providers. """ res = IResource(model, None) if res is None: frag = INavigableFragment(model) fragmentName = getattr(frag, 'fragmentName', None) if fragmentName is not None: fragDocFactory = self._getDocFactory(fragmentName) if fragDocFactory is not None: frag.docFactory = fragDocFactory if frag.docFactory is None: raise CouldNotLoadFromThemes(frag, self._preferredThemes()) useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement)) return self._wrapNavFrag(frag, useAthena) else: return res
0.003759
def _wet_message_received(self, msg): """Report a wet state.""" for callback in self._dry_wet_callbacks: callback(LeakSensorState.WET) self._update_subscribers(0x13)
0.00995
def generate_template(context, config, cloudformation):
    """call cloudformation to generate the template (json format).

    :param context:
    :param config:
    :param cloudformation:
    :return:
    """
    # getfullargspec replaces inspect.getargspec, which was removed in Python 3.11
    spec = inspect.getfullargspec(cloudformation.generate_template)[0]
    if len(spec) == 0:
        return cloudformation.generate_template()
    elif spec == ['context', 'config']:
        return cloudformation.generate_template(context, config)
    else:
        raise Exception('Arguments of \'generate_template\' not as expected: %s' % spec)
0.00361
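The dispatcher above inspects the plugin's generate_template signature and accepts either a zero-argument or a (context, config) form. A minimal sketch of a plugin module that satisfies the two-argument form; the module layout and the bucket resource are illustrative, not the project's actual plugin contract.

import json

def generate_template(context, config):
    # Argument names must be exactly ['context', 'config'] for the
    # dispatcher's signature check to pass.
    template = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Resources': {
            'Bucket': {
                'Type': 'AWS::S3::Bucket',
                'Properties': {'BucketName': config.get('bucket_name')},
            }
        },
    }
    return json.dumps(template)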
def get_Q(self):
        r"""Get the model's estimate of Q = \mu P \mu^T

        We can then separately extract \mu subject to additional
        constraints, e.g. \mu P 1 = diag(O).
        """
        Z = self.Z.detach().clone().numpy()
        O = self.O.numpy()
        I_k = np.eye(self.k)
        return O @ Z @ np.linalg.inv(I_k + Z.T @ O @ Z) @ Z.T @ O
0.019444
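A standalone numpy check of the same closed form, with the torch tensor plumbing dropped; the shapes (O as d-by-d, Z as d-by-k) are an assumption made for illustration.

import numpy as np

k, d = 3, 5
rng = np.random.default_rng(0)
Z = rng.random((d, k))      # stand-in for self.Z
O = rng.random((d, d))      # stand-in for self.O
I_k = np.eye(k)

Q = O @ Z @ np.linalg.inv(I_k + Z.T @ O @ Z) @ Z.T @ O
assert Q.shape == (d, d)    # Q = mu P mu^T lives in the observable space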
def in_domain(self, points): """ Returns ``True`` if all of the given points are in the domain, ``False`` otherwise. :param np.ndarray points: An `np.ndarray` of type `self.dtype`. :rtype: `bool` """ if np.all(np.isreal(points)): are_greater = np.all(np.greater_equal(points, self._min)) are_smaller = np.all(np.less_equal(points, self._max)) return are_greater and are_smaller else: return False
0.005871
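The bounds test above, inlined as a self-contained numpy snippet with hypothetical limits [0, 1]:

import numpy as np

_min, _max = 0.0, 1.0
points = np.array([0.2, 0.5, 0.9])
in_domain = (np.all(np.isreal(points))
             and np.all(np.greater_equal(points, _min))
             and np.all(np.less_equal(points, _max)))
print(in_domain)   # True; a complex or out-of-range entry would make it False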
def volume_list(self, search_opts=None): ''' List all block volumes ''' if self.volume_conn is None: raise SaltCloudSystemExit('No cinder endpoint available') nt_ks = self.volume_conn volumes = nt_ks.volumes.list(search_opts=search_opts) response = {} for volume in volumes: response[volume.display_name] = { 'name': volume.display_name, 'size': volume.size, 'id': volume.id, 'description': volume.display_description, 'attachments': volume.attachments, 'status': volume.status } return response
0.002861
def submarine(space, smooth=True, taper=20.0):
    """Return a 'submarine' phantom consisting of an ellipsoid and a box.

    Parameters
    ----------
    space : `DiscreteLp`
        Discretized space in which the phantom is supposed to be created.
    smooth : bool, optional
        If ``True``, the boundaries are smoothed out. Otherwise, the
        function steps from 0 to 1 at the boundaries.
    taper : float, optional
        Tapering parameter for the boundary smoothing. Larger values
        mean faster taper, i.e. sharper boundaries.

    Returns
    -------
    phantom : ``space`` element
        The submarine phantom in ``space``.
    """
    if space.ndim == 2:
        if smooth:
            return _submarine_2d_smooth(space, taper)
        else:
            return _submarine_2d_nonsmooth(space)
    else:
        raise ValueError('phantom only defined in 2 dimensions, got {}'
                         ''.format(space.ndim))
0.001053
def get_gtf_argument_parser(desc, default_field_name='gene'): """Return an argument parser with basic options for reading GTF files. Parameters ---------- desc: str Description of the ArgumentParser default_field_name: str, optional Name of field in GTF file to look for. Returns ------- `argparse.ArgumentParser` object The argument parser. """ parser = cli.get_argument_parser(desc=desc) parser.add_argument( '-a', '--annotation-file', default='-', type=str, help=textwrap.dedent("""\ Path of Ensembl gene annotation file (in GTF format). The file may be gzip'ed. If set to ``-``, read from ``stdin``.""") ) parser.add_argument( '-o', '--output-file', required=True, type=str, help=textwrap.dedent("""\ Path of output file. If set to ``-``, print to ``stdout``, and redirect logging messages to ``stderr``.""") ) #parser.add_argument( # '-s', '--species', type=str, # choices=sorted(ensembl.SPECIES_CHROMPAT.keys()), default='human', # help=textwrap.dedent("""\ # Species for which to extract genes. (This parameter is ignored # if ``--chromosome-pattern`` is specified.)""") #) parser.add_argument( '-c', '--chromosome-pattern', type=str, required=False, default=None, help=textwrap.dedent("""\ Regular expression that chromosome names have to match. [None] """) ) #parser.add_argument( # '-f', '--field-name', type=str, default=default_field_name, # help=textwrap.dedent("""\ # Rows in the GTF file that do not contain this value # in the third column are ignored.""") #) cli.add_reporting_args(parser) return parser
0.002717
def msetnx(self, *args, **kwargs): """ Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful. """ if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSETNX requires **kwargs or a single dict arg') mapping = args[0] else: mapping = kwargs if len(mapping) == 0: raise ResponseError("wrong number of arguments for 'msetnx' command") for key in mapping.keys(): if self._encode(key) in self.redis: return False for key, value in mapping.items(): self.set(key, value) return True
0.004734
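A short illustration of the all-or-nothing semantics above, which mirror Redis MSETNX: if any key already exists, nothing at all is written. The snippet assumes this method lives on mockredis's MockRedis client, consistent with the self.redis/self._encode internals above.

from mockredis import MockRedis   # assumed home of the method above

r = MockRedis()
assert r.msetnx({'a': 1, 'b': 2}) is True    # neither key existed: both are set
assert r.msetnx({'b': 3, 'c': 4}) is False   # 'b' already exists, so 'c' is NOT set
assert r.get('c') is None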
def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:
        """
        Apply a function along an axis without loading the entire dataset in memory.

        Args:
            f_list (list of func):    Function(s) that takes a numpy ndarray as argument

            axis (int):    Axis along which to apply the function (0 = rows, 1 = columns)

            chunksize (int): Number of rows (columns) to load per chunk

            selection (array of bool): Columns (rows) to include

        Returns:
            list of numpy.ndarray, one per function in f_list

            Since f_list must be a list (a bare function raises ValueError), the result
            is always a list of numpy arrays. Passing several functions at once is more
            efficient than repeatedly calling map() one function at a time.
        """
        if hasattr(f_list, '__call__'):
            raise ValueError("f_list must be a list of functions, not a function itself")

        result = []
        if axis == 0:
            rows_per_chunk = chunksize
            for i in range(len(f_list)):
                result.append(np.zeros(self.shape[0]))
            ix = 0
            while ix < self.shape[0]:
                rows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)
                if selection is not None:
                    chunk = self[ix:ix + rows_per_chunk, :][:, selection]
                else:
                    chunk = self[ix:ix + rows_per_chunk, :]
                for i in range(len(f_list)):
                    result[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)
                ix = ix + rows_per_chunk
        elif axis == 1:
            cols_per_chunk = chunksize
            for i in range(len(f_list)):
                result.append(np.zeros(self.shape[1]))
            ix = 0
            while ix < self.shape[1]:
                cols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)
                if selection is not None:
                    chunk = self[:, ix:ix + cols_per_chunk][selection, :]
                else:
                    chunk = self[:, ix:ix + cols_per_chunk]
                for i in range(len(f_list)):
                    result[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)
                ix = ix + cols_per_chunk
        return result
0.026674
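A hedged usage sketch for the chunked map above; `ds` is assumed to be a loompy-style connection exposing this method. One pass over the file computes both per-row statistics.

import numpy as np

# ds is an already-open connection (assumed); axis=0 applies each
# function across every row, 500 rows per chunk.
row_means, row_maxes = ds.map([np.mean, np.max], axis=0, chunksize=500)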
def one_line_desc(obj): """Get a one line description of a class.""" logger = logging.getLogger(__name__) try: doc = ParsedDocstring(obj.__doc__) return doc.short_desc except: # pylint:disable=bare-except; We don't want a misbehaving exception to break the program logger.warning("Could not parse docstring for %s", obj, exc_info=True) return ""
0.007576
def latex(self): """Gives a latex representation of the assessment.""" output = self.latex_preamble output += self._repr_latex_() output += self.latex_post return output
0.009569
def users_get_presence(self, user_id=None, username=None, **kwargs):
        """Gets the online presence of a user."""
        if user_id:
            return self.__call_api_get('users.getPresence', userId=user_id, kwargs=kwargs)
        elif username:
            return self.__call_api_get('users.getPresence', username=username, kwargs=kwargs)
        else:
            raise RocketMissingParamException('userID or username required')
0.00907
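A hedged sketch using the rocketchat_API client, which the method above appears to belong to; the server URL, credentials, and username are placeholders, and the method returns a requests.Response, hence the .json() call.

from rocketchat_API.rocketchat import RocketChat

rocket = RocketChat('admin', 'password', server_url='https://chat.example.com')
presence = rocket.users_get_presence(username='alice').json()
print(presence.get('presence'))   # e.g. 'online', 'away', 'offline'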
def get_media_descriptions_metadata(self): """Gets the metadata for all media descriptions. return: (osid.Metadata) - metadata for the media descriptions *compliance: mandatory -- This method must be implemented.* """ metadata = dict(self._media_descriptions_metadata) metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['mediaDescriptions']]}) return Metadata(**metadata)
0.006329
def iscsi_resource(self):
        """Property to provide a reference to the BIOS iSCSI resource instance

        It is calculated once, the first time it is queried. On refresh,
        this property gets reset.
        """
        return iscsi.ISCSIResource(
            self._conn, utils.get_subresource_path_by(
                self, ["Oem", "Hpe", "Links", "iScsi"]),
            redfish_version=self.redfish_version)
0.004762
def deactivate(): '''Deactivates an environment by restoring all env vars to a clean state stored prior to activating environments ''' if 'CPENV_ACTIVE' not in os.environ or 'CPENV_CLEAN_ENV' not in os.environ: raise EnvironmentError('Can not deactivate environment...') utils.restore_env_from_file(os.environ['CPENV_CLEAN_ENV'])
0.002786
def tag(value): """ Add a tag with generated id. :param value: everything working with the str() function """ rdict = load_feedback() tests = rdict.setdefault("tests", {}) tests["*auto-tag-" + str(hash(str(value)))] = str(value) save_feedback(rdict)
0.003559
def write_object_to_file(self, query_results, filename, fmt="csv", coerce_to_timestamp=False, record_time_added=False):
        """
        Write query results to file.

        Acceptable formats are:
            - csv:
                comma-separated-values file. This is the default format.
            - json:
                JSON array. Each element in the array is a different row.
            - ndjson:
                JSON array but each element is newline-delimited instead of comma-delimited like in `json`

        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.

        By default, this function will try and leave all values as they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results: the results from a SQL query
        :type query_results: list of dict
        :param filename: the name of the file where the data should be dumped to
        :type filename: str
        :param fmt: the format you want the output in. Default:  'csv'
        :type fmt: str
        :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
            False if you want them to be left in the same format as they were in Salesforce.
            Leaving the value as False will result in datetimes being strings. Default: False
        :type coerce_to_timestamp: bool
        :param record_time_added: True if you want to add a Unix timestamp field
            to the resulting data that marks when the data was fetched from Salesforce. Default: False
        :type record_time_added: bool
        :return: the dataframe that gets written to the file.
        :rtype: pd.DataFrame
        """
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError("Format value is not recognized: {}".format(fmt))

        # this line right here will convert all integers to floats
        # if there are any None/np.nan values in the column
        # that's because None/np.nan cannot exist in an integer column
        # we should write all of our timestamps as FLOATS in our final schema
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])

        df.columns = [column.lower() for column in df.columns]

        # convert columns with datetime strings to datetimes
        # not all strings will be datetimes, so we ignore any errors that occur
        # we get the object's definition at this point and only consider
        # features that are DATE or DATETIME
        if coerce_to_timestamp and df.shape[0] > 0:
            # get the object name out of the query results
            # it's stored in the "attributes" dictionary
            # for each returned record
            object_name = query_results[0]['attributes']['type']

            self.log.info("Coercing timestamps for: %s", object_name)

            schema = self.describe_object(object_name)

            # possible columns that can be converted to timestamps
            # are the ones that are either date or datetime types
            # strings are too general and we risk unintentional conversion
            possible_timestamp_cols = [
                field['name'].lower()
                for field in schema['fields']
                if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)

        if record_time_added:
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time

        # write the CSV or JSON file depending on the option
        # NOTE:
        #   datetimes here are an issue.
        #   There is no good way to manage the difference
        #   for to_json, the options are an epoch or an ISO string
        #   but for to_csv, it will be a string output by datetime
        #   For JSON we decided to output the epoch timestamp in seconds
        #   (as is fairly standard for JavaScript)
        #   And for csv, we do a string
        if fmt == "csv":
            # there are also a ton of newline objects that mess up our ability to write to csv
            # we remove these newlines so that the output is a valid CSV format
            self.log.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\r\n", "").str.replace("\n", "")
            )
            # write the dataframe
            df.to_csv(filename, index=False)
        elif fmt == "json":
            df.to_json(filename, "records", date_unit="s")
        elif fmt == "ndjson":
            df.to_json(filename, "records", lines=True, date_unit="s")

        return df
0.003872
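A hedged end-to-end sketch for the writer above, assuming Airflow's old contrib SalesforceHook (where this method, sign_in, and make_query lived; names may differ by Airflow version). The connection id, SOQL query, and output path are placeholders.

from airflow.contrib.hooks.salesforce_hook import SalesforceHook

hook = SalesforceHook(conn_id='salesforce_default')
hook.sign_in()
results = hook.make_query("SELECT Id, LastModifiedDate FROM Account")
df = hook.write_object_to_file(
    results['records'],
    filename='/tmp/accounts.ndjson',
    fmt='ndjson',
    coerce_to_timestamp=True,    # date/datetime fields become epoch floats
    record_time_added=True,      # each row records when it was fetched
)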
def cli(wio, get_debug, debug):
    '''
    Change settings of the device.

    \b
    DOES:
        The config command lets you change settings of the device through UDP.
        1. Ensure your device is in Configure Mode.
        2. Change your computer network to Wio's AP.

    \b
    EXAMPLE:
        wio config --debug [on|off], enable/disable wio debug
        wio config --get-debug, get wio debug status
    '''
    if debug:
        cmd = None  # guard: stays None for any value other than on/off
        if debug == "on":
            cmd = "ENDEBUG: 1"
        elif debug == "off":
            cmd = "ENDEBUG: 0"
        if not cmd:
            return debug_error()

        result = udp.send(cmd)
        if not result:
            return debug_error()

        click.echo("Setting success!! Device will reboot!!")
    elif get_debug:
        try:
            result = udp.udp_debug()
        except Exception:
            return get_debug_error()

        if result == "1":
            click.echo("debug: on")
        elif result == '0':
            click.echo("debug: off")
        else:
            return get_debug_error()
    else:
        click.echo("Note:")
        click.echo(" 1. Ensure your device is in Configure Mode.")
        click.echo(" 2. Change your computer network to Wio's AP.")
        click.echo()
        click.echo("Use:")
        click.echo(" wio config --debug [on|off], enable/disable wio debug")
        click.echo(" wio config --get-debug, get wio debug status")
0.002753