Columns: text — string, lengths 78 to 104k; score — float64, range 0 to 0.18
def generate_headers(self, token):
    """Generate auth headers"""
    headers = {}
    token = self.encode_token(token)
    if self.config["header"]:
        headers[self.config["header"]] = token
    if self.config["cookie"]:
        headers["Set-Cookie"] = dump_cookie(
            self.config["cookie"], token, httponly=True,
            max_age=self.config["expiration"]
        )
    return headers
0.004464
def get_char(self, offset=0):
    """Return the current character in the working string."""
    if not self.has_space(offset=offset):
        return ''
    return self.string[self.pos + offset]
0.009524
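The bounds check before indexing is what lets callers probe ahead without try/except. A minimal, self-contained sketch of the same pattern (the Scanner class and its attribute names here are illustrative stand-ins, not taken from the row above):

class Scanner:
    def __init__(self, string):
        self.string = string
        self.pos = 0

    def has_space(self, offset=0):
        # True while pos + offset still falls inside the string.
        return self.pos + offset < len(self.string)

    def get_char(self, offset=0):
        if not self.has_space(offset=offset):
            return ''
        return self.string[self.pos + offset]

scanner = Scanner("ab")
assert scanner.get_char() == 'a'
assert scanner.get_char(offset=1) == 'b'
assert scanner.get_char(offset=2) == ''   # out of range -> empty string, no IndexError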
def InitSpecCheck(self):
    """
    make an interactive grid in which users can edit specimen names
    as well as which sample a specimen belongs to
    """
    #wait = wx.BusyInfo("Please wait, working...")
    #wx.SafeYield()
    self.contribution.propagate_lithology_cols()
    spec_df = self.contribution.tables['specimens'].df
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD, 'specimens',
                                            'specimens', self.panel,
                                            main_frame=self.main_frame)
    # redefine default 'save & exit grid' button to go to next dialog instead
    self.grid_frame.exitButton.SetLabel('Save and continue')
    grid = self.grid_frame.grid
    self.grid_frame.Bind(wx.EVT_BUTTON,
                         lambda event: self.onContinue(event, grid, self.InitSampCheck),
                         self.grid_frame.exitButton)
    # add back button
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.backButton.Disable()
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # re-do fit
    self.grid_frame.do_fit(None, self.min_size)
    # center
    self.grid_frame.Centre()
    return
0.007534
def split_docstring(self, block):
    """Split a code block into a docstring and a body."""
    try:
        first_line, rest_of_lines = block.split("\n", 1)
    except ValueError:
        pass
    else:
        raw_first_line = split_leading_trailing_indent(rem_comment(first_line))[1]
        if match_in(self.just_a_string, raw_first_line):
            return first_line, rest_of_lines
    return None, block
0.006652
def keep_vertices(self, indices_to_keep, ret_kept_faces=False):
    '''
    Keep the given vertices and discard the others, and any faces to which
    they may belong.

    If `ret_kept_faces` is `True`, return the original indices of the kept
    faces. Otherwise return `self` for chaining.
    '''
    import numpy as np

    if self.v is None:
        return

    indices_to_keep = np.array(indices_to_keep, dtype=np.uint32)
    initial_num_verts = self.v.shape[0]
    if self.f is not None:
        initial_num_faces = self.f.shape[0]
        f_indices_to_keep = self.all_faces_with_verts(indices_to_keep, as_boolean=True)

    # Why do we test this? Don't know. But we do need to test it before we
    # mutate self.v.
    vn_should_update = self.vn is not None and self.vn.shape[0] == initial_num_verts
    vc_should_update = self.vc is not None and self.vc.shape[0] == initial_num_verts

    self.v = self.v[indices_to_keep]
    if vn_should_update:
        self.vn = self.vn[indices_to_keep]
    if vc_should_update:
        self.vc = self.vc[indices_to_keep]

    if self.f is not None:
        v_old_to_new = np.zeros(initial_num_verts, dtype=int)
        f_old_to_new = np.zeros(initial_num_faces, dtype=int)

        v_old_to_new[indices_to_keep] = np.arange(len(indices_to_keep), dtype=int)
        self.f = v_old_to_new[self.f[f_indices_to_keep]]
        f_old_to_new[f_indices_to_keep] = np.arange(self.f.shape[0], dtype=int)
    else:
        # Make the code below work, in case there is somehow degenerate
        # segm even though there are no faces.
        f_indices_to_keep = []

    if self.segm is not None:
        new_segm = {}
        for segm_name, segm_faces in self.segm.items():
            faces = np.array(segm_faces, dtype=int)
            valid_faces = faces[f_indices_to_keep[faces]]
            if len(valid_faces):
                new_segm[segm_name] = f_old_to_new[valid_faces]
        self.segm = new_segm if new_segm else None

    if hasattr(self, '_raw_landmarks') and self._raw_landmarks is not None:
        self.recompute_landmarks()

    return np.nonzero(f_indices_to_keep)[0] if ret_kept_faces else self
0.002999
def __find_sync_range(self, messages, preamble_end: int, search_end: int):
    """
    Finding the synchronization works by finding the first difference between two messages.
    This is performed for all messages and the most frequent first difference is chosen

    :type messages: list of Message
    :param preamble_end: End of preamble = start of search
    :param search_end: End of search = start of first other label
    """
    possible_sync_pos = defaultdict(int)

    for i, msg in enumerate(messages):
        bits_i = msg.decoded_bits[preamble_end:search_end]
        for j in range(i, len(messages)):
            bits_j = messages[j].decoded_bits[preamble_end:search_end]

            first_diff = next((k for k, (bit_i, bit_j) in enumerate(zip(bits_i, bits_j))
                               if bit_i != bit_j), None)

            if first_diff is not None:
                first_diff = preamble_end + 4 * (first_diff // 4)
                if (first_diff - preamble_end) >= 4:
                    possible_sync_pos[(preamble_end, first_diff)] += 1
    try:
        sync_interval = max(possible_sync_pos, key=possible_sync_pos.__getitem__)
        return sync_interval
    except ValueError:
        return None
0.00546
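The core of the method is a vote over "first difference" positions between message pairs, rounded down to a nibble boundary. A standalone sketch of that vote on plain bit strings (function and variable names here are illustrative):

from collections import defaultdict

def most_common_sync_end(bit_strings, preamble_end):
    votes = defaultdict(int)
    for i, bits_i in enumerate(bit_strings):
        for bits_j in bit_strings[i:]:
            diffs = [k for k, (a, b) in enumerate(zip(bits_i, bits_j)) if a != b]
            if diffs:
                end = preamble_end + 4 * (diffs[0] // 4)  # align down to 4-bit boundary
                if end - preamble_end >= 4:
                    votes[end] += 1
    return max(votes, key=votes.get) if votes else None

# Three messages agreeing on their first 8 bits (zero-length preamble):
msgs = ["101011110000", "101011110111", "101011111111"]
print(most_common_sync_end(msgs, preamble_end=0))  # 8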
def update_user(self, user, attributes, attribute_mapping, force_save=False):
    """Update a user with a set of attributes and return the updated user.

    By default it uses a mapping defined in the settings constant
    SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has that
    field defined it will be set.
    """
    if not attribute_mapping:
        return user

    user_modified = False
    for saml_attr, django_attrs in attribute_mapping.items():
        attr_value_list = attributes.get(saml_attr)
        if not attr_value_list:
            logger.debug(
                'Could not find value for "%s", not updating fields "%s"',
                saml_attr, django_attrs)
            continue

        for attr in django_attrs:
            if hasattr(user, attr):
                user_attr = getattr(user, attr)
                if callable(user_attr):
                    modified = user_attr(attr_value_list)
                else:
                    modified = self._set_attribute(user, attr, attr_value_list[0])

                user_modified = user_modified or modified
            else:
                logger.debug(
                    'Could not find attribute "%s" on user "%s"', attr, user)

    logger.debug('Sending the pre_save signal')
    signal_modified = any(
        [response for receiver, response
         in pre_user_save.send_robust(sender=user.__class__,
                                      instance=user,
                                      attributes=attributes,
                                      user_modified=user_modified)]
    )

    if user_modified or signal_modified or force_save:
        user.save()

    return user
0.002685
def delete_organization_course(organization, course_key):
    """
    Removes an existing organization-course relationship from app/local state
    No response currently defined for this operation
    """
    try:
        relationship = internal.OrganizationCourse.objects.get(
            organization=organization['id'],
            course_id=text_type(course_key),
            active=True,
        )
        _inactivate_organization_course_relationship(relationship)
    except internal.OrganizationCourse.DoesNotExist:
        # If we're being asked to delete an organization-course link
        # that does not exist in the database then our work is done
        pass
0.001488
def _machinectl(cmd,
                output_loglevel='debug',
                ignore_retcode=False,
                use_vt=False):
    '''
    Helper function to run machinectl
    '''
    prefix = 'machinectl --no-legend --no-pager'
    return __salt__['cmd.run_all']('{0} {1}'.format(prefix, cmd),
                                   output_loglevel=output_loglevel,
                                   ignore_retcode=ignore_retcode,
                                   use_vt=use_vt)
0.00207
def get_ss_value(tag):
    """
    Getters for data that also work with implicit transfersyntax

    :param tag: the tag to read
    """
    # data is an int packed as bytes, so unpack the bytes first and cast to int
    if tag.VR == 'OB' or tag.VR == 'UN':
        value = struct.unpack('h', tag.value)[0]
        return value
    return tag.value
0.005731
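For reference, struct.unpack('h', ...) reads a 2-byte signed short in native byte order; with an implicit transfer syntax, pydicom hands the raw bytes through as tag.value when the VR is OB or UN. A quick standalone illustration:

import struct

raw = struct.pack('h', 42)          # 2-byte signed short, native byte order
print(raw)                          # e.g. b'*\x00' on a little-endian machine
print(struct.unpack('h', raw)[0])   # 42
# For data produced on another machine, pin the byte order explicitly:
print(struct.unpack('<h', b'\x2a\x00')[0])  # 42, little-endian regardless of host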
def median_low(name, num, minimum=0, maximum=0, ref=None):
    '''
    Calculates the low median of the ``num`` most recent values. Requires a
    list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_low:
            - name: myregentry
            - num: 5
    '''
    return calc(
        name=name,
        num=num,
        oper='median_low',
        minimum=minimum,
        maximum=maximum,
        ref=ref
    )
0.002288
def parse_string(self):
    """Parse a regular unquoted string from the token stream."""
    aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
    if aliased_value is not None:
        return aliased_value
    return String(self.current_token.value)
0.00692
def getBezierPaths(self, origin=None):
    """
    This function returns arrays that can be used as a Cubic Bezier
    Path in matplotlib.
    The function returns two arrays: the first one contains the
    vertices for each particle and has the shape (Nvert, Nparticles, 2)
    where Nvert is the number of vertices. The second array returned
    describes the type of vertices to be used with matplotlib's Patch
    class.

    Arguments
    ---------
    origin : multiple, optional
        If `origin` is None (default), then none of the coordinates are
        shifted. If `origin` is an integer then the particle with that
        index is used as the origin. If `origin` is equal to `com`, then
        the centre of mass is used as the origin.

    Examples
    --------
    The following example reads in a SimulationArchive and plots the
    trajectories as Cubic Bezier Curves. It also plots the actual
    datapoints stored in the SimulationArchive. Note that the
    SimulationArchive needs to have enough datapoints to allow for
    smooth and reasonable orbits.

    >>> from matplotlib.path import Path
    >>> import matplotlib.patches as patches
    >>> sa = rebound.SimulationArchive("test.bin")
    >>> verts, codes = sa.getBezierPaths(origin=0)
    >>> fig, ax = plt.subplots()
    >>> for j in range(sa[0].N):
    >>>     path = Path(verts[:,j,:], codes)
    >>>     patch = patches.PathPatch(path, facecolor='none')
    >>>     ax.add_patch(patch)
    >>>     ax.scatter(verts[::3,j,0],verts[::3,j,1])
    >>> ax.set_aspect('equal')
    >>> ax.autoscale_view()
    """
    import numpy as np
    Npoints = len(self)*3 - 2
    if len(self) <= 1:
        raise RuntimeError("Need at least two snapshots to build Bezier paths.")
    Nparticles = self[0].N

    verts = np.zeros((Npoints, Nparticles, 2))
    xy = np.zeros((len(self), Nparticles, 2))

    if origin == "com":
        origin = -2
    elif origin is not None:
        try:
            origin = int(origin)
        except:
            raise AttributeError("Cannot parse origin")
        if origin < 0 or origin >= Nparticles:
            raise AttributeError("Origin index out of range")

    for i, sim in enumerate(self):
        if origin is None:
            shift = (0, 0, 0, 0)
        elif origin == -2:
            sp = sim.calculate_com()
            shift = (sp.x, sp.y, sp.vx, sp.vy)
        else:
            sp = sim.particles[origin]
            shift = (sp.x, sp.y, sp.vx, sp.vy)
        for j in range(sim.N):
            p = sim.particles[j]
            if i == 0:
                verts[0, j] = p.x-shift[0], p.y-shift[1]
                verts[1, j] = p.vx-shift[2], p.vy-shift[3]
            else:
                dt = sim.t - tlast  # time since last snapshot
                verts[-2+i*3, j] = verts[-2+i*3, j]*dt/3. + verts[-3+i*3, j]
                verts[ 0+i*3, j] = p.x-shift[0], p.y-shift[1]
                verts[-1+i*3, j] = -p.vx+shift[2], -p.vy+shift[3]
                verts[-1+i*3, j] = verts[-1+i*3+0, j]*dt/3. + verts[ 0+i*3, j]
                if i != len(self)-1:
                    verts[+1+i*3, j] = p.vx-shift[2], p.vy-shift[3]
            xy[i, j] = p.x, p.y
        tlast = sim.t
    codes = np.full(Npoints, 4, dtype=np.uint8)  # Hardcoded 4 = matplotlib.path.Path.CURVE4
    codes[0] = 1  # Hardcoded 1 = matplotlib.path.Path.MOVETO
    return verts, codes
0.015102
def _filter_update_security_group_rule(rule):
    '''Only two fields are allowed for modification:
       external_service and external_service_id
    '''
    allowed = ['external_service', 'external_service_id']
    filtered = {}
    for k, val in rule.iteritems():
        if k in allowed:
            if isinstance(val, basestring) and \
                    len(val) <= GROUP_NAME_MAX_LENGTH:
                filtered[k] = val
    return filtered
0.002242
def elliptical(cls, shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi,
               centre=(0., 0.), invert=False):
    """
    Setup a mask where unmasked pixels are within an ellipse of an input arc
    second major-axis and centre.

    Parameters
    ----------
    shape: (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    major_axis_radius_arcsec : float
        The major-axis (in arc seconds) of the ellipse within which pixels are unmasked.
    axis_ratio : float
        The axis-ratio of the ellipse within which pixels are unmasked.
    phi : float
        The rotation angle of the ellipse within which pixels are unmasked,
        (counter-clockwise from the positive x-axis).
    centre: (float, float)
        The centre of the ellipse used to mask pixels.
    """
    mask = mask_util.mask_elliptical_from_shape_pixel_scale_and_radius(
        shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi, centre)

    if invert:
        mask = np.invert(mask)

    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
0.008449
def delete(sld, tld, nameserver):
    '''
    Deletes a nameserver. Returns ``True`` if the nameserver was deleted
    successfully

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    nameserver
        Nameserver to delete

    CLI Example:

    .. code-block:: bash

        salt '*' namecheap_domains_ns.delete sld tld nameserver
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.ns.delete')
    opts['SLD'] = sld
    opts['TLD'] = tld
    opts['Nameserver'] = nameserver

    response_xml = salt.utils.namecheap.post_request(opts)

    if response_xml is None:
        return False

    domainnsdeleteresult = response_xml.getElementsByTagName('DomainNSDeleteResult')[0]
    return salt.utils.namecheap.string_to_value(domainnsdeleteresult.getAttribute('IsSuccess'))
0.003659
def create(cls, fields=None, **fields_kwargs):
    """
    create an instance of cls with the passed in fields and set it into the db

    fields -- dict -- field_name keys, with their respective values
    **fields_kwargs -- dict -- if you would rather pass in fields as name=val,
        that works also
    """
    # NOTE -- you cannot use hydrate/populate here because populate alters
    # modified fields
    instance = cls(fields, **fields_kwargs)
    instance.save()
    return instance
0.00969
def get_model_indices(cls, index):
    '''
    Returns the list of model indices (i.e. ModelIndex objects) defined for
    this index.

    :param index: index name.
    '''
    try:
        return cls._idx_name_to_mdl_to_mdlidx[index].values()
    except KeyError:
        raise KeyError('Could not find any index named {}. Is this index '
                       'defined in BUNGIESEARCH["INDICES"]?'.format(index))
0.009592
def save(self, fname, compression='blosc'):
    """
    Save method for the data geometry object

    The data will be saved as a 'geo' file, which is a dictionary containing
    the elements of a data geometry object saved in the hd5 format using
    `deepdish`.

    Parameters
    ----------
    fname : str
        A name for the file. If the file extension (.geo) is not specified,
        it will be appended.

    compression : str
        The kind of compression to use. See the deepdish documentation for
        options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
    """
    if hasattr(self, 'dtype'):
        if 'list' in self.dtype:
            data = np.array(self.data)
        elif 'df' in self.dtype:
            data = {k: np.array(v).astype('str')
                    for k, v in self.data.to_dict('list').items()}
    else:
        data = self.data

    # put geo vars into a dict
    geo = {
        'data': data,
        'xform_data': np.array(self.xform_data),
        'reduce': self.reduce,
        'align': self.align,
        'normalize': self.normalize,
        'semantic': self.semantic,
        'corpus': np.array(self.corpus) if isinstance(self.corpus, list) else self.corpus,
        'kwargs': self.kwargs,
        'version': self.version,
        'dtype': self.dtype
    }

    # if extension wasn't included, add it
    if fname[-4:] != '.geo':
        fname += '.geo'

    # save
    dd.io.save(fname, geo, compression=compression)
0.011543
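deepdish's io.save writes a plain dict to HDF5 and io.load reads it back. A minimal round trip, assuming the deepdish package and a blosc-capable PyTables install (the file name and dict contents are illustrative):

import numpy as np
import deepdish as dd

geo = {'data': np.arange(6).reshape(2, 3), 'version': '0.1.0'}
dd.io.save('example.geo', geo, compression='blosc')
loaded = dd.io.load('example.geo')
assert np.array_equal(loaded['data'], geo['data'])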
def weights(self, matrix_id=0):
    """
    Return the frame for the respective weight matrix.

    :param: matrix_id: an integer, ranging from 0 to number of layers, that
        specifies the weight matrix to return.

    :returns: an H2OFrame which represents the weight matrix identified by matrix_id
    """
    num_weight_matrices = len(self._model_json["output"]["weights"])
    if matrix_id not in list(range(num_weight_matrices)):
        raise ValueError(
            "Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
            "was requested.".format(num_weight_matrices, matrix_id))
    return h2o.get_frame(self._model_json["output"]["weights"][matrix_id]["URL"].split("/")[3])
0.007732
def get_sn(unit):
    """Get the number of sentences in a line of text

    Keyword arguments:
    unit -- the line of text

    Return:
    sn -- the sentence count
    """
    sn = 0
    match_re = re.findall(str(sentence_delimiters), unit)
    if match_re:
        string = ''.join(match_re)
        sn = len(string)
    return int(sn)
0.003195
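Counting delimiter matches is a cheap proxy for sentence count. A standalone sketch with an explicit delimiter class (the original's sentence_delimiters is defined elsewhere; the pattern below is an assumed stand-in covering CJK and ASCII sentence ends):

import re

sentence_delimiters = r'[。!?!?.]'  # assumed delimiter set, one char per sentence end

def count_sentences(line):
    return len(re.findall(sentence_delimiters, line))

print(count_sentences("Hello there! How are you? Fine."))  # 3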
def delete_comment(self, resource_id, ent_id):
    """
    Delete a comment

    :param resource_id: ...
    :param ent_id: ...
    """
    self.requester.post(
        '/{endpoint}/{entity}/{id}/delete_comment?id={ent_id}',
        endpoint=self.endpoint, entity=self.entity,
        id=resource_id, ent_id=ent_id
    )
0.005556
def update_object(self, form, obj):
    """
    Saves the new value to the target object.
    """
    field_name = form.cleaned_data['name']
    value = form.cleaned_data['value']
    setattr(obj, field_name, value)

    save_kwargs = {}
    if CAN_UPDATE_FIELDS:
        save_kwargs['update_fields'] = [field_name]
    obj.save(**save_kwargs)

    data = json.dumps({
        'status': 'success',
    })
    return HttpResponse(data, content_type="application/json")
0.003953
def author_list(self):
    '''The list of authors as text, for admin submission list overview.'''
    author_list = [self.submitter] + \
        [author for author in self.authors.all().exclude(pk=self.submitter.pk)]
    return ",\n".join([author.get_full_name() for author in author_list])
0.012987
def export_saved_model(self, sess, export_dir, tag_set, signatures):
    """Convenience function to access ``TFNode.export_saved_model`` directly from this object instance."""
    TFNode.export_saved_model(sess, export_dir, tag_set, signatures)
0.008197
def rsa_encrypt_key_base64_encoded(rsaprivatekey, rsapublickey, plainkey):
    # type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey,
    #        cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey,
    #        bytes) -> str
    """Encrypt a plaintext key using RSA and PKCS1_OAEP padding
    :param rsaprivatekey: RSA private key
    :type rsaprivatekey: cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey
    :param rsapublickey: RSA public key
    :type rsapublickey: cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey
    :param bytes plainkey: plain key
    :rtype: str
    :return: encrypted key
    """
    if rsapublickey is None:
        rsapublickey = rsaprivatekey.public_key()
    enckey = rsapublickey.encrypt(
        plainkey,
        cryptography.hazmat.primitives.asymmetric.padding.OAEP(
            mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
                algorithm=cryptography.hazmat.primitives.hashes.SHA1()),
            algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
            label=None))
    return blobxfer.util.base64_encode_as_string(enckey)
0.00087
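The same RSA-OAEP encryption with the cryptography package's shorter import paths, as a self-contained sketch (key size and plaintext are illustrative; base64 replaces the blobxfer helper):

import base64
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

ciphertext = public_key.encrypt(
    b"0123456789abcdef0123456789abcdef",  # e.g. a 256-bit symmetric key
    padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA1()),
        algorithm=hashes.SHA1(),
        label=None,
    ),
)
print(base64.b64encode(ciphertext).decode())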
def init(self, settings_file="zappa_settings.json"):
    """
    Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.

    This should probably be broken up into a few separate components once it's stable.
    Testing these inputs requires monkeypatching with mock, which isn't pretty.
    """

    # Make sure we're in a venv.
    self.check_venv()

    # Ensure that we don't already have a zappa_settings file.
    if os.path.isfile(settings_file):
        raise ClickException("This project already has a " + click.style("{0!s} file".format(settings_file), fg="red", bold=True) + "!")

    # Explain system.
    click.echo(click.style(u"""\nβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—
β•šβ•β•β–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—
  β–ˆβ–ˆβ–ˆβ•”β• β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•‘
 β–ˆβ–ˆβ–ˆβ•”β•  β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β•β• β–ˆβ–ˆβ•”β•β•β•β• β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•‘
β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘  β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•‘     β–ˆβ–ˆβ•‘     β–ˆβ–ˆβ•‘  β–ˆβ–ˆβ•‘
β•šβ•β•β•β•β•β•β•β•šβ•β•  β•šβ•β•β•šβ•β•     β•šβ•β•     β•šβ•β•  β•šβ•β•\n""", fg='green', bold=True))

    click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True))
    click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications"
               " on AWS Lambda and AWS API Gateway.")
    click.echo("This `init` command will help you create and configure your new Zappa deployment.")
    click.echo("Let's get started!\n")

    # Create Env
    while True:
        click.echo("Your Zappa configuration can support multiple production stages, like '" +
                   click.style("dev", bold=True) + "', '" +
                   click.style("staging", bold=True) + "', and '" +
                   click.style("production", bold=True) + "'.")
        env = input("What do you want to call this environment (default 'dev'): ") or "dev"
        try:
            self.check_stage_name(env)
            break
        except ValueError:
            click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))

    # Detect AWS profiles and regions
    # If anyone knows a more straightforward way to easily detect and parse AWS
    # profiles I'm happy to change this, feels like a hack
    session = botocore.session.Session()
    config = session.full_config
    profiles = config.get("profiles", {})
    profile_names = list(profiles.keys())

    click.echo("\nAWS Lambda and API Gateway are only available in certain regions. "
               "Let's check to make sure you have a profile set up in one that will work.")

    if not profile_names:
        profile_name, profile = None, None
        click.echo("We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}"
                   .format(click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)))
    elif len(profile_names) == 1:
        profile_name = profile_names[0]
        profile = profiles[profile_name]
        click.echo("Okay, using profile {}!".format(click.style(profile_name, bold=True)))
    else:
        if "default" in profile_names:
            default_profile = [p for p in profile_names if p == "default"][0]
        else:
            default_profile = profile_names[0]

        while True:
            profile_name = input("We found the following profiles: {}, and {}. "
                                 "Which would you like us to use? (default '{}'): "
                                 .format(
                                     ', '.join(profile_names[:-1]),
                                     profile_names[-1],
                                     default_profile
                                 )) or default_profile
            if profile_name in profiles:
                profile = profiles[profile_name]
                break
            else:
                click.echo("Please enter a valid name for your AWS profile.")

    profile_region = profile.get("region") if profile else None

    # Create Bucket
    click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".")
    click.echo("If you don't have a bucket yet, we'll create one for you too.")
    default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))
    while True:
        bucket = input("What do you want to call your bucket? (default '%s'): " % default_bucket) or default_bucket

        if is_valid_bucket_name(bucket):
            break

        click.echo(click.style("Invalid bucket name!", bold=True))
        click.echo("S3 buckets must be named according to the following rules:")
        click.echo("""* Bucket names must be unique across all existing bucket names in Amazon S3.
* Bucket names must comply with DNS naming conventions.
* Bucket names must be at least 3 and no more than 63 characters long.
* Bucket names must not contain uppercase characters or underscores.
* Bucket names must start with a lowercase letter or number.
* Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). Bucket names can contain lowercase letters, numbers, and hyphens. Each label must start and end with a lowercase letter or a number.
* Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL wildcard certificate only matches buckets that don't contain periods. To work around this, use HTTP or write your own certificate verification logic. We recommend that you do not use periods (".") in bucket names when using virtual hosted–style buckets.
""")

    # Detect Django/Flask
    try:  # pragma: no cover
        import django
        has_django = True
    except ImportError as e:
        has_django = False

    try:  # pragma: no cover
        import flask
        has_flask = True
    except ImportError as e:
        has_flask = False

    print('')

    # App-specific
    if has_django:  # pragma: no cover
        click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!")
        click.echo("What is the " + click.style("module path", bold=True) + " to your project's Django settings?")
        django_settings = None

        matches = detect_django_settings()
        while django_settings in [None, '']:
            if matches:
                click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                django_settings = input("Where are your project's settings? (default '%s'): " % matches[0]) or matches[0]
            else:
                click.echo("(This will likely be something like 'your_project.settings')")
                django_settings = input("Where are your project's settings?: ")
        django_settings = django_settings.replace("'", "")
        django_settings = django_settings.replace('"', "")
    else:
        matches = None
        if has_flask:
            click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.")
            matches = detect_flask_apps()
        click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?")
        click.echo("This will likely be something like 'your_module.app'.")
        app_function = None
        while app_function in [None, '']:
            if matches:
                click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                app_function = input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0]
            else:
                app_function = input("Where is your app's function?: ")
        app_function = app_function.replace("'", "")
        app_function = app_function.replace('"', "")

    # TODO: Create VPC?
    # Memory size? Time limit?
    # Domain? LE keys? Region?
    # 'Advanced Settings' mode?

    # Globalize
    click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) +
               " in order to provide fast global service.")
    click.echo("If you are using Zappa for the first time, you probably don't want to do this!")
    global_deployment = False
    while True:
        global_type = input("Would you like to deploy this application " + click.style("globally", bold=True) +
                            "? (default 'n') [y/n/(p)rimary]: ")
        if not global_type:
            break
        if global_type.lower() in ["y", "yes", "p", "primary"]:
            global_deployment = True
            break
        if global_type.lower() in ["n", "no"]:
            global_deployment = False
            break

    # The given environment name
    zappa_settings = {
        env: {
            'profile_name': profile_name,
            's3_bucket': bucket,
            'runtime': get_venv_from_python_version(),
            'project_name': self.get_project_name()
        }
    }

    if profile_region:
        zappa_settings[env]['aws_region'] = profile_region

    if has_django:
        zappa_settings[env]['django_settings'] = django_settings
    else:
        zappa_settings[env]['app_function'] = app_function

    # Global Region Deployment
    if global_deployment:
        additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region]
        # Create additional stages
        if global_type.lower() in ["p", "primary"]:
            additional_regions = [r for r in additional_regions if '-1' in r]

        for region in additional_regions:
            env_name = env + '_' + region.replace('-', '_')
            g_env = {
                env_name: {
                    'extends': env,
                    'aws_region': region
                }
            }
            zappa_settings.update(g_env)

    import json as json  # hjson is fine for loading, not fine for writing.
    zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)

    click.echo("\nOkay, here's your " + click.style("zappa_settings.json", bold=True) + ":\n")
    click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))

    confirm = input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes'
    if confirm[0] not in ['y', 'Y', 'yes', 'YES']:
        click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.")
        return

    # Write
    with open("zappa_settings.json", "w") as zappa_settings_file:
        zappa_settings_file.write(zappa_settings_json)

    if global_deployment:
        click.echo("\n" + click.style("Done", bold=True) + "! You can also " + click.style("deploy all", bold=True) +
                   " by executing:\n")
        click.echo(click.style("\t$ zappa deploy --all", bold=True))

        click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
        click.echo(click.style("\t$ zappa update --all", bold=True))
    else:
        click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) +
                   " your Zappa application by executing:\n")
        click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))

        click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
        click.echo(click.style("\t$ zappa update %s" % env, bold=True))

    click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) +
               " here: " + click.style("https://github.com/Miserlou/Zappa", fg="cyan", bold=True))
    click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " +
               click.style("https://slack.zappa.io", fg="cyan", bold=True))
    click.echo("\nEnjoy!,")
    click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")

    return
0.0067
def check(self, defn, msg=None):
    """Uses the byte range in the object definition to determine the
    number of bytes and compares to the size defined in the type.

    Assumes the defn has 'type' and 'name' attributes, and a slice() method
    """
    if isinstance(defn.type, dtype.PrimitiveType):
        # Check the nbytes designated in the YAML match the PDT
        nbytes = defn.type.nbytes
        defnbytes = defn.slice().stop - defn.slice().start
        if nbytes != defnbytes:
            self.messages.append(self.msg % defn.name)
            self.messages.append("Definition size of (" + str(defnbytes) +
                                 " bytes) does not match size of data" +
                                 " type " + str(defn.type.name) + " (" +
                                 str(nbytes) + " byte(s))")
            # TODO self.messages.append("TBD location message")
            self.valid = False
0.003043
def auth(
    cls,
    consumer_key,
    redirect_uri='http://example.com/',
    state=None,
):
    '''
    This is a test method for verifying if oauth worked
    http://getpocket.com/developer/docs/authentication
    '''
    code = cls.get_request_token(consumer_key, redirect_uri, state)
    auth_url = 'https://getpocket.com/auth/authorize?request_token='\
        '%s&redirect_uri=%s' % (code, redirect_uri)
    raw_input(
        'Please open %s in your browser to authorize the app and '
        'press enter:' % auth_url
    )
    return cls.get_access_token(consumer_key, code)
0.004739
def load_resource(resource_url: str, forceupdate: bool = False):
    """Load BEL Resource file

    Forceupdate will create a new index in Elasticsearch regardless of whether
    an index with the resource version already exists.

    Args:
        resource_url: URL from which to download the resource to load into the BEL API
        forceupdate: force full update - e.g. don't leave Elasticsearch indexes
            alone if their version ID matches
    """
    log.info(f"Loading resource {resource_url}")

    try:
        # Download resource
        fo = bel.utils.download_file(resource_url)

        if not fo:
            log.error(f"Could not download and open file {resource_url}")
            return "Failed to download resource_url"

        # Get metadata
        fo.seek(0)
        with gzip.open(fo, "rt") as f:
            metadata = json.loads(f.__next__())

        if "metadata" not in metadata:
            log.error(f"Missing metadata entry for {resource_url}")
            return "Cannot load resource file - missing metadata object in first line of file"

        # Load resource files
        if metadata["metadata"]["type"] == "namespace":
            bel.resources.namespace.load_terms(fo, metadata, forceupdate)

        elif metadata["metadata"]["type"] == "ortholog":
            bel.resources.ortholog.load_orthologs(fo, metadata)

    finally:
        fo.close()
0.00289
def fetch(cls, channel, start, end, host=None, port=None, verbose=False,
          connection=None, verify=False, pad=None, allow_tape=None,
          scaled=None, type=None, dtype=None):
    """Fetch data from NDS

    Parameters
    ----------
    channel : `str`, `~gwpy.detector.Channel`
        the data channel for which to query

    start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
        GPS start time of required data,
        any input parseable by `~gwpy.time.to_gps` is fine

    end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
        GPS end time of required data,
        any input parseable by `~gwpy.time.to_gps` is fine

    host : `str`, optional
        URL of NDS server to use, if blank will try any server
        (in a relatively sensible order) to get the data

    port : `int`, optional
        port number for NDS server query, must be given with `host`

    verify : `bool`, optional, default: `False`
        check channels exist in database before asking for data

    scaled : `bool`, optional
        apply slope and bias calibration to ADC data, for non-ADC data
        this option has no effect

    connection : `nds2.connection`, optional
        open NDS connection to use

    verbose : `bool`, optional
        print verbose output about NDS progress, useful for debugging;
        if ``verbose`` is specified as a string, this defines the prefix
        for the progress meter

    type : `int`, optional
        NDS2 channel type integer

    dtype : `type`, `numpy.dtype`, `str`, optional
        identifier for desired output data type
    """
    return cls.DictClass.fetch(
        [channel], start, end, host=host, port=port, verbose=verbose,
        connection=connection, verify=verify, pad=pad, scaled=scaled,
        allow_tape=allow_tape, type=type, dtype=dtype)[str(channel)]
0.002017
def search(ont, searchterm):
    """
    Search for things using labels
    """
    namedGraph = get_named_graph(ont)
    query = """
    SELECT ?c ?l WHERE {{
        GRAPH <{g}> {{
            ?c rdfs:label ?l
            FILTER regex(?l,'{s}','i')
        }}
    }}
    """.format(s=searchterm, g=namedGraph)
    bindings = run_sparql(query)
    return [(r['c']['value'], r['l']['value']) for r in bindings]
0.005181
def filter_alias_create_namespace(namespace):
    """
    Filter alias name and alias command inside alias create namespace
    to appropriate strings.

    Args:
        namespace: The alias create namespace.

    Returns:
        Filtered namespace where excessive whitespaces are removed in strings.
    """
    def filter_string(s):
        return ' '.join(s.strip().split())

    namespace.alias_name = filter_string(namespace.alias_name)
    namespace.alias_command = filter_string(namespace.alias_command)
    return namespace
0.003788
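The strip-split-join idiom used by filter_string collapses every internal run of whitespace (spaces, tabs, newlines) to a single space:

def collapse_ws(s):
    # split() with no argument splits on any whitespace run and drops empties
    return ' '.join(s.strip().split())

assert collapse_ws("  az   storage\taccount\n list  ") == "az storage account list"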
def get_data_object(data_id, use_data_config=True):
    """
    Normalize the data_id and query the server.
    If that is unavailable try the raw ID
    """
    normalized_data_reference = normalize_data_name(data_id, use_data_config=use_data_config)
    client = DataClient()
    data_obj = client.get(normalized_data_reference)

    # Try with the raw ID
    if not data_obj and data_id != normalized_data_reference:
        data_obj = client.get(data_id)

    return data_obj
0.004175
def _set_retain(self, v, load=False):
    """
    Setter method for retain, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/l2vpn/evpn/retain (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_retain is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retain() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """retain must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })

    self.__retain = t
    if hasattr(self, '_set'):
        self._set()
0.006337
def _did_receive_event(self, connection):
    """ Receive an event from connection """
    if not self._is_running:
        return

    if connection.has_timeouted:
        return

    response = connection.response
    data = None

    if response.status_code != 200:
        pushcenter_logger.error("[NURESTPushCenter]: Connection failure [%s] %s" % (response.status_code, response.errors))
    else:
        data = response.data

    if len(self._delegate_methods) > 0:
        for m in self._delegate_methods:
            try:
                m(data)
            except Exception as exc:
                pushcenter_logger.error("[NURESTPushCenter] Delegate method %s failed:\n%s" % (m, exc))
    elif data:
        events = data['events']
        self.nb_events_received += len(events)
        self.nb_push_received += 1
        pushcenter_logger.info("[NURESTPushCenter] Received Push #%s (total=%s, latest=%s)\n%s" %
                               (self.nb_push_received, self.nb_events_received, len(events), json.dumps(events, indent=4)))
        self._last_events.extend(events)

    if self._is_running:
        uuid = None
        if data and 'uuid' in data:
            uuid = data['uuid']
        self._listen(uuid)
0.003674
def sign(ctx, file, account):
    """ Sign a message with an account """
    if not file:
        print_message("Prompting for message. Terminate with CTRL-D", "info")
        file = click.get_text_stream("stdin")
    m = Message(file.read(), bitshares_instance=ctx.bitshares)
    print_message(m.sign(account), "info")
0.003096
def pdb(self):
    """Start the python debugger

    Calling pdb won't do anything in a multithread context
    """
    if self.embed_disabled:
        self.warning_log("Pdb is disabled when run from the grid runner because of the multithreading")  # noqa
        return False

    if BROME_CONFIG['runner']['play_sound_on_pdb']:
        say(BROME_CONFIG['runner']['sound_on_pdb'])

    set_trace()
0.004577
def check_constraint(column, lenum, **kwargs):
    """
    Returns a SQL CHECK constraint string given a column name and a
    :class:`~coaster.utils.classes.LabeledEnum`.

    Alembic may not detect the CHECK constraint when autogenerating
    migrations, so you may need to do this manually using the Python
    console to extract the SQL string::

        from coaster.sqlalchemy import StateManager
        from your_app.models import YOUR_ENUM

        print str(StateManager.check_constraint('your_column', YOUR_ENUM).sqltext)

    :param str column: Column name
    :param LabeledEnum lenum: :class:`~coaster.utils.classes.LabeledEnum` to
        retrieve valid values from
    :param kwargs: Additional options passed to CheckConstraint
    """
    return CheckConstraint(
        str(column_constructor(column).in_(lenum.keys()).compile(
            compile_kwargs={'literal_binds': True})),
        **kwargs)
0.005203
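The trick here is compiling an in_() expression with literal_binds so the values are inlined into the SQL text instead of being left as bind parameters. A standalone illustration with plain SQLAlchemy (the column name, values, and constraint name are illustrative):

from sqlalchemy import CheckConstraint, column

expr = column('status').in_([0, 1, 2])
sql_text = str(expr.compile(compile_kwargs={'literal_binds': True}))
print(sql_text)  # status IN (0, 1, 2)

constraint = CheckConstraint(sql_text, name='ck_status_valid')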
def _dims2shape(*dims):
    """Convert input dimensions to a shape."""
    if not dims:
        raise ValueError("expected at least one dimension spec")
    shape = list()
    for dim in dims:
        if isinstance(dim, int):
            dim = (0, dim)
        if isinstance(dim, tuple) and len(dim) == 2:
            if dim[0] < 0:
                raise ValueError("expected low dimension to be >= 0")
            if dim[1] < 0:
                raise ValueError("expected high dimension to be >= 0")
            if dim[0] > dim[1]:
                raise ValueError("expected low <= high dimensions")
            start, stop = dim
        else:
            raise TypeError("expected dimension to be int or (int, int)")
        shape.append((start, stop))
    return tuple(shape)
0.001285
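Each dimension is normalized to a (low, high) pair, with a bare int n treated as (0, n). Assuming the _dims2shape from the row above is in scope:

assert _dims2shape(4) == ((0, 4),)
assert _dims2shape(4, (2, 8)) == ((0, 4), (2, 8))
try:
    _dims2shape((5, 2))       # low > high
except ValueError as exc:
    print(exc)                # expected low <= high dimensions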
def close(self):
    """Closes the connection.

    All outstanding futures for replies will be sent a DisconnectError.
    """
    if self._recv_task:
        self._recv_task.cancel()
    self._disable_monitoring()
    if self._monitor_task and not self._monitor_task.done():
        self._monitor_task.cancel()
    self._receiver.cancel()
    self._socket.close(linger=0)
    self._msg_router.fail_all(DisconnectError())
0.004255
def _AddProvidesEdges(self, rdf_artifact):
    """Add an edge for every attribute the given artifact provides.

    This method adds a directed edge from the artifact node to every
    attribute this artifact provides.

    Args:
      rdf_artifact: The artifact object.
    """
    for attribute in rdf_artifact.provides:
        self._AddEdge(rdf_artifact.name, attribute)
0.005376
def sample(self, n, mass_min=0.1, mass_max=10., steps=10000, seed=None):
    """
    Sample initial mass values between mass_min and mass_max,
    following the IMF distribution.

    ADW: Should this be `sample` or `simulate`?

    Parameters:
    -----------
    n : number of samples to draw
    mass_min : minimum mass to sample from
    mass_max : maximum mass to sample from
    steps    : number of steps for isochrone sampling
    seed     : random seed (passed to np.random.seed)

    Returns:
    --------
    mass : array of randomly sampled mass values
    """
    if seed is not None:
        np.random.seed(seed)
    d_mass = (mass_max - mass_min) / float(steps)
    mass = np.linspace(mass_min, mass_max, steps)
    cdf = np.insert(np.cumsum(d_mass * self.pdf(mass[1:], log_mode=False)), 0, 0.)
    cdf = cdf / cdf[-1]
    f = scipy.interpolate.interp1d(cdf, mass)
    return f(np.random.uniform(size=n))
0.003988
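This is inverse-transform sampling: build a normalized CDF on a mass grid, then interpolate it as a function from cumulative probability back to mass. The same recipe applied to a Salpeter-like power-law IMF, as a self-contained sketch (the exponent and mass range are illustrative):

import numpy as np
import scipy.interpolate

def sample_power_law(n, alpha=2.35, m_min=0.1, m_max=10.0, steps=10000, seed=0):
    rng = np.random.default_rng(seed)
    mass = np.linspace(m_min, m_max, steps)
    pdf = mass ** (-alpha)                      # un-normalized dN/dm
    cdf = np.insert(np.cumsum(pdf[1:] * np.diff(mass)), 0, 0.0)
    cdf /= cdf[-1]                              # normalize to [0, 1]
    inv_cdf = scipy.interpolate.interp1d(cdf, mass)
    return inv_cdf(rng.uniform(size=n))

samples = sample_power_law(5)
print(samples)  # five masses, weighted toward m_min by the steep power law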
def get_n_excluded_patches(self):
    """
    Gets number of excluded patches from patches_base:
    #patches_base=1.0.0+THIS_NUMBER
    """
    base = self.get_patches_base()
    if not base:
        return 0
    p = base.rfind('+')
    if p == -1:
        return 0
    try:
        n = int(base[p+1:])
        return n
    except ValueError:
        # int() raises ValueError (not TypeError) on a non-numeric suffix
        return 0
0.004773
def next_message(self):
    '''called as each msg is ready'''

    msg = self.msg
    if msg is None:
        self.paused = True
    if self.paused:
        self.root.after(100, self.next_message)
        return
    try:
        speed = float(self.playback.get())
    except:
        speed = 0.0
    timestamp = getattr(msg, '_timestamp')

    now = time.strftime("%H:%M:%S", time.localtime(timestamp))
    self.clock.configure(text=now)

    if speed == 0.0:
        self.root.after(200, self.next_message)
    else:
        self.root.after(int(1000*(timestamp - self.last_timestamp) / speed), self.next_message)
    self.last_timestamp = timestamp

    while True:
        self.msg = self.mlog.recv_match(condition=args.condition)
        if self.msg is None and self.mlog.f.tell() > self.filesize - 10:
            self.paused = True
            return
        if self.msg is not None and self.msg.get_type() != "BAD_DATA":
            break

    pos = float(self.mlog.f.tell()) / self.filesize
    self.slider.set(pos)
    self.filepos = self.slider.get()

    if msg.get_type() != "BAD_DATA":
        for m in self.mout:
            m.write(msg.get_msgbuf())

    if msg.get_type() == "GPS_RAW":
        self.fdm.set('latitude', msg.lat, units='degrees')
        self.fdm.set('longitude', msg.lon, units='degrees')
        if args.gpsalt:
            self.fdm.set('altitude', msg.alt, units='meters')

    if msg.get_type() == "GPS_RAW_INT":
        self.fdm.set('latitude', msg.lat/1.0e7, units='degrees')
        self.fdm.set('longitude', msg.lon/1.0e7, units='degrees')
        if args.gpsalt:
            self.fdm.set('altitude', msg.alt/1.0e3, units='meters')

    if msg.get_type() == "VFR_HUD":
        if not args.gpsalt:
            self.fdm.set('altitude', msg.alt, units='meters')
        self.fdm.set('num_engines', 1)
        self.fdm.set('vcas', msg.airspeed, units='mps')

    if msg.get_type() == "ATTITUDE":
        self.fdm.set('phi', msg.roll, units='radians')
        self.fdm.set('theta', msg.pitch, units='radians')
        self.fdm.set('psi', msg.yaw, units='radians')
        self.fdm.set('phidot', msg.rollspeed, units='rps')
        self.fdm.set('thetadot', msg.pitchspeed, units='rps')
        self.fdm.set('psidot', msg.yawspeed, units='rps')

    if msg.get_type() == "RC_CHANNELS_SCALED":
        self.fdm.set("right_aileron", msg.chan1_scaled*0.0001)
        self.fdm.set("left_aileron", -msg.chan1_scaled*0.0001)
        self.fdm.set("rudder", msg.chan4_scaled*0.0001)
        self.fdm.set("elevator", msg.chan2_scaled*0.0001)
        self.fdm.set('rpm', msg.chan3_scaled*0.01)

    if msg.get_type() == 'STATUSTEXT':
        print("APM: %s" % msg.text)

    if msg.get_type() == 'SYS_STATUS':
        self.flightmode.configure(text=self.mlog.flightmode)

    if msg.get_type() == "BAD_DATA":
        if mavutil.all_printable(msg.data):
            sys.stdout.write(msg.data)
            sys.stdout.flush()

    if self.fdm.get('latitude') != 0:
        for f in self.fgout:
            f.write(self.fdm.pack())
0.001192
def setdefault(self, key, value):
    """We may not always be connected to an app, but we still need
    to provide a way to the base environment to set its defaults.
    """
    try:
        super(FlaskConfigStorage, self).setdefault(key, value)
    except RuntimeError:
        self._defaults.__setitem__(key, value)
0.005764
def from_array(a, mode=None, info={}):
    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
    array.  One application of this function is easy PIL-style saving:
    ``png.from_array(pixels, 'L').save('foo.png')``.

    .. note :

      The use of the term *3-dimensional* is for marketing purposes
      only.  It doesn't actually work.  Please bear with us.  Meanwhile
      enjoy the complimentary snacks (on request) and please use a
      2-dimensional array.

    Unless they are specified using the *info* parameter, the PNG's
    height and width are taken from the array size.  For a 3 dimensional
    array the first axis is the height; the second axis is the width;
    and the third axis is the channel number.  Thus an RGB image that is
    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
    dimensional arrays the first axis is the height, but the second axis
    is ``width*channels``, so an RGB image that is 16 pixels high and 8
    wide will use a 2-dimensional array that is 16x24 (each row will be
    8*3==24 sample values).

    *mode* is a string that specifies the image colour format in a
    PIL-style mode.  It can be:

    ``'L'``
      greyscale (1 channel)
    ``'LA'``
      greyscale with alpha (2 channel)
    ``'RGB'``
      colour image (3 channel)
    ``'RGBA'``
      colour image with alpha (4 channel)

    The mode string can also specify the bit depth (overriding how this
    function normally derives the bit depth, see below).  Appending
    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
    any decimal from 1 to 16 can be used to specify the bit depth.

    When a 2-dimensional array is used *mode* determines how many
    channels the image has, and so allows the width to be derived from
    the second array dimension.

    The array is expected to be a ``numpy`` array, but it can be any
    suitable Python sequence.  For example, a list of lists can be used:
    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
    rules are: ``len(a)`` gives the first dimension, height;
    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
    third dimension, unless an exception is raised in which case a
    2-dimensional array is assumed.  It's slightly more complicated than
    that because an iterator of rows can be used, and it all still
    works.

    Using an iterator allows data to be streamed efficiently.

    The bit depth of the PNG is normally taken from the array element's
    datatype (but if *mode* specifies a bitdepth then that is used
    instead).  The array element's datatype is determined in a way which
    is supposed to work both for ``numpy`` arrays and for Python
    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
    does not have an implicit size, for example it is a plain Python
    list of lists, as above, then a default of 8 is used.

    The *info* parameter is a dictionary that can be used to specify
    metadata (in the same style as the arguments to the
    :class:``png.Writer`` class).  For this function the keys that are
    useful are:

    height
      overrides the height derived from the array dimensions and allows
      *a* to be an iterable.
    width
      overrides the width derived from the array dimensions.
    bitdepth
      overrides the bit depth derived from the element datatype (but
      must match *mode* if that also specifies a bit depth).

    Generally anything specified in the *info* dictionary will override
    any implicit choices that this function would otherwise make, but
    must match any explicit ones.  For example, if the *info* dictionary
    has a ``greyscale`` key then this must be true when mode is ``'L'``
    or ``'LA'`` and false when mode is ``'RGB'`` or ``'RGBA'``.
    """
    # We abuse the *info* parameter by modifying it.  Take a copy here.
    # (Also typechecks *info* to some extent).
    info = dict(info)

    # Syntax check mode string.
    bitdepth = None
    try:
        # Assign the 'L' or 'RGBA' part to `gotmode`.
        if mode.startswith('L'):
            gotmode = 'L'
            mode = mode[1:]
        elif mode.startswith('RGB'):
            gotmode = 'RGB'
            mode = mode[3:]
        else:
            raise Error()
        if mode.startswith('A'):
            gotmode += 'A'
            mode = mode[1:]

        # Skip any optional ';'
        while mode.startswith(';'):
            mode = mode[1:]

        # Parse optional bitdepth
        if mode:
            try:
                bitdepth = int(mode)
            except (TypeError, ValueError):
                raise Error()
    except Error:
        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
    mode = gotmode

    # Get bitdepth from *mode* if possible.
    if bitdepth:
        if info.get('bitdepth') and bitdepth != info['bitdepth']:
            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
                        (bitdepth, info['bitdepth']))
        info['bitdepth'] = bitdepth

    # Fill in and/or check entries in *info*.
    # Dimensions.
    if 'size' in info:
        # Check width, height, size all match where used.
        for dimension, axis in [('width', 0), ('height', 1)]:
            if dimension in info:
                if info[dimension] != info['size'][axis]:
                    raise Error(
                        "info[%r] should match info['size'][%r]." %
                        (dimension, axis))
        info['width'], info['height'] = info['size']
    if 'height' not in info:
        try:
            l = len(a)
        except TypeError:
            raise Error(
                "len(a) does not work, supply info['height'] instead.")
        info['height'] = l

    # Colour format.
    if 'greyscale' in info:
        if bool(info['greyscale']) != ('L' in mode):
            raise Error("info['greyscale'] should match mode.")
    info['greyscale'] = 'L' in mode
    if 'alpha' in info:
        if bool(info['alpha']) != ('A' in mode):
            raise Error("info['alpha'] should match mode.")
    info['alpha'] = 'A' in mode

    planes = len(mode)
    if 'planes' in info:
        if info['planes'] != planes:
            raise Error("info['planes'] should match mode.")

    # In order to work out whether we the array is 2D or 3D we need its
    # first row, which requires that we take a copy of its iterator.
    # We may also need the first row to derive width and bitdepth.
    a, t = itertools.tee(a)
    row = t.next()
    del t
    try:
        row[0][0]
        threed = True
        testelement = row[0]
    except (IndexError, TypeError):
        threed = False
        testelement = row
    if 'width' not in info:
        if threed:
            width = len(row)
        else:
            width = len(row) // planes
        info['width'] = width

    # Not implemented yet
    assert not threed

    if 'bitdepth' not in info:
        try:
            dtype = testelement.dtype
            # goto the "else:" clause.  Sorry.
        except AttributeError:
            try:
                # Try a Python array.array.
                bitdepth = 8 * testelement.itemsize
            except AttributeError:
                # We can't determine it from the array element's
                # datatype, use a default of 8.
                bitdepth = 8
        else:
            # If we got here without exception, we now assume that
            # the array is a numpy array.
            if dtype.kind == 'b':
                bitdepth = 1
            else:
                bitdepth = 8 * dtype.itemsize
        info['bitdepth'] = bitdepth

    for thing in 'width height bitdepth greyscale alpha'.split():
        assert thing in info
    return Image(a, info)
0.001017
def check_data(cls, name, dims, is_unstructured):
    """
    A validation method for the data shape

    The default method does nothing and should be subclassed to validate the
    results. If the plotter accepts a :class:`InteractiveList`, it should
    accept a list for name and dims

    Parameters
    ----------
    name: str or list of str
        The variable name(s) of the data
    dims: list of str or list of lists of str
        The dimension name(s) of the data
    is_unstructured: bool or list of bool
        True if the corresponding array is unstructured

    Returns
    -------
    list of bool or None
        True, if everything is okay, False in case of a serious error,
        None if it is intermediate. Each object in this list corresponds
        to one in the given `name`
    list of str
        The message giving more information on the reason. Each object in
        this list corresponds to one in the given `name`"""
    if isinstance(name, six.string_types):
        name = [name]
        dims = [dims]
        is_unstructured = [is_unstructured]
    N = len(name)
    if len(dims) != N or len(is_unstructured) != N:
        return [False] * N, [
            'Number of provided names (%i) and dimensions '
            '(%i) or unstructured information (%i) are not the same' % (
                N, len(dims), len(is_unstructured))] * N
    return [True] * N, [''] * N
0.0013
def in_use(self):
    """Returns True if there is a :class:`State` object that uses this ``Flow``"""
    state = State.objects.filter(flow=self).first()
    return bool(state)
0.010256
def render_response(self):
    """Render as a string formatted for HTTP response headers
    (detailed 'Set-Cookie: ' style).
    """
    # Use whatever renderers are defined for name and value.
    # (.attributes() is responsible for all other rendering.)
    name, value = self.name, self.value

    renderer = self.attribute_renderers.get('name', None)
    if renderer:
        name = renderer(name)

    renderer = self.attribute_renderers.get('value', None)
    if renderer:
        value = renderer(value)

    return '; '.join(
        ['{0}={1}'.format(name, value)] +
        [key if isinstance(val, bool) else '='.join((key, val))
         for key, val in self.attributes().items()]
    )
0.002621
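The join treats boolean attribute values as bare flags (e.g. HttpOnly) and everything else as key=value pairs. A standalone sketch of that rendering rule (the helper and its inputs are illustrative):

def render_set_cookie(name, value, attributes):
    parts = ['{0}={1}'.format(name, value)]
    for key, val in attributes.items():
        # Booleans render as bare flags; other values as key=value.
        parts.append(key if isinstance(val, bool) else '='.join((key, val)))
    return '; '.join(parts)

header = render_set_cookie('session', 'abc123',
                           {'Path': '/', 'HttpOnly': True, 'Max-Age': '3600'})
print(header)  # session=abc123; Path=/; HttpOnly; Max-Age=3600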
def result_list(context):
    """
    Displays the headers and data list together
    """
    view = context['view']
    object_list = context['object_list']
    headers = list(result_headers(view))
    num_sorted_fields = 0
    for h in headers:
        if h['sortable'] and h['sorted']:
            num_sorted_fields += 1
    context.update({
        'result_headers': headers,
        'num_sorted_fields': num_sorted_fields,
        'results': list(results(view, object_list))})
    return context
0.002
def home_handler(self, config=None, prefix=None, **args):
    """Handler for /home redirect path after Google auth.

    OAuth ends up back here from Google. Set the account cookie
    and close window to trigger next step.
    """
    gresponse = self.google_get_token(config, prefix)
    gdata = self.google_get_data(config, gresponse)
    email = gdata.get('email', 'NO_EMAIL')
    name = gdata.get('name', 'NO_NAME')
    # Make and store cookie from identity, set and close window
    cookie = self.access_cookie(name + ' ' + email)
    return self.set_cookie_close_window_response(cookie)
0.00314
def _parse_text_DB(self, s):
    """Returns a dict of tables interpreted from s.
    s should be a JSON string encoding a dict
    { table_name : ([field_name, ...], [row, ...]) }"""
    dic = self.decode_json_str(s)
    new_dic = {}
    for table_name, (header, rows) in dic.items():
        newl = [{c: ligne[i] for i, c in enumerate(header)} for ligne in rows]
        new_dic[table_name] = newl
    return new_dic
0.006466
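Each table arrives as a (header, rows) pair and is rewritten as a list of per-row dicts keyed by column name. A standalone demo of the same reshaping using the json module directly (decode_json_str in the row above is assumed to be a wrapper around it):

import json

s = json.dumps({"users": [["id", "name"], [[1, "Ada"], [2, "Linus"]]]})
dic = json.loads(s)

tables = {}
for table_name, (header, rows) in dic.items():
    tables[table_name] = [{col: row[i] for i, col in enumerate(header)} for row in rows]

print(tables)  # {'users': [{'id': 1, 'name': 'Ada'}, {'id': 2, 'name': 'Linus'}]}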
def set_font(font, section='appearance', option='font'):
    """Set font"""
    CONF.set(section, option+'/family', to_text_string(font.family()))
    CONF.set(section, option+'/size', float(font.pointSize()))
    CONF.set(section, option+'/italic', int(font.italic()))
    CONF.set(section, option+'/bold', int(font.bold()))
    FONT_CACHE[(section, option)] = font
0.002732
def is_compatible_space(space, base_space):
    """Check compatibility of a (power) space with a base space.

    Compatibility here means that the spaces are equal or ``space``
    is a non-empty power space of ``base_space`` up to different
    data types.

    Parameters
    ----------
    space, base_space : `LinearSpace`
        Spaces to check for compatibility. ``base_space`` cannot be a
        `ProductSpace`.

    Returns
    -------
    is_compatible : bool
        ``True`` if

        - ``space == base_space`` or
        - ``space.astype(base_space.dtype) == base_space``, provided that
          these properties exist, or
        - ``space`` is a power space of nonzero length and one of the
          three situations applies to ``space[0]`` (recursively).

        Otherwise ``False``.

    Examples
    --------
    Scalar spaces:

    >>> base = odl.rn(2)
    >>> is_compatible_space(odl.rn(2), base)
    True
    >>> is_compatible_space(odl.rn(3), base)
    False
    >>> is_compatible_space(odl.rn(2, dtype='float32'), base)
    True

    Power spaces:

    >>> is_compatible_space(odl.rn(2) ** 2, base)
    True
    >>> is_compatible_space(odl.rn(2) * odl.rn(3), base)  # no power space
    False
    >>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
    True
    """
    if isinstance(base_space, ProductSpace):
        return False

    if isinstance(space, ProductSpace):
        if not space.is_power_space:
            return False
        elif len(space) == 0:
            return False
        else:
            return is_compatible_space(space[0], base_space)
    else:
        if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
            # TODO: maybe only the shape should play a role?
            comp_space = space.astype(base_space.dtype)
        else:
            comp_space = space

        return comp_space == base_space
0.00053
def get_version_info():
    """Extract version information as a dictionary from version.py."""
    version_info = {}
    with open(os.path.join("refcycle", "version.py"), 'r') as f:
        version_code = compile(f.read(), "version.py", 'exec')
        exec(version_code, version_info)
    return version_info
0.003236
def fallback_findfile(filename):
    """
    :param str filename:
    :return: try to find the full filename, e.g. in modules, etc
    :rtype: str|None
    """
    mods = [m for m in sys.modules.values()
            if m and hasattr(m, "__file__") and filename in m.__file__]
    if len(mods) == 0:
        return None
    alt_fn = mods[0].__file__
    if alt_fn[-4:-1] == ".py":
        alt_fn = alt_fn[:-1]  # *.pyc or whatever
    if not os.path.exists(alt_fn) and alt_fn.startswith("./"):
        # Maybe current dir changed.
        alt_fn2 = _cur_pwd + alt_fn[1:]
        if os.path.exists(alt_fn2):
            return alt_fn2
        # Try dirs of some other mods.
        for m in ["__main__", "better_exchook"]:
            if hasattr(sys.modules.get(m), "__file__"):
                alt_fn2 = os.path.dirname(sys.modules[m].__file__) + alt_fn[1:]
                if os.path.exists(alt_fn2):
                    return alt_fn2
    return alt_fn
0.002125
def append_cluster_attribute(self, index_canvas, index_cluster, data, marker=None, markersize=None):
    """!
    @brief Append cluster attribute for cluster on specific canvas.
    @details An attribute is data that is visualized for a specific cluster using its color,
             and its marker and markersize if the last two are not specified.

    @param[in] index_canvas (uint): Index canvas where cluster is located.
    @param[in] index_cluster (uint): Index cluster whose attribute should be added.
    @param[in] data (list): List of points (data) that represents attribute.
    @param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
    @param[in] markersize (uint): Size of marker.

    """

    cluster_descr = self.__canvas_clusters[index_canvas][index_cluster]

    attribute_marker = marker
    if attribute_marker is None:
        attribute_marker = cluster_descr.marker

    attribure_markersize = markersize
    if attribure_markersize is None:
        attribure_markersize = cluster_descr.markersize

    attribute_color = cluster_descr.color

    added_attribute_cluster_descriptor = canvas_cluster_descr(data, None, attribute_marker, attribure_markersize, attribute_color)
    self.__canvas_clusters[index_canvas][index_cluster].attributes.append(added_attribute_cluster_descriptor)
0.012899
def update(cls, first_name=None, middle_name=None, last_name=None,
           public_nick_name=None, address_main=None, address_postal=None,
           avatar_uuid=None, tax_resident=None, document_type=None,
           document_number=None, document_country_of_issuance=None,
           document_front_attachment_id=None,
           document_back_attachment_id=None, date_of_birth=None,
           place_of_birth=None, country_of_birth=None, nationality=None,
           language=None, region=None, gender=None, status=None,
           sub_status=None, legal_guardian_alias=None, session_timeout=None,
           card_ids=None, card_limits=None,
           daily_limit_without_confirmation_login=None,
           notification_filters=None, display_name=None,
           custom_headers=None):
    """
    Modify a specific person object's data.

    :param first_name: The person's first name.
    :type first_name: str
    :param middle_name: The person's middle name.
    :type middle_name: str
    :param last_name: The person's last name.
    :type last_name: str
    :param public_nick_name: The person's public nick name.
    :type public_nick_name: str
    :param address_main: The user's main address.
    :type address_main: object_.Address
    :param address_postal: The person's postal address.
    :type address_postal: object_.Address
    :param avatar_uuid: The public UUID of the user's avatar.
    :type avatar_uuid: str
    :param tax_resident: The user's tax residence numbers for different
        countries.
    :type tax_resident: list[object_.TaxResident]
    :param document_type: The type of identification document the person
        registered with.
    :type document_type: str
    :param document_number: The identification document number the person
        registered with.
    :type document_number: str
    :param document_country_of_issuance: The country which issued the
        identification document the person registered with.
    :type document_country_of_issuance: str
    :param document_front_attachment_id: The reference to the uploaded
        picture/scan of the front side of the identification document.
    :type document_front_attachment_id: int
    :param document_back_attachment_id: The reference to the uploaded
        picture/scan of the back side of the identification document.
    :type document_back_attachment_id: int
    :param date_of_birth: The person's date of birth. Accepts ISO8601 date
        formats.
    :type date_of_birth: str
    :param place_of_birth: The person's place of birth.
    :type place_of_birth: str
    :param country_of_birth: The person's country of birth. Formatted as an
        ISO 3166-1 alpha-2 country code.
    :type country_of_birth: str
    :param nationality: The person's nationality. Formatted as an ISO 3166-1
        alpha-2 country code.
    :type nationality: str
    :param language: The person's preferred language. Formatted as an ISO
        639-1 language code plus an ISO 3166-1 alpha-2 country code, separated
        by an underscore.
    :type language: str
    :param region: The person's preferred region. Formatted as an ISO 639-1
        language code plus an ISO 3166-1 alpha-2 country code, separated by an
        underscore.
    :type region: str
    :param gender: The person's gender. Can be: MALE, FEMALE and UNKNOWN.
    :type gender: str
    :param status: The user status. You are not allowed to update the status
        via PUT.
    :type status: str
    :param sub_status: The user sub-status. Can be updated to SUBMIT if status
        is RECOVERY.
    :type sub_status: str
    :param legal_guardian_alias: The legal guardian of the user. Required for
        minors.
    :type legal_guardian_alias: object_.Pointer
    :param session_timeout: The setting for the session timeout of the user in
        seconds.
    :type session_timeout: int
    :param card_ids: Card ids used for centralized card limits.
:type card_ids: list[object_.BunqId] :param card_limits: The centralized limits for user's cards. :type card_limits: list[object_.CardLimit] :param daily_limit_without_confirmation_login: The amount the user can pay in the session without asking for credentials. :type daily_limit_without_confirmation_login: object_.Amount :param notification_filters: The types of notifications that will result in a push notification or URL callback for this UserPerson. :type notification_filters: list[object_.NotificationFilter] :param display_name: The person's legal name. Available legal names can be listed via the 'user/{user_id}/legal-name' endpoint. :type display_name: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) request_map = { cls.FIELD_FIRST_NAME: first_name, cls.FIELD_MIDDLE_NAME: middle_name, cls.FIELD_LAST_NAME: last_name, cls.FIELD_PUBLIC_NICK_NAME: public_nick_name, cls.FIELD_ADDRESS_MAIN: address_main, cls.FIELD_ADDRESS_POSTAL: address_postal, cls.FIELD_AVATAR_UUID: avatar_uuid, cls.FIELD_TAX_RESIDENT: tax_resident, cls.FIELD_DOCUMENT_TYPE: document_type, cls.FIELD_DOCUMENT_NUMBER: document_number, cls.FIELD_DOCUMENT_COUNTRY_OF_ISSUANCE: document_country_of_issuance, cls.FIELD_DOCUMENT_FRONT_ATTACHMENT_ID: document_front_attachment_id, cls.FIELD_DOCUMENT_BACK_ATTACHMENT_ID: document_back_attachment_id, cls.FIELD_DATE_OF_BIRTH: date_of_birth, cls.FIELD_PLACE_OF_BIRTH: place_of_birth, cls.FIELD_COUNTRY_OF_BIRTH: country_of_birth, cls.FIELD_NATIONALITY: nationality, cls.FIELD_LANGUAGE: language, cls.FIELD_REGION: region, cls.FIELD_GENDER: gender, cls.FIELD_STATUS: status, cls.FIELD_SUB_STATUS: sub_status, cls.FIELD_LEGAL_GUARDIAN_ALIAS: legal_guardian_alias, cls.FIELD_SESSION_TIMEOUT: session_timeout, cls.FIELD_CARD_IDS: card_ids, cls.FIELD_CARD_LIMITS: card_limits, cls.FIELD_DAILY_LIMIT_WITHOUT_CONFIRMATION_LOGIN: daily_limit_without_confirmation_login, cls.FIELD_NOTIFICATION_FILTERS: notification_filters, cls.FIELD_DISPLAY_NAME: display_name } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id()) response_raw = api_client.put(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
0.002994
def laplacian_pyramid_image(shape, n_levels=4, sd=None):
    """Simple laplacian pyramid parameterization of an image.

    For more flexibility, use a sum of lowres_tensor()s.

    Args:
      shape: shape of resulting image, [batch, width, height, channels].
      n_levels: number of levels of laplacian pyramid.
      sd: standard deviation of param initialization.

    Returns:
      tensor with shape from first argument.
    """
    batch_dims = shape[:-3]
    w, h, ch = shape[-3:]
    pyramid = 0
    for n in range(n_levels):
        k = 2 ** n
        pyramid += lowres_tensor(shape, batch_dims + (w // k, h // k, ch), sd=sd)
    return pyramid
0.003049
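The parameterization sums grids whose resolution halves at each level, so coarse levels control large-scale structure and fine levels control detail. A dependency-light NumPy sketch of the same idea, using nearest-neighbour upsampling via np.kron in place of lowres_tensor (illustrative only, not the library's implementation):

import numpy as np

def laplacian_pyramid_sketch(w, h, n_levels=4, sd=0.01, seed=0):
    # Sum of upsampled low-resolution grids: coarse levels control
    # large-scale structure, fine levels add detail.
    rng = np.random.default_rng(seed)
    image = np.zeros((w, h))
    for n in range(n_levels):
        k = 2 ** n
        low = rng.normal(0.0, sd, size=(w // k, h // k))
        image += np.kron(low, np.ones((k, k)))  # nearest-neighbour upsample
    return image

img = laplacian_pyramid_sketch(64, 64)
print(img.shape)  # (64, 64)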
def html_to_rgb(html): """Convert the HTML color to (r, g, b). Parameters: :html: the HTML definition of the color (#RRGGBB or #RGB or a color name). Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] Throws: :ValueError: If html is neither a known color name or a hexadecimal RGB representation. >>> '(%g, %g, %g)' % html_to_rgb('#ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('#f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon') '(1, 0.980392, 0.803922)' """ html = html.strip().lower() if html[0]=='#': html = html[1:] elif html in NAMED_COLOR: html = NAMED_COLOR[html][1:] if len(html)==6: rgb = html[:2], html[2:4], html[4:] elif len(html)==3: rgb = ['%c%c' % (v,v) for v in html] else: raise ValueError("input #%s is not in #RRGGBB format" % html) return tuple(((int(n, 16) / 255.0) for n in rgb))
0.01184
async def dump_blob(elem, elem_type=None):
    """
    Dumps a blob message. Supports both a blob object and a raw value.

    :param elem: blob object or raw bytes-like value
    :param elem_type: optional element type
    :return: base16-encoded bytes, or b'' for an empty blob
    """
    elem_is_blob = isinstance(elem, x.BlobType)
    data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem
    if data is None or len(data) == 0:
        return b''

    if isinstance(data, (bytes, bytearray, list)):
        return base64.b16encode(bytes(data))
    else:
        raise ValueError('Unknown blob type')
0.001845
def _set_scores(self): """ Set anomaly scores using a weighted sum. """ anom_scores_ema = self.exp_avg_detector.run() anom_scores_deri = self.derivative_detector.run() anom_scores = {} for timestamp in anom_scores_ema.timestamps: # Compute a weighted anomaly score. anom_scores[timestamp] = max(anom_scores_ema[timestamp], anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (1 - DEFAULT_DETECTOR_EMA_WEIGHT)) # If ema score is significant enough, take the bigger one of the weighted score and deri score. if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT: anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp]) self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
0.005411
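The combination rule is easy to check in isolation. A standalone sketch of the weighted sum, with assumed values for the two DEFAULT_DETECTOR_* constants (the library defines its own):

# Assumed illustrative constants; the library's values may differ.
EMA_WEIGHT = 0.65
EMA_SIGNIFICANT = 0.94

def combine_scores(ema_score, deri_score):
    # Weighted blend, floored by the raw EMA score.
    score = max(ema_score, ema_score * EMA_WEIGHT + deri_score * (1 - EMA_WEIGHT))
    # If the EMA score is significant enough, also consider the raw derivative score.
    if ema_score > EMA_SIGNIFICANT:
        score = max(score, deri_score)
    return score

print(combine_scores(0.5, 0.9))    # blend wins: ~0.64
print(combine_scores(0.95, 0.99))  # significant EMA, deri wins: 0.99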
def p_ConstValue_float(p): """ConstValue : FLOAT""" p[0] = model.Value(type=model.Value.FLOAT, value=p[1])
0.027273
def _login_authentication(self, login, password, authz_id=""):
    """SASL LOGIN authentication

    :param login: username
    :param password: clear password
    :param authz_id: authorization identity (accepted but not used by this mechanism)
    :return: True on success, False otherwise.
    """
    extralines = [b'"%s"' % base64.b64encode(login.encode("utf-8")),
                  b'"%s"' % base64.b64encode(password.encode("utf-8"))]
    code, data = self.__send_command("AUTHENTICATE", [b"LOGIN"],
                                     extralines=extralines)
    if code == "OK":
        return True
    return False
0.003413
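The wire framing used above — each credential base64-encoded and wrapped in literal double quotes — can be checked on its own:

import base64

def login_extralines(login, password):
    # Mirrors the framing above: base64-encode each credential and
    # wrap it in literal double quotes for the AUTHENTICATE command.
    return [b'"%s"' % base64.b64encode(login.encode("utf-8")),
            b'"%s"' % base64.b64encode(password.encode("utf-8"))]

print(login_extralines("user", "secret"))
# [b'"dXNlcg=="', b'"c2VjcmV0"']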
def _fix_call_activities_signavio(self, bpmn, filename): """ Signavio produces slightly invalid BPMN for call activity nodes... It is supposed to put a reference to the id of the called process in to the calledElement attribute. Instead it stores a string (which is the name of the process - not its ID, in our interpretation) in an extension tag. This code gets the name of the 'subprocess reference', finds a process with a matching name, and sets the calledElement attribute to the id of the process. """ for node in xpath_eval(bpmn)(".//bpmn:callActivity"): calledElement = node.get('calledElement', None) if not calledElement: signavioMetaData = xpath_eval(node, extra_ns={ 'signavio': SIGNAVIO_NS})( './/signavio:signavioMetaData[@metaKey="entry"]') if not signavioMetaData: raise ValidationException( 'No Signavio "Subprocess reference" specified.', node=node, filename=filename) subprocess_reference = one(signavioMetaData).get('metaValue') matches = [] for b in list(self.bpmn.values()): for p in xpath_eval(b)(".//bpmn:process"): if (p.get('name', p.get('id', None)) == subprocess_reference): matches.append(p) if not matches: raise ValidationException( "No matching process definition found for '%s'." % subprocess_reference, node=node, filename=filename) if len(matches) != 1: raise ValidationException( "More than one matching process definition " " found for '%s'." % subprocess_reference, node=node, filename=filename) node.set('calledElement', matches[0].get('id'))
0.000955
def one_cycle_scheduler(lr_max:float, **kwargs:Any)->OneCycleScheduler: "Instantiate a `OneCycleScheduler` with `lr_max`." return partial(OneCycleScheduler, lr_max=lr_max, **kwargs)
0.021164
def modify_folder_grant(
    self,
    folder_ids,
    perm,
    zid=None,
    grantee_name=None,
    gt='usr',
    flags=None
):
    """
    :param folder_ids: list of ids
    :param perm: permission to grant to the user on folder(s)
    :param zid: id of user to grant rights
    :param grantee_name: email address of user to grant rights
    :param gt: grantee type, defaults to 'usr'
    :param flags: folder's flags
    """
    f_ids = self._return_comma_list(folder_ids)
    params = {'action': {
        'id': f_ids,
        'op': 'grant',
        'grant': {'perm': perm, 'gt': gt}
    }}

    if perm == 'none':
        params['action']['op'] = '!grant'
        params['action']['zid'] = zid
        # Remove the key if no zid was provided, so that zimsoap raises
        # an exception instead of sending an empty value
        if not zid:
            params['action'].pop('zid', None)

    if grantee_name:
        params['action']['grant']['d'] = grantee_name
    elif zid:
        params['action']['grant']['zid'] = zid
    else:
        raise TypeError('missing zid or grantee_name')

    self.request('FolderAction', params)
0.002573
def add_display_name(self, display_name): """Adds a display_name. arg: display_name (displayText): the new display name raise: InvalidArgument - ``display_name`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``display_name`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if self.get_display_names_metadata().is_read_only(): raise NoAccess() if not isinstance(display_name, DisplayText): raise InvalidArgument('display_name must be instance of DisplayText') self.add_or_replace_value('displayNames', display_name)
0.004304
def update_journals(self):
    """773 journal translations."""
    for field in record_get_field_instances(self.record, '773'):
        subs = field_get_subfield_instances(field)
        new_subs = []
        for key, value in subs:
            if key == 'p':
                journal_name = self.get_config_item(value, "journals",
                                                    allow_substring=False)
                journal_name = journal_name.replace('. ', '.').strip()
                new_subs.append((key, journal_name))
            else:
                new_subs.append((key, value))
        record_delete_field(self.record, tag="773",
                            field_position_global=field[4])
        record_add_field(self.record, "773", subfields=new_subs)
0.003788
def r_hat(self): """Get rhat data for the variable.""" _, y_vals, values, colors = self.labels_ticks_and_vals() for y, value, color in zip(y_vals, values, colors): if value.ndim != 2 or value.shape[0] < 2: yield y, None, color else: yield y, _get_split_rhat(value), color
0.005698
def _needs_elements(f):
    '''
    Decorator used to make sure that there are elements prior to
    running the task.
    '''
    # Note: this is a plain function decorator for instance methods; the
    # wrapped method's `self` arrives through the wrapper's signature.
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self.elements is None:
            self.getelements()
        return f(self, *args, **kwargs)
    return wrapper
0.040293
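Stripped of the scraping context, this is the standard lazy-initialization decorator pattern. A self-contained sketch with hypothetical names:

from functools import wraps

def needs_resource(f):
    # Ensure an expensive attribute is populated before the wrapped
    # method runs; fetch it lazily on first use.
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self.resource is None:
            self.resource = self.load_resource()
        return f(self, *args, **kwargs)
    return wrapper

class Client:
    def __init__(self):
        self.resource = None

    def load_resource(self):
        return ["a", "b"]  # stands in for a network fetch

    @needs_resource
    def first(self):
        return self.resource[0]

print(Client().first())  # 'a' -- the resource was fetched on first use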
def gradients(ys, xs, grad_ys=None): """Compute gradients in dtf. Args: ys: a list of Tensors xs: a list of Tensors grad_ys: an optional list of Tensors Returns: grad_xs: a list of Tensors """ graph = ys[0].graph if not grad_ys: grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys] # figure out what Tensors are downstream of xs downstream = set(xs) for op in graph.operations: if op.has_gradient: if set(op.inputs) & downstream: downstream |= set(op.outputs) tensor_to_gradient = dict(zip(ys, grad_ys)) for op in graph.operations[::-1]: grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs] if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream): with tf.variable_scope(op.name + "/gradients"): input_grads = op.gradient(grad_outputs) for inp, grad in zip(op.inputs, input_grads): if inp in downstream and grad is not None: if inp in tensor_to_gradient: tensor_to_gradient[inp] += grad else: tensor_to_gradient[inp] = grad return [tensor_to_gradient.get(x, None) for x in xs]
0.012637
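At its core the routine is reverse-mode accumulation: visit operations in reverse topological order and add each op's contribution into a tensor-to-gradient map. The same bookkeeping done by hand over a two-op graph, with the local gradient rules written inline:

# Reverse-mode accumulation over a tiny hand-built graph:
# y = (x1 * x2) + x1.
x1, x2 = 3.0, 4.0
t = x1 * x2          # op 1: t = x1 * x2
y = t + x1           # op 2: y = t + x1

grad = {"y": 1.0}    # seed, like grad_ys above

# op 2 (add): dy/dt = 1, dy/dx1 = 1
grad["t"] = grad.get("t", 0.0) + grad["y"] * 1.0
grad["x1"] = grad.get("x1", 0.0) + grad["y"] * 1.0

# op 1 (mul): dt/dx1 = x2, dt/dx2 = x1
grad["x1"] = grad.get("x1", 0.0) + grad["t"] * x2
grad["x2"] = grad.get("x2", 0.0) + grad["t"] * x1

print(grad["x1"], grad["x2"])  # 5.0 3.0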
def unget_bytes(self, string):
    """Adds bytes to the internal buffer to be read.

    This method is for reporting bytes from an in_stream
    read not initiated by this Input object"""
    self.unprocessed_bytes.extend(string[i:i + 1]
                                  for i in range(len(string)))
0.00625
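The slicing idiom in the generator expression is deliberate: iterating a Python 3 bytes object yields ints, while s[i:i+1] preserves one-byte bytes objects, which is what a byte-oriented buffer should hold:

s = b"abc"
print(list(s))                              # [97, 98, 99] -- ints
print([s[i:i + 1] for i in range(len(s))])  # [b'a', b'b', b'c'] -- bytes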
def fetch(self, is_dl_forced=False):
    '''Fetch data from the DISCO postgres database using its connection details.'''
    cxn = {}
    cxn['host'] = 'nif-db.crbs.ucsd.edu'
    cxn['database'] = 'disco_crawler'
    cxn['port'] = '5432'
    cxn['user'] = config.get_config()['user']['disco']
    cxn['password'] = config.get_config()['keys'][cxn['user']]

    self.dataset.setFileAccessUrl(
        'jdbc:postgresql://'+cxn['host']+':'+cxn['port']+'/'+cxn['database'],
        is_object_literal=True)

    # process the tables
    # self.fetch_from_pgdb(self.tables, cxn, 100)  # for testing
    self.fetch_from_pgdb(self.tables, cxn)

    self.get_files(is_dl_forced)

    # FIXME: Everything needed for data provenance?
    fstat = os.stat('/'.join((self.rawdir, 'dvp.pr_nlx_157874_1')))
    filedate = datetime.utcfromtimestamp(fstat[ST_CTIME]).strftime("%Y-%m-%d")
    self.dataset.setVersion(filedate)

    return
0.004224
def _apply_axes_mapping(self, target, inverse=False): """ Apply the transposition to the target iterable. Parameters ---------- target - iterable The iterable to transpose. This would be suitable for things such as a shape as well as a list of ``__getitem__`` keys. inverse - bool Whether to map old dimension to new dimension (forward), or new dimension to old dimension (inverse). Default is False (forward). Returns ------- A tuple derived from target which has been ordered based on the new axes. """ if len(target) != self.ndim: raise ValueError('The target iterable is of length {}, but ' 'should be of length {}.'.format(len(target), self.ndim)) if inverse: axis_map = self._inverse_axes_map else: axis_map = self._forward_axes_map result = [None] * self.ndim for axis, item in enumerate(target): result[axis_map[axis]] = item return tuple(result)
0.001676
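The placement loop is a plain permutation applied to a tuple. The same logic without the class machinery, with an illustrative axis map:

def apply_axes_mapping(target, axis_map):
    # Place the item from old axis `axis` at new position axis_map[axis].
    result = [None] * len(target)
    for axis, item in enumerate(target):
        result[axis_map[axis]] = item
    return tuple(result)

shape = (2, 3, 4)
forward = {0: 2, 1: 0, 2: 1}  # old axis -> new axis
inverse = {v: k for k, v in forward.items()}

moved = apply_axes_mapping(shape, forward)
print(moved)                               # (3, 4, 2)
print(apply_axes_mapping(moved, inverse))  # (2, 3, 4) -- round trip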
def _setup(self):
    """ Generates _reverse_map from _map (via ValueMap._setup) and
    computes the map size """
    ValueMap._setup(self)
    cls = self.__class__
    if cls._map is not None:
        cls._size = max(self._map.keys()) + 1
0.00885
def getPiGosper(n):
    """Returns a list containing the first n digits of pi
    """
    mypi = piGenGosper()
    result = []
    if n > 0:
        result += [next(mypi) for _ in range(n)]
    mypi.close()
    return result
0.004545
def refresh_from_server(self): """Refresh the group from the server in place.""" group = self.manager.get(id=self.id) self.__init__(self.manager, **group.data)
0.010929
def set_attributes(path, archive=None, hidden=None, normal=None,
                   notIndexed=None, readonly=None, system=None,
                   temporary=None):
    '''
    Set file attributes for a file. Note that the normal attribute means that
    all others are false. So setting it will clear all others.

    Args:
        path (str): The path to the file or directory
        archive (bool): Sets the archive attribute. Default is None
        hidden (bool): Sets the hidden attribute. Default is None
        normal (bool): Resets the file attributes. Cannot be used in
            conjunction with any other attribute. Default is None
        notIndexed (bool): Sets the not-content-indexed attribute. Default is
            None
        readonly (bool): Sets the readonly attribute. Default is None
        system (bool): Sets the system attribute. Default is None
        temporary (bool): Sets the temporary attribute. Default is None

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_attributes c:\\temp\\a.txt normal=True
        salt '*' file.set_attributes c:\\temp\\a.txt readonly=True hidden=True
    '''
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

    if normal:
        if archive or hidden or notIndexed or readonly or system or temporary:
            raise CommandExecutionError(
                'Normal attribute may not be used with any other attributes')
        # 128 == FILE_ATTRIBUTE_NORMAL, which resets all other attributes
        ret = win32file.SetFileAttributes(path, 128)
        return True if ret is None else False

    # Get current attributes
    intAttributes = win32file.GetFileAttributes(path)

    # individually set or clear bits for the appropriate attributes;
    # clearing uses the bitwise complement so higher-order attribute
    # bits are preserved
    if archive is not None:
        if archive:
            intAttributes |= 0x20
        else:
            intAttributes &= ~0x20
    if hidden is not None:
        if hidden:
            intAttributes |= 0x2
        else:
            intAttributes &= ~0x2
    if notIndexed is not None:
        if notIndexed:
            intAttributes |= 0x2000
        else:
            intAttributes &= ~0x2000
    if readonly is not None:
        if readonly:
            intAttributes |= 0x1
        else:
            intAttributes &= ~0x1
    if system is not None:
        if system:
            intAttributes |= 0x4
        else:
            intAttributes &= ~0x4
    if temporary is not None:
        if temporary:
            intAttributes |= 0x100
        else:
            intAttributes &= ~0x100
    ret = win32file.SetFileAttributes(path, intAttributes)
    return True if ret is None else False
0.000754
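The set/clear logic is the usual bit-flag pattern. A standalone sketch with the same documented FILE_ATTRIBUTE_* values used above:

# FILE_ATTRIBUTE_* flag values used above.
READONLY, HIDDEN, ARCHIVE, NOT_INDEXED = 0x1, 0x2, 0x20, 0x2000

def with_flag(attrs, flag, enable):
    # Set the bit when enabling, clear it via the complement when disabling.
    return attrs | flag if enable else attrs & ~flag

attrs = 0
attrs = with_flag(attrs, READONLY, True)
attrs = with_flag(attrs, HIDDEN, True)
attrs = with_flag(attrs, HIDDEN, False)
print(hex(attrs))              # 0x1 -- only readonly remains set
print(bool(attrs & READONLY))  # True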
def commit_index(self, message): """ Commit the current index. :param message: str :return: str the generated commit sha """ tree_id = self.write_tree() args = ['commit-tree', tree_id, '-p', self.ref_head] # todo, this can end in a race-condition with other processes adding commits commit = self.command_exec(args, message)[0].decode('utf-8').strip() self.command_exec(['update-ref', self.ref_head, commit]) return commit
0.005848
def import_str(self, csv, params=None):
    """
    Imports a CSV string.

    https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.create
    """
    # Avoid a mutable default argument: the method mutates params below,
    # which would otherwise leak state across calls.
    if params is None:
        params = {}

    if not self._canvas_account_id:
        raise MissingAccountID()

    params["import_type"] = SISImportModel.CSV_IMPORT_TYPE
    url = SIS_IMPORTS_API.format(
        self._canvas_account_id) + ".json{}".format(self._params(params))
    headers = {"Content-Type": "text/csv"}

    return SISImportModel(data=self._post_resource(url, headers, csv))
0.003534
def initialise_logging(level: str, target: str, short_format: bool): """Initialise basic logging facilities""" try: log_level = getattr(logging, level) except AttributeError: raise SystemExit( "invalid log level %r, expected any of 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'" % level ) handler = create_handler(target=target) logging.basicConfig( level=log_level, format='%(asctime)-15s (%(process)d) %(message)s' if not short_format else '%(message)s', datefmt='%Y-%m-%d %H:%M:%S', handlers=[handler] )
0.004983
def _not_empty(self, view, slice_):
    """Checks that the slice is not too sparse (density above the minimum)."""
    img2d = self._get_axis(self._image, view, slice_)
    return (np.count_nonzero(img2d) / img2d.size) > self._min_density
0.009217
def STORE_SLICE_1(self, instr): 'obj[lower:] = expr' lower = self.ast_stack.pop() value = self.ast_stack.pop() expr = self.ast_stack.pop() kw = dict(lineno=instr.lineno, col_offset=0) slice = _ast.Slice(lower=lower, step=None, upper=None, **kw) subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw) assign = _ast.Assign(targets=[subscr], value=expr, **kw) self.ast_stack.append(assign)
0.006263
def get_data_size(self, sport, plan, from_day, from_month, from_year, to_day, to_month, to_year,
                  event_id=None, event_name=None, market_types_collection=None,
                  countries_collection=None, file_type_collection=None, session=None):
    """
    Returns a dictionary of the file count and the combined size of the files.

    :param sport: sport to filter data for.
    :param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan.
    :param from_day: day of month to start data from.
    :param from_month: month to start data from.
    :param from_year: year to start data from.
    :param to_day: day of month to end data at.
    :param to_month: month to end data at.
    :param to_year: year to end data at.
    :param event_id: id of a specific event to get data for.
    :param event_name: name of a specific event to get data for.
    :param market_types_collection: list of specific marketTypes to filter for.
    :param countries_collection: list of countries to filter for.
    :param file_type_collection: list of file types.
    :param requests.session session: Requests session object
    :rtype: dict
    """
    params = clean_locals(locals())
    method = 'GetAdvBasketDataSize'
    (response, elapsed_time) = self.request(method, params, session)
    return response
0.005666
def is_valid_query(self, query): """ Return True if the search query is valid. e.g.: * not empty, * not too short, """ # No query, no item if not query: return False # Query is too short, no item if len(query) < self.get_query_size_min(): return False return True
0.005319
def traverse(self, root = None, display = None, q = None):
    '''
    API: traverse(self, root = None, display = None, q = None)
    Description:
    Traverses the tree starting from the node named root. The strategy
    (BFS, DFS) is controlled by argument q: it is a BFS if q is a Queue(),
    a DFS if q is a Stack(). Starts the search from the root argument if it
    is given, from the root node of the tree otherwise.
    Pre:
    Node indicated by root argument should exist.
    Input:
    root: Starting node name.
    display: Display argument.
    q: Queue() for BFS or Stack() for DFS. Defaults to a new Stack().
    '''
    if root is None:
        root = self.root
    if display is None:
        display = self.attr['display']
    if q is None:
        # Create the default Stack here rather than as a mutable default
        # argument, which would be shared across calls.
        q = Stack()
    if isinstance(q, Queue):
        addToQ = q.enqueue
        removeFromQ = q.dequeue
    elif isinstance(q, Stack):
        addToQ = q.push
        removeFromQ = q.pop
    addToQ(root)
    while not q.isEmpty():
        current = removeFromQ()
        #print current
        if display:
            self.display(highlight = [current])
        for n in self.get_children(current):
            addToQ(n)
0.009924
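The container choice is the entire algorithm: a LIFO stack gives depth-first order and a FIFO queue gives breadth-first. A self-contained demonstration over a small dict-based tree, using collections.deque in both roles:

from collections import deque

tree = {"r": ["a", "b"], "a": ["c", "d"], "b": [], "c": [], "d": []}

def walk(root, lifo):
    q = deque([root])
    order = []
    while q:
        current = q.pop() if lifo else q.popleft()  # stack vs. queue
        order.append(current)
        for child in tree[current]:
            q.append(child)
    return order

print(walk("r", lifo=True))   # DFS: ['r', 'b', 'a', 'd', 'c']
print(walk("r", lifo=False))  # BFS: ['r', 'a', 'b', 'c', 'd']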
def set_ssl(self,
            for_hosts=(),
            key_file=None,
            cert_file=None,
            ca_certs=None,
            cert_validator=None,
            ssl_version=DEFAULT_SSL_VERSION,
            password=None):
    """
    Sets up SSL configuration for the given hosts. This ensures the socket is wrapped
    in an SSL connection, raising an exception if the SSL module can't be found.

    :param for_hosts: a list of tuples describing hosts this SSL configuration should be applied to
    :param cert_file: the path to a X509 certificate
    :param key_file: the path to a X509 key file
    :param ca_certs: the path to a file containing CA certificates to validate the server against.
                     If this is not set, server side certificate validation is not done.
    :param cert_validator: function which performs extra validation on the client certificate, for example
                           checking the returned certificate has a commonName attribute equal to the
                           hostname (to avoid man in the middle attacks).
                           The signature is: (OK, err_msg) = validation_function(cert, hostname)
                           where OK is a boolean, and cert is a certificate structure
                           as returned by ssl.SSLSocket.getpeercert()
    :param ssl_version: SSL protocol to use for the connection. This should be one of the PROTOCOL_x
                        constants provided by the ssl module. The default is ssl.PROTOCOL_TLSv1
    """
    if not ssl:
        raise Exception("SSL connection requested, but SSL library not found")

    for host_port in for_hosts:
        self.__ssl_params[host_port] = dict(key_file=key_file,
                                            cert_file=cert_file,
                                            ca_certs=ca_certs,
                                            cert_validator=cert_validator,
                                            ssl_version=ssl_version,
                                            password=password)
0.009116
def order_enum(field, members): """ Make an annotation value that can be used to sort by an enum field. ``field`` The name of an EnumChoiceField. ``members`` An iterable of Enum members in the order to sort by. Use like: .. code-block:: python desired_order = [MyEnum.bar, MyEnum.baz, MyEnum.foo] ChoiceModel.objects\\ .annotate(my_order=order_enum('choice', desired_order))\\ .order_by('my_order') As Enums are iterable, ``members`` can be the Enum itself if the default ordering is desired: .. code-block:: python ChoiceModel.objects\\ .annotate(my_order=order_enum('choice', MyEnum))\\ .order_by('my_order') .. warning:: On Python 2, Enums may not have a consistent order, depending upon how they were defined. You can set an explicit order using ``__order__`` to fix this. See the ``enum34`` docs for more information. Any enum members not present in the list of members will be sorted to the end of the results. """ members = list(members) return Case( *(When(**{field: member, 'then': i}) for i, member in enumerate(members)), default=len(members), output_field=IntegerField())
0.000769
def locateChild(self, context, segments): """ Unwrap the wrapped resource if HTTPS is already being used, otherwise wrap it in a helper which will preserve the wrapping all the way down to the final resource. """ request = IRequest(context) if request.isSecure(): return self.wrappedResource, segments return _SecureWrapper(self.urlGenerator, self.wrappedResource), segments
0.006667
def lookup_job_tasks(self,
                     statuses,
                     user_ids=None,
                     job_ids=None,
                     job_names=None,
                     task_ids=None,
                     task_attempts=None,
                     labels=None,
                     create_time_min=None,
                     create_time_max=None,
                     max_tasks=0,
                     page_size=0):
    """Yields operations based on the input criteria.

    If any of the filters are empty or {'*'}, then no filtering is performed
    on that field. Filtering by both a job id list and job name list is
    unsupported.

    Args:
      statuses: {'*'}, or a list of job status strings to return. Valid
        status strings are 'RUNNING', 'SUCCESS', 'FAILURE', or 'CANCELED'.
      user_ids: a list of ids for the user(s) who launched the job.
      job_ids: a list of job ids to return.
      job_names: a list of job names to return.
      task_ids: a list of specific tasks within the specified job(s) to return.
      task_attempts: a list of specific attempts within the specified tasks(s)
        to return.
      labels: a list of LabelParam with user-added labels. All labels must
        match the task being fetched.
      create_time_min: a timezone-aware datetime value for the earliest create
        time of a task, inclusive.
      create_time_max: a timezone-aware datetime value for the most recent
        create time of a task, inclusive.
      max_tasks: the maximum number of job tasks to return or 0 for no limit.
      page_size: the page size to use for each query to the pipelines API.

    Raises:
      ValueError: if both a job id list and a job name list are provided

    Yields:
      Genomics API Operations objects.
    """

    # Build a filter for operations to return
    ops_filter = self._build_query_filter(
        statuses, user_ids, job_ids, job_names, task_ids, task_attempts,
        labels, create_time_min, create_time_max)

    # Execute the operations.list() API to get batches of operations to yield
    page_token = None
    tasks_yielded = 0
    while True:
        # If max_tasks is set, let operations.list() know not to send more
        # than we need.
        max_to_fetch = None
        if max_tasks:
            max_to_fetch = max_tasks - tasks_yielded
        ops, page_token = self._operations_list(ops_filter, max_to_fetch,
                                                page_size, page_token)

        for op in ops:
            yield op
            tasks_yielded += 1

        assert (max_tasks >= tasks_yielded or not max_tasks)
        if not page_token or 0 < max_tasks <= tasks_yielded:
            break
0.007692
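The paging loop is a general generator pattern: request a page, yield its items, and stop when the token runs out or the task cap is reached. A sketch against a fake page source (all names here are illustrative):

def fake_list(max_to_fetch, page_size, page_token):
    # Stand-in for an API call: serves the integers 0..24 in pages.
    start = page_token or 0
    n = min(page_size, max_to_fetch or page_size, 25 - start)
    items = list(range(start, start + n))
    next_token = start + n if start + n < 25 else None
    return items, next_token

def lookup(max_tasks=0, page_size=10):
    page_token, yielded = None, 0
    while True:
        max_to_fetch = (max_tasks - yielded) if max_tasks else None
        items, page_token = fake_list(max_to_fetch, page_size, page_token)
        for item in items:
            yield item
            yielded += 1
        if not page_token or 0 < max_tasks <= yielded:
            break

print(len(list(lookup())))        # 25 -- no cap
print(list(lookup(max_tasks=7)))  # [0, 1, 2, 3, 4, 5, 6]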
def company(random=random, *args, **kwargs): """ Produce a company name >>> mock_random.seed(0) >>> company(random=mock_random) 'faculty of applied chimp' >>> mock_random.seed(1) >>> company(random=mock_random) 'blistersecret studios' >>> mock_random.seed(2) >>> company(random=mock_random) 'pooppooppoop studios' >>> mock_random.seed(3) >>> company(random=mock_random) 'britchesshop' >>> mock_random.seed(4) >>> company(random=mock_random, capitalize=True) 'Mystery Studies Department' >>> mock_random.seed(5) >>> company(random=mock_random, slugify=True) 'the-law-offices-of-magnificentslap-boatbench-and-smellmouse' """ return random.choice([ "faculty of applied {noun}", "{noun}{second_noun} studios", "{noun}{noun}{noun} studios", "{noun}shop", "{noun} studies department", "the law offices of {lastname}, {noun}, and {other_lastname}", "{country} ministry of {plural}", "{city} municipal {noun} department", "{city} plumbing", "department of {noun} studies", "{noun} management systems", "{plural} r us", "inter{verb}", "the {noun} warehouse", "integrated {noun} and {second_noun}", "the {noun} and {second_noun} pub", "e-cyber{verb}", "{adjective}soft", "{domain} Inc.", "{thing} incorporated", "{noun}co", ]).format(noun=noun(random=random), plural=plural(random=random), country=country(random=random), city=city(random=random), adjective=adjective(random=random), lastname=lastname(random=random), other_lastname=lastname(random=random), domain=domain(random=random), second_noun=noun(random=random), verb=verb(random=random), thing=thing(random=random))
0.000509
def feature_list(): """ Check the library for compile-time features. The list of features are maintained in libinfo.h and libinfo.cc Returns ------- list List of :class:`.Feature` objects """ lib_features_c_array = ctypes.POINTER(Feature)() lib_features_size = ctypes.c_size_t() check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size))) features = [lib_features_c_array[i] for i in range(lib_features_size.value)] return features
0.007576
def event_log_filter_between_date(start, end, utc): """betweenDate Query filter that SoftLayer_EventLog likes :param string start: lower bound date in mm/dd/yyyy format :param string end: upper bound date in mm/dd/yyyy format :param string utc: utc offset. Defaults to '+0000' """ return { 'operation': 'betweenDate', 'options': [ {'name': 'startDate', 'value': [format_event_log_date(start, utc)]}, {'name': 'endDate', 'value': [format_event_log_date(end, utc)]} ] }
0.003676
def dir2cart(d): """ Converts a list or array of vector directions in degrees (declination, inclination) to an array of the direction in cartesian coordinates (x,y,z) Parameters ---------- d : list or array of [dec,inc] or [dec,inc,intensity] Returns ------- cart : array of [x,y,z] Examples -------- >>> pmag.dir2cart([200,40,1]) array([-0.71984631, -0.26200263, 0.64278761]) """ ints = np.ones(len(d)).transpose( ) # get an array of ones to plug into dec,inc pairs d = np.array(d) rad = np.pi/180. if len(d.shape) > 1: # array of vectors decs, incs = d[:, 0] * rad, d[:, 1] * rad if d.shape[1] == 3: ints = d[:, 2] # take the given lengths else: # single vector decs, incs = np.array(float(d[0])) * rad, np.array(float(d[1])) * rad if len(d) == 3: ints = np.array(d[2]) else: ints = np.array([1.]) cart = np.array([ints * np.cos(decs) * np.cos(incs), ints * np.sin(decs) * np.cos(incs), ints * np.sin(incs)]).transpose() return cart
0.001775
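A quick numerical check of the conversion: the docstring example's components fall out of the spherical-coordinate identities, the vector has unit length, and the direction is recoverable:

import numpy as np

dec, inc = 200.0, 40.0
rad = np.pi / 180.0
x = np.cos(dec * rad) * np.cos(inc * rad)
y = np.sin(dec * rad) * np.cos(inc * rad)
z = np.sin(inc * rad)

print(np.round([x, y, z], 6))        # [-0.719846 -0.262003  0.642788]
print(np.hypot(np.hypot(x, y), z))   # ~1.0 -- unit length preserved
# Recover the direction: declination from arctan2, inclination from arcsin.
print(np.degrees(np.arctan2(y, x)) % 360, np.degrees(np.arcsin(z)))  # 200.0 40.0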
def _set_roi_mask(self, roi_mask):
    """Sets a new ROI mask."""
    if isinstance(roi_mask, np.ndarray):  # i.e. not None and not 'auto'
        self._verify_shape_compatibility(roi_mask, 'ROI set')
        self.roi_mask = roi_mask
        # keep the result of setdiff1d: drop the background value
        # from the list of ROI labels
        self.roi_list = np.setdiff1d(np.unique(roi_mask.flatten()),
                                     cfg.background_value)
    else:
        self.roi_mask = np.ones(self.carpet.shape[:-1])  # last dim is self.fixed_dim already
        self.roi_list = [1, ]
0.009042
def element_statistics(tree, element_type):
    """
    Prints the names and counts of all elements present in an
    `etree._ElementTree`, e.g. a SaltDocument::

        SStructure: 65
        SSpan: 32
        SToken: 154
        STextualDS: 1

    Parameters
    ----------
    tree : lxml.etree._ElementTree
        an ElementTree that represents a complete SaltXML document
    element_type : str
        an XML tag, e.g. 'nodes', 'edges', 'labels'
    """
    elements = get_elements(tree, element_type)
    stats = defaultdict(int)
    for element in elements:
        stats[get_xsi_type(element)] += 1
    for (etype, count) in stats.items():
        print("{0}: {1}".format(etype, count))
0.001408
def set(self, value): """ Sets the value of the object :param value: An integer :raises: ValueError - when an invalid value is passed """ if not isinstance(value, int_types): raise TypeError(unwrap( ''' %s value must be an integer, not %s ''', type_name(self), type_name(value) )) self._native = value self.contents = int_to_bytes(value, signed=False) self._header = None if self._indefinite: self._indefinite = False self.method = 0 if self._trailer != b'': self._trailer = b''
0.002725
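The unsigned big-endian encoding delegated to int_to_bytes can be approximated with the stdlib; a sketch that handles only the unsigned case used here (the library helper also covers signed values):

def int_to_bytes_sketch(value):
    # Unsigned big-endian, minimal length (one byte minimum, so 0 -> b'\x00').
    length = max(1, (value.bit_length() + 7) // 8)
    return value.to_bytes(length, byteorder="big", signed=False)

print(int_to_bytes_sketch(0))      # b'\x00'
print(int_to_bytes_sketch(255))    # b'\xff'
print(int_to_bytes_sketch(65536))  # b'\x01\x00\x00'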
def setCurrentRecord(self, record): """ Sets the current record for this tree to the inputed record. :param record | <orb.Table> """ if self.isLoading(): self._tempCurrentRecord = record return for i in range(self.topLevelItemCount()): if self._setCurrentRecord(self.topLevelItem(i), record): return True return False
0.008677