Dataset preview columns: text (string, lengths 78 to 104k characters) and score (float64, range 0 to 0.18).
def render(self, context):
    """Render."""
    try:
        from django.core.urlresolvers import reverse
    except ImportError:
        # pylint: disable=no-name-in-module, import-error
        from django.urls import reverse

    # Check if we have real or Django static-served media
    if self.static_media_url is not None:
        # Real.
        return os.path.join(self.static_media_url, self.rel_path)

    # Django.
    return reverse("cloud_browser_media", args=[self.rel_path],
                   current_app='cloud_browser')
0.003263
def add_wic(self, old_wic, wic):
    """
    Convert the old style WIC slot to a new style WIC slot and add the WIC
    to the node properties

    :param str old_wic: Old WIC slot
    :param str wic: WIC name
    """
    new_wic = 'wic' + old_wic[-1]
    self.node['properties'][new_wic] = wic
0.006135
def __parse_relation():
    '''Gets and parses file'''
    relation_filename = get_file('relation.tsv')
    vertice_filename = get_file('vertice.tsv')

    relation_textfile = open(relation_filename, 'r')
    vertice_textfile = open(vertice_filename, 'r')

    # Parse vertice:
    vertices = {}
    next(vertice_textfile)

    for line in vertice_textfile:
        tokens = line.strip().split('\t')
        vertices[tokens[0]] = tokens[1]

    next(relation_textfile)

    for line in relation_textfile:
        tokens = line.strip().split('\t')

        source_chebi_id = int(vertices[tokens[3]])
        target_chebi_id = int(vertices[tokens[2]])
        typ = tokens[1]

        if source_chebi_id not in __OUTGOINGS:
            __OUTGOINGS[source_chebi_id] = []
        if target_chebi_id not in __INCOMINGS:
            __INCOMINGS[target_chebi_id] = []

        target_relation = Relation(typ, str(target_chebi_id), tokens[4])
        source_relation = Relation(typ, str(source_chebi_id), tokens[4])

        __OUTGOINGS[source_chebi_id].append(target_relation)
        __INCOMINGS[target_chebi_id].append(source_relation)
0.000884
def _create_client(self, clt_class, url, public=True, special=False):
    """
    Creates a client instance for the service.
    """
    if self.service == "compute" and not special:
        # Novaclient requires different parameters.
        client = pyrax.connect_to_cloudservers(region=self.region,
                context=self.identity, verify_ssl=self.verify_ssl)
        client.identity = self.identity
    else:
        client = clt_class(self.identity, region_name=self.region,
                management_url=url, verify_ssl=self.verify_ssl)
    return client
0.006504
def try_one_generator (project, name, generator, target_type, properties, sources): """ Checks if generator invocation can be pruned, because it's guaranteed to fail. If so, quickly returns empty list. Otherwise, calls try_one_generator_really. """ if __debug__: from .targets import ProjectTarget assert isinstance(project, ProjectTarget) assert isinstance(name, basestring) or name is None assert isinstance(generator, Generator) assert isinstance(target_type, basestring) assert isinstance(properties, property_set.PropertySet) assert is_iterable_typed(sources, virtual_target.VirtualTarget) source_types = [] for s in sources: source_types.append (s.type ()) viable_source_types = viable_source_types_for_generator (generator) if source_types and viable_source_types != ['*'] and\ not set_.intersection (source_types, viable_source_types): if project.manager ().logger ().on (): id = generator.id () project.manager ().logger ().log (__name__, "generator '%s' pruned" % id) project.manager ().logger ().log (__name__, "source_types" '%s' % source_types) project.manager ().logger ().log (__name__, "viable_source_types '%s'" % viable_source_types) return [] else: return try_one_generator_really (project, name, generator, target_type, properties, sources)
0.017808
def get_grade_entries_on_date(self, from_, to):
    """Gets a ``GradeEntryList`` effective during the entire given date
    range inclusive but not confined to the date range.

    arg:    from (osid.calendaring.DateTime): start of date range
    arg:    to (osid.calendaring.DateTime): end of date range
    return: (osid.grading.GradeEntryList) - the returned ``GradeEntry`` list
    raise:  InvalidArgument - ``from`` is greater than ``to``
    raise:  NullArgument - ``from or to`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.relationship.RelationshipLookupSession.get_relationships_on_date
    grade_entry_list = []
    for grade_entry in self.get_grade_entries():
        if overlap(from_, to, grade_entry.start_date, grade_entry.end_date):
            grade_entry_list.append(grade_entry)
    return objects.GradeEntryList(grade_entry_list, runtime=self._runtime)
0.003484
def hide_arp_holder_arp_entry_interfacetype_Ve_Ve(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_arp_holder = ET.SubElement(config, "hide-arp-holder", xmlns="urn:brocade.com:mgmt:brocade-arp")
    arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
    arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
    arp_ip_address_key.text = kwargs.pop('arp_ip_address')
    interfacetype = ET.SubElement(arp_entry, "interfacetype")
    Ve = ET.SubElement(interfacetype, "Ve")
    Ve = ET.SubElement(Ve, "Ve")
    Ve.text = kwargs.pop('Ve')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004098
def get_unique_name(self, cursor): """get the spelling or create a unique name for a cursor""" name = '' if cursor.kind in [CursorKind.UNEXPOSED_DECL]: return '' # covers most cases name = cursor.spelling # if its a record decl or field decl and its type is unnamed if cursor.spelling == '': # a unnamed object at the root TU if (cursor.semantic_parent and cursor.semantic_parent.kind == CursorKind.TRANSLATION_UNIT): name = self.make_python_name(cursor.get_usr()) log.debug('get_unique_name: root unnamed type kind %s',cursor.kind) elif cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL, CursorKind.CLASS_DECL,CursorKind.FIELD_DECL]: name = self._make_unknown_name(cursor) log.debug('Unnamed cursor type, got name %s',name) else: log.debug('Unnamed cursor, No idea what to do') #import code #code.interact(local=locals()) return '' if cursor.kind in [CursorKind.STRUCT_DECL,CursorKind.UNION_DECL, CursorKind.CLASS_DECL]: names= {CursorKind.STRUCT_DECL: 'struct', CursorKind.UNION_DECL: 'union', CursorKind.CLASS_DECL: 'class', CursorKind.TYPE_REF: ''} name = '%s_%s'%(names[cursor.kind],name) log.debug('get_unique_name: name "%s"',name) return name
0.010658
def _iso_handler(obj):
    """
    Transforms an object into its ISO format, if possible.

    If the object can't be transformed, then an error is raised for the
    JSON parser.

    This is meant to be used on datetime instances, but will work with any
    object having a method called isoformat.

    :param obj: object to transform into its ISO format
    :return: the ISO format of the object
    """
    if hasattr(obj, 'isoformat'):
        result = obj.isoformat()
    else:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))

    return result
0.00303
def create_organization(organization): """ Inserts a new organization into app/local state given the following dictionary: { 'name': string, 'description': string } Returns an updated dictionary including a new 'id': integer field/value """ # Trust, but verify... if not organization.get('name'): exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organization_obj = serializers.deserialize_organization(organization) try: organization = internal.Organization.objects.get( name=organization_obj.name, ) # If the organization exists, but was inactivated, we can simply turn it back on if not organization.active: _activate_organization(organization_obj) except internal.Organization.DoesNotExist: organization = internal.Organization.objects.create( name=organization_obj.name, short_name=organization_obj.short_name, description=organization_obj.description, logo=organization_obj.logo, active=True ) return serializers.serialize_organization(organization)
0.003314
def binarize(x, values, threshold=None, included_in='upper'):
    """Binarizes the values of x.

    Parameters
    ----------
    values : tuple of two floats
        The lower and upper value to which the inputs are mapped.
    threshold : float
        The threshold; defaults to (values[0] + values[1]) / 2 if None.
    included_in : str
        Whether the threshold value itself belongs to the lower or upper
        interval.
    """
    lower, upper = values

    if threshold is None:
        threshold = (lower + upper) / 2.

    x = x.copy()
    if included_in == 'lower':
        x[x <= threshold] = lower
        x[x > threshold] = upper
    elif included_in == 'upper':
        x[x < threshold] = lower
        x[x >= threshold] = upper
    else:
        raise ValueError('included_in must be "lower" or "upper"')

    return x
0.001189
def walk_files_for(paths, supported_extensions):
    """
    Iterating files for given extensions.

    Args:
        supported_extensions (list): supported file extension for which to check loc and com.

    Returns:
        str: yield each full path and filename found.
    """
    for path in paths:
        for root, _, files in os.walk(path):
            if Application.ignore_path(root.replace(path, '')):
                continue
            for filename in files:
                extension = os.path.splitext(filename)[1]
                if extension in supported_extensions:
                    yield path, os.path.join(root, filename), extension
0.004184
def dump(self, force=False):
    """
    Encodes the value using DER

    :param force:
        If the encoded contents already exist, clear them and regenerate
        to ensure they are in DER format instead of BER format

    :return:
        A byte string of the DER-encoded value
    """
    contents = b''
    for child in self._children:
        contents += child.dump(force=force)
    return contents
0.004376
def __safe_validation_callback(self, event): # type: (str) -> Any """ Calls the ``@ValidateComponent`` or ``@InvalidateComponent`` callback, ignoring raised exceptions :param event: The kind of life-cycle callback (in/validation) :return: The callback result, or None """ if self.state == StoredInstance.KILLED: # Invalid state return None try: return self.__validation_callback(event) except FrameworkException as ex: # Important error self._logger.exception( "Critical error calling back %s: %s", self.name, ex ) # Kill the component self._ipopo_service.kill(self.name) # Store the exception as it is a validation error self.error_trace = traceback.format_exc() if ex.needs_stop: # Framework must be stopped... self._logger.error( "%s said that the Framework must be stopped.", self.name ) self.bundle_context.get_framework().stop() return False except: self._logger.exception( "Component '%s': error calling @ValidateComponent callback", self.name, ) # Store the exception as it is a validation error self.error_trace = traceback.format_exc() return False
0.002686
def add_external_reference_to_entity(self, entity_id, ext_ref):
    """
    Adds an external reference to an entity specified by the entity identifier
    @param entity_id: the entity identifier
    @type entity_id: string
    @param ext_ref: the external reference
    @type ext_ref: L{CexternalReference}
    """
    node_entity = self.map_entity_id_to_node.get(entity_id)
    if node_entity is not None:
        entity = Centity(node_entity, self.type)
        entity.add_external_reference(ext_ref)
    else:
        print>>sys.stderr, 'Trying to add a reference to the entity', entity_id, 'but can not be found in this file'
0.014837
def mouseReleaseEvent(self, event): """Create a new event or marker, or show the previous power spectrum """ if not self.scene: return if self.event_sel: return if self.deselect: self.deselect = False return if not self.ready: return chk_marker = self.parent.notes.action['new_bookmark'].isChecked() chk_event = self.parent.notes.action['new_event'].isChecked() y_distance = self.parent.value('y_distance') if chk_marker or chk_event: x_in_scene = self.mapToScene(event.pos()).x() y_in_scene = self.mapToScene(event.pos()).y() # it can happen that selection is empty (f.e. double-click) if self.sel_xy[0] is not None: # max resolution = sampling frequency # in case there is no data s_freq = self.parent.info.dataset.header['s_freq'] at_s_freq = lambda x: round(x * s_freq) / s_freq start = at_s_freq(self.sel_xy[0]) end = at_s_freq(x_in_scene) if abs(end - start) < self.parent.value('min_marker_dur'): end = start if start <= end: time = (start, end) else: time = (end, start) if chk_marker: self.parent.notes.add_bookmark(time) elif chk_event and start != end: eventtype = self.parent.notes.idx_eventtype.currentText() # if dragged across > 1.5 chan, event is marked on all chan if abs(y_in_scene - self.sel_xy[1]) > 1.5 * y_distance: chan = '' else: chan_idx = int(floor(self.sel_xy[1] / y_distance)) chan = self.chan[chan_idx] self.parent.notes.add_event(eventtype, time, chan) else: # normal selection if self.idx_info in self.scene.items(): self.scene.removeItem(self.idx_info) self.idx_info = None # restore spectrum self.parent.spectrum.update() self.parent.spectrum.display_window() # general garbage collection self.sel_chan = None self.sel_xy = (None, None) if self.idx_sel in self.scene.items(): self.scene.removeItem(self.idx_sel) self.idx_sel = None
0.001936
def transpose(self, rows):
    """ Transposes the grid to allow for cols """
    res = OrderedDict()
    for row, cols in rows.items():
        for col, cell in cols.items():
            if col not in res:
                res[col] = OrderedDict()
            res[col][row] = cell
    return res
0.005848
def set_raw(self, *args):
    """ writes all *args as styled commands 'm' to the handle """
    if not self.enabled:
        return
    args = map(lambda x: str(x), [x for x in args if x is not None])
    self._write_raw(';'.join(args) + 'm')
0.011278
def get_console_info(kernel32, handle): """Get information about this current console window. http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231 https://code.google.com/p/colorama/issues/detail?id=47 https://bitbucket.org/pytest-dev/py/src/4617fe46/py/_io/terminalwriter.py Windows 10 Insider since around February 2016 finally introduced support for ANSI colors. No need to replace stdout and stderr streams to intercept colors and issue multiple SetConsoleTextAttribute() calls for these consoles. :raise OSError: When GetConsoleScreenBufferInfo or GetConsoleMode API calls fail. :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance. :param int handle: stderr or stdout handle. :return: Foreground and background colors (integers) as well as native ANSI support (bool). :rtype: tuple """ # Query Win32 API. csbi = ConsoleScreenBufferInfo() # Populated by GetConsoleScreenBufferInfo. lpcsbi = ctypes.byref(csbi) dword = ctypes.c_ulong() # Populated by GetConsoleMode. lpdword = ctypes.byref(dword) if not kernel32.GetConsoleScreenBufferInfo(handle, lpcsbi) or not kernel32.GetConsoleMode(handle, lpdword): raise ctypes.WinError() # Parse data. # buffer_width = int(csbi.dwSize.X - 1) # buffer_height = int(csbi.dwSize.Y) # terminal_width = int(csbi.srWindow.Right - csbi.srWindow.Left) # terminal_height = int(csbi.srWindow.Bottom - csbi.srWindow.Top) fg_color = csbi.wAttributes % 16 bg_color = csbi.wAttributes & 240 native_ansi = bool(dword.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) return fg_color, bg_color, native_ansi
0.004172
def findArgs(args, prefixes):
    """
    Extracts the list of arguments that start with any of the specified prefix values
    """
    return list([
        arg for arg in args
        if len([p for p in prefixes if arg.lower().startswith(p.lower())]) > 0
    ])
0.041152
def _generate_time_steps(self, trajectory_list): """A generator to yield single time-steps from a list of trajectories.""" for single_trajectory in trajectory_list: assert isinstance(single_trajectory, trajectory.Trajectory) # Skip writing trajectories that have only a single time-step -- this # could just be a repeated reset. if single_trajectory.num_time_steps <= 1: continue for index, time_step in enumerate(single_trajectory.time_steps): # The first time-step doesn't have reward/processed_reward, if so, just # setting it to 0.0 / 0 should be OK. raw_reward = time_step.raw_reward if not raw_reward: raw_reward = 0.0 processed_reward = time_step.processed_reward if not processed_reward: processed_reward = 0 action = time_step.action if action is None: # The last time-step doesn't have action, and this action shouldn't be # used, gym's spaces have a `sample` function, so let's just sample an # action and use that. action = self.action_space.sample() action = gym_spaces_utils.gym_space_encode(self.action_space, action) if six.PY3: # py3 complains that, to_example cannot handle np.int64 ! action_dtype = self.action_space.dtype if action_dtype in [np.int64, np.int32]: action = list(map(int, action)) elif action_dtype in [np.float64, np.float32]: action = list(map(float, action)) # same with processed_reward. processed_reward = int(processed_reward) assert time_step.observation is not None yield { TIMESTEP_FIELD: [index], ACTION_FIELD: action, # to_example errors on np.float32 RAW_REWARD_FIELD: [float(raw_reward)], PROCESSED_REWARD_FIELD: [processed_reward], # to_example doesn't know bools DONE_FIELD: [int(time_step.done)], OBSERVATION_FIELD: gym_spaces_utils.gym_space_encode(self.observation_space, time_step.observation), }
0.008961
def select_torrent(self): """Select torrent. First check if specific element/info is obtained in content_page. Specify to user if it wants best rated torrent or select one from list. If the user wants best rated: Directly obtain magnet/torrent. Else: build table with all data and enable the user select the torrent. """ try: self.found_torrents = not bool(self.key_search in self.content_page.text) if not self.found_torrents: print('No torrents found.') sys.exit(1) self.soupify() if self.mode_search == 'list': self.build_table() if len(self.hrefs) == 1: print('Press "0" to download it.') elif len(self.hrefs) >= 2: print('\nSelect one of the following torrents. ' + 'Enter a number between: 0 and ' + str(len(self.hrefs) - 1)) print('If you want to exit write "' + Colors.LRED + 'Q' + Colors.ENDC + '" or "' + Colors.LRED + 'q' + Colors.ENDC + '".') print('If you want to go back to menu and search again write "' + Colors.LGREEN + 'B' + Colors.ENDC + '" or "' + Colors.LGREEN + 'b' + Colors.ENDC + '".') while not(self.picked_choice): self.picked_choice = self.handle_select() except Exception: print('ERROR select_torrent: ') logging.error(traceback.format_exc()) sys.exit(0)
0.001762
def write(self, proto):
    """Populate serialization proto instance.

    :param proto: (TemporalMemoryShimProto) the proto instance to populate
    """
    super(TemporalMemoryShim, self).write(proto.baseTM)
    proto.connections.write(self.connections)
    proto.predictiveCells = self.predictiveCells
0.003289
def update(self):
    """It updates process information of the cgroup."""
    pids = fileops.readlines(self.paths['cgroup.procs'])
    self.pids = [int(pid) for pid in pids if pid != '']
    self.n_procs = len(pids)
0.008658
def areIndicesValid(self, inds):
    """
    Test if indices are valid
    @param inds index set
    @return True if valid, False otherwise
    """
    return reduce(operator.and_,
                  [0 <= inds[d] < self.dims[d] for d in range(self.ndims)],
                  True)
0.006536
def ParseFileHash(hash_obj, result):
    """Parses Hash rdfvalue into ExportedFile's fields."""
    if hash_obj.HasField("md5"):
        result.hash_md5 = str(hash_obj.md5)

    if hash_obj.HasField("sha1"):
        result.hash_sha1 = str(hash_obj.sha1)

    if hash_obj.HasField("sha256"):
        result.hash_sha256 = str(hash_obj.sha256)

    if hash_obj.HasField("pecoff_md5"):
        result.pecoff_hash_md5 = str(hash_obj.pecoff_md5)

    if hash_obj.HasField("pecoff_sha1"):
        result.pecoff_hash_sha1 = str(hash_obj.pecoff_sha1)

    if hash_obj.HasField("signed_data"):
        StatEntryToExportedFileConverter.ParseSignedData(hash_obj.signed_data[0], result)
0.009777
def write_records(records, output_file, split=False):
    """Write FASTA records

    Write a FASTA file from an iterable of records.

    Parameters
    ----------
    records : iterable
        Input records to write.
    output_file : file, str or pathlib.Path
        Output FASTA file to be written into.
    split : bool, optional
        If True, each record is written into its own separate file.
        Default is False.
    """
    if split:
        for record in records:
            with open(
                "{}{}.fa".format(output_file, record.id), "w"
            ) as record_handle:
                SeqIO.write(record, record_handle, "fasta")
    else:
        SeqIO.write(records, output_file, "fasta")
0.001383
def generate_all_paired_mutations_for_position(self, chain_ids, chain_sequence_mappings = {}, residue_ids_to_ignore = [], typed_residue_ids_to_ignore = [], silent = True): '''Generates a set of mutations for the chains in chain_ids where each set corresponds to the "same" residue (see below) in both chains and where the wildtype residues match. e.g. if chain A and B both have K19 then the set of mutations K19A, ... K19I, K19L, K19Y will be included in in the returned results unless 19 is in residue_ids_to_ignore or typed_residue_ids_to_ignore. residue_ids_to_ignore should be a list/set of residue IDs. typed_residue_ids_to_ignore should be a dict residue ID -> residue AA. It is used similarly to residue_ids_to_ignore but we also assert that the residue types match the sequences in the chains. By default, "same residue" is inferred by residue ID i.e. the generation assumes that a residue with some ID in one chain corresponds to the residue with the same ID in another chain. If this is not true then a mapping between chain residues is necessary and should be provided using the chain_sequence_mappings parameter. chain_sequence_mappings should be a dict from pairs of chain IDs to SequenceMap objects. As all sequences are compared with the first chain in chain_ids, only mappings from that first chain to any other chain are used. This function is useful in certain cases e.g. generating a set of mutations where we make the same mutation in both chains of a homodimer or a quasi-homodimer (where we only mutate the positions which agree). ''' residue_ids_to_ignore = set([str(r).strip() for r in residue_ids_to_ignore]) for k, v in typed_residue_ids_to_ignore.iteritems(): typed_residue_ids_to_ignore[k] = v.strip() assert(len(chain_ids) > 0) first_chain = chain_ids[0] mutations = [] if sorted(set(self.atom_sequences.keys()).intersection(set(chain_ids))) == sorted(chain_ids): aas = sorted(residue_type_3to1_map.values()) aas.remove('X') sequence = self.atom_sequences[first_chain] for res_id in sequence.order: chain_res_ids = {} for c in chain_ids: chain_res_ids[c] = c + res_id[1:] if c != first_chain and chain_sequence_mappings.get((first_chain, c)): chain_res_ids[c] = chain_sequence_mappings[(first_chain, c)][res_id] sres_id = str(res_id)[1:].strip() skip = sres_id in residue_ids_to_ignore if not skip and sres_id in typed_residue_ids_to_ignore: for c in chain_ids: if chain_res_ids[c] in self.atom_sequences[c].sequence: if not typed_residue_ids_to_ignore[sres_id] == self.atom_sequences[c][chain_res_ids[c]].ResidueAA: raise Exception('Expected to find {0} at residue {1} but found {2} in chain {3} at this position.'.format(typed_residue_ids_to_ignore[sres_id], sres_id, self.atom_sequences[c][chain_res_id].ResidueAA, c)) skip = True if skip: if not silent: print('Skipping residue {0} as requested.'.format(res_id)) continue for c in chain_ids: if (chain_res_ids[c]) not in self.atom_sequences[c].sequence: if not silent: print('Skipping residue {0} as it is missing from chain {1}.'.format(res_id, c)) skip = True if skip: continue chain_res_aas = set([self.atom_sequences[c][chain_res_ids[c]].ResidueAA for c in chain_ids if chain_res_ids[c] in self.atom_sequences[c].sequence]) if len(chain_res_aas) > 1: if not silent: colortext.warning('Skipping residue {0} as the amino acid type differs between the specified chains.'.format(res_id)) continue wt_aa = chain_res_aas.pop() for mut_aa in aas: if mut_aa != wt_aa: mutations.append([ChainMutation(wt_aa, str(chain_res_ids[c])[1:].strip(), mut_aa, Chain = c) for c in 
chain_ids]) return mutations else: raise Exception('Chain(s) {0} could not be found in the PDB file.'.format(', '.join(sorted(set(chain_ids).difference(set(self.atom_sequences.keys()))))))
0.008022
def screenshot(self):
    """
    Take screenshot with session check

    Returns:
        PIL.Image
    """
    b64data = self.http.get('/screenshot').value
    raw_data = base64.b64decode(b64data)
    from PIL import Image
    buff = io.BytesIO(raw_data)
    return Image.open(buff)
0.006173
def create(self, ip_access_control_list_sid):
    """
    Create a new IpAccessControlListInstance

    :param unicode ip_access_control_list_sid: The SID of the IP Access Control List that you want to associate with the trunk

    :returns: Newly created IpAccessControlListInstance
    :rtype: twilio.rest.trunking.v1.trunk.ip_access_control_list.IpAccessControlListInstance
    """
    data = values.of({'IpAccessControlListSid': ip_access_control_list_sid, })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return IpAccessControlListInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'], )
0.008368
def redirect_to(request, url, permanent=True, query_string=False, **kwargs): r""" Redirect to a given URL. The given url may contain dict-style string formatting, which will be interpolated against the params in the URL. For example, to redirect from ``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf:: urlpatterns = patterns('', (r'^foo/(?P<id>\d+)/$', 'django.views.generic.simple.redirect_to', {'url' : '/bar/%(id)s/'}), ) If the given url is ``None``, a HttpResponseGone (410) will be issued. If the ``permanent`` argument is False, then the response will have a 302 HTTP status code. Otherwise, the status code will be 301. If the ``query_string`` argument is True, then the GET query string from the request is appended to the URL. """ args = request.META.get('QUERY_STRING', '') if url is not None: if kwargs: url = url % kwargs if args and query_string: url = "%s?%s" % (url, args) klass = (permanent and HttpResponsePermanentRedirect or HttpResponseRedirect) return klass(url) else: logger.warning( 'Gone: %s', request.path, extra={ 'status_code': 410, 'request': request }) return HttpResponseGone()
0.000705
def update(self, data=None, timeout=-1, force=''): """Updates server profile template. Args: data: Data to update the resource. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. force: Force the update operation. Returns: A dict with the updated resource data. """ uri = self.data['uri'] resource = deepcopy(self.data) resource.update(data) # Removes related fields to serverHardware in case of unassign if resource.get('serverHardwareUri') is None: resource.pop('enclosureBay', None) resource.pop('enclosureUri', None) self.data = self._helper.update(resource, uri, force, timeout) return self
0.003382
def proto_unknown(theABF):
    """protocol: unknown."""
    abf = ABF(theABF)
    abf.log.info("analyzing as an unknown protocol")
    plot = ABFplot(abf)
    plot.rainbow = False
    plot.title = None
    plot.figure_height, plot.figure_width = SQUARESIZE, SQUARESIZE
    plot.kwargs["lw"] = .5
    plot.figure_chronological()
    plt.gca().set_axis_bgcolor('#AAAAAA')  # different background if unknown protocol
    frameAndSave(abf, "UNKNOWN")
0.027907
def opening(image, radius=None, mask=None, footprint=None):
    '''Do a morphological opening

    image - pixel image to operate on
    radius - use a structuring element with the given radius. If no radius,
             use an 8-connected structuring element.
    mask - if present, only use unmasked pixels for operations
    '''
    eroded_image = grey_erosion(image, radius, mask, footprint)
    return grey_dilation(eroded_image, radius, mask, footprint)
0.00431
def xlsx_blob(self):
    """
    Return the byte stream of an Excel file formatted as chart data for
    the category chart specified in the chart data object.
    """
    xlsx_file = BytesIO()
    with self._open_worksheet(xlsx_file) as (workbook, worksheet):
        self._populate_worksheet(workbook, worksheet)
    return xlsx_file.getvalue()
0.005291
def make_pvc( name, storage_class, access_modes, storage, labels=None, annotations=None, ): """ Make a k8s pvc specification for running a user notebook. Parameters ---------- name: Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. storage_class: String of the name of the k8s Storage Class to use. access_modes: A list of specifying what access mode the pod should have towards the pvc storage: The ammount of storage needed for the pvc """ pvc = V1PersistentVolumeClaim() pvc.kind = "PersistentVolumeClaim" pvc.api_version = "v1" pvc.metadata = V1ObjectMeta() pvc.metadata.name = name pvc.metadata.annotations = (annotations or {}).copy() pvc.metadata.labels = (labels or {}).copy() pvc.spec = V1PersistentVolumeClaimSpec() pvc.spec.access_modes = access_modes pvc.spec.resources = V1ResourceRequirements() pvc.spec.resources.requests = {"storage": storage} if storage_class: pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class}) pvc.spec.storage_class_name = storage_class return pvc
0.003125
def show_slug_with_level(context, page, lang=None, fallback=True):
    """Display slug with level by language."""
    if not lang:
        lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)

    page = get_page_from_string_or_id(page, lang)
    if not page:
        return ''

    return {'content': page.slug_with_level(lang)}
0.002933
def _convert_volume(self, volume):
    """
    This is for ingesting the "volumes" of an app description
    """
    data = {
        'host': volume.get('hostPath'),
        'container': volume.get('containerPath'),
        'readonly': volume.get('mode') == 'RO',
    }
    return data
0.00625
def update(self,
           instance_id: str,
           details: UpdateDetails,
           async_allowed: bool) -> UpdateServiceSpec:
    """
    Further readings `CF Broker API#Update <https://docs.cloudfoundry.org/services/api.html#updating_service_instance>`_

    :param instance_id: Instance id provided by the platform
    :param details: Details about the service to update
    :param async_allowed: Client allows async creation
    :rtype: UpdateServiceSpec
    :raises ErrAsyncRequired: If async is required but not supported
    """
    raise NotImplementedError()
0.00692
def hill_climbing(problem, iterations_limit=0, viewer=None):
    '''
    Hill climbing search.

    If iterations_limit is specified, the algorithm will end after that
    number of iterations. Else, it will continue until it can't find a
    better node than the current one.
    Requires: SearchProblem.actions, SearchProblem.result, and
    SearchProblem.value.
    '''
    return _local_search(problem,
                         _first_expander,
                         iterations_limit=iterations_limit,
                         fringe_size=1,
                         stop_when_no_better=True,
                         viewer=viewer)
0.001565
def connect(self):
    """Create new connection unless we already have one."""
    if not getattr(self._local, 'conn', None):
        try:
            server = self._servers.get()
            logger.debug('Connecting to %s', server)
            self._local.conn = ClientTransport(server, self._framed_transport,
                                               self._timeout, self._recycle)
        except (Thrift.TException, socket.timeout, socket.error):
            logger.warning('Connection to %s failed.', server)
            self._servers.mark_dead(server)
            return self.connect()
    return self._local.conn
0.00625
def edges(self, zfill = 3): """ Returns the aspect ratio of all elements. """ edges = self.split("edges", at = "coords").unstack() edges["lx"] = edges.x[1]-edges.x[0] edges["ly"] = edges.y[1]-edges.y[0] edges["lz"] = edges.z[1]-edges.z[0] edges["l"] = np.linalg.norm(edges[["lx", "ly", "lz"]], axis = 1) edges = (edges.l).unstack() edges.columns = pd.MultiIndex.from_product([["length"], ["e" + "{0}".format(s).zfill(zfill) for s in np.arange(edges.shape[1])]]) edges[("stats", "lmax")] = edges.length.max(axis = 1) edges[("stats", "lmin")] = edges.length.min(axis = 1) edges[("stats", "aspect_ratio")] = edges.stats.lmax / edges.stats.lmin return edges.sort_index(axis = 1)
0.022021
def WeightedLearner(unweighted_learner):
    """Given a learner that takes just an unweighted dataset, return
    one that takes also a weight for each example. [p. 749 footnote 14]"""
    def train(dataset, weights):
        return unweighted_learner(replicated_dataset(dataset, weights))
    return train
0.003268
def add_domain_name(list_name, item_name):
    '''
    Adds a domain name to a domain name list.

    list_name(str): The name of the specific policy domain name list to append to.

    item_name(str): The domain name to append.

    CLI Example:

    .. code-block:: bash

        salt '*' bluecoat_sslv.add_domain_name MyDomainName foo.bar.com
    '''
    payload = {"jsonrpc": "2.0",
               "id": "ID0",
               "method": "add_policy_domain_names",
               "params": [list_name, {"item_name": item_name}]}

    response = __proxy__['bluecoat_sslv.call'](payload, True)

    return _validate_change_result(response)
0.003125
def set(self, value, mode=None):
    """Sets metric value.

    :param int|long value: New value.

    :param str|unicode mode: Update mode.

        * None - Unconditional update.
        * max - Sets metric value if it is greater that the current one.
        * min - Sets metric value if it is less that the current one.

    :rtype: bool
    """
    if mode == 'max':
        func = uwsgi.metric_set_max
    elif mode == 'min':
        func = uwsgi.metric_set_min
    else:
        func = uwsgi.metric_set

    return func(self.name, value)
0.003295
def cleanup_logger(self):
    """Clean up logger to close out file handles.

    After this is called, writing to self.log will get logs ending up
    getting discarded.
    """
    self.log_handler.close()
    self.log.removeHandler(self.log_handler)
0.007273
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
0.002959
def list(self, language=values.unset, model_build=values.unset, status=values.unset, limit=None, page_size=None): """ Lists QueryInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param unicode language: The ISO language-country string that specifies the language used by the Query resources to read :param unicode model_build: The SID or unique name of the Model Build to be queried :param unicode status: The status of the resources to read :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.autopilot.v1.assistant.query.QueryInstance] """ return list(self.stream( language=language, model_build=model_build, status=status, limit=limit, page_size=page_size, ))
0.007383
def LayerTree_loadSnapshot(self, tiles): """ Function path: LayerTree.loadSnapshot Domain: LayerTree Method name: loadSnapshot Parameters: Required arguments: 'tiles' (type: array) -> An array of tiles composing the snapshot. Returns: 'snapshotId' (type: SnapshotId) -> The id of the snapshot. Description: Returns the snapshot identifier. """ assert isinstance(tiles, (list, tuple) ), "Argument 'tiles' must be of type '['list', 'tuple']'. Received type: '%s'" % type( tiles) subdom_funcs = self.synchronous_command('LayerTree.loadSnapshot', tiles=tiles ) return subdom_funcs
0.045313
def get_grade_system_admin_session(self):
    """Gets the ``OsidSession`` associated with the grade system
    administration service.

    return: (osid.grading.GradeSystemAdminSession) - a
            ``GradeSystemAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_grade_system_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_admin()`` is ``true``.*

    """
    if not self.supports_grade_system_admin():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.GradeSystemAdminSession(runtime=self._runtime)
0.00411
def _parse_header_links(response): """ Parse the links from a Link: header field. .. todo:: Links with the same relation collide at the moment. :param bytes value: The header value. :rtype: `dict` :return: A dictionary of parsed links, keyed by ``rel`` or ``url``. """ values = response.headers.getRawHeaders(b'link', [b'']) value = b','.join(values).decode('ascii') with LOG_HTTP_PARSE_LINKS(raw_link=value) as action: links = {} replace_chars = u' \'"' for val in re.split(u', *<', value): try: url, params = val.split(u';', 1) except ValueError: url, params = val, u'' link = {} link[u'url'] = url.strip(u'<> \'"') for param in params.split(u';'): try: key, value = param.split(u'=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links[link.get(u'rel') or link.get(u'url')] = link action.add_success_fields(parsed_links=links) return links
0.000864
def get_proj_info(self, token):
    """
    Return the project info for a given token.

    Arguments:
        token (str): Token to return information for

    Returns:
        JSON: representation of proj_info
    """
    r = self.remote_utils.get_url(self.url() + "{}/info/".format(token))
    return r.json()
0.005747
def post(self, command, data=None):
    """Post data to API."""
    now = calendar.timegm(datetime.datetime.now().timetuple())
    if now > self.expiration:
        auth = self.__open("/oauth/token", data=self.oauth)
        self.__sethead(auth['access_token'])
    return self.__open("%s%s" % (self.api, command), headers=self.head,
                       data=data)
0.005063
def auth(self, request): """ build the request to access to the Twitter website with all its required parms :param request: makes the url to call Twitter + the callback url :return: go to the Twitter website to ask to the user to allow the access of TriggerHappy """ callback_url = self.callback_url(request) twitter = Twython(self.consumer_key, self.consumer_secret) req_token = twitter.get_authentication_tokens( callback_url=callback_url) request.session['oauth_token'] = req_token['oauth_token'] request.session['oauth_token_secret'] = req_token['oauth_token_secret'] return req_token['auth_url']
0.002782
def visit_keyword(self, node):
    """
    Process keyword arguments.
    """
    if self.should_check_whitelist(node):
        if node.arg not in self.whitelist and not node.arg.startswith("debug_"):
            self.violations.append((self.current_logging_call,
                                    WHITELIST_VIOLATION.format(node.arg)))
    if self.should_check_extra_exception(node):
        self.check_exception_arg(node.value)
    super(LoggingVisitor, self).generic_visit(node)
0.00823
def negated(input_words, include_nt=True):
    """
    Determine if input contains negation words
    """
    input_words = [str(w).lower() for w in input_words]
    neg_words = []
    neg_words.extend(NEGATE)
    for word in neg_words:
        if word in input_words:
            return True
    if include_nt:
        for word in input_words:
            if "n't" in word:
                return True
    if "least" in input_words:
        i = input_words.index("least")
        if i > 0 and input_words[i - 1] != "at":
            return True
    return False
0.001779
def empty_hdf5_file(h5f, ifo=None):
    """Determine whether PyCBC-HDF5 file is empty

    A file is considered empty if it contains no groups at the base
    level, or if the ``ifo`` group contains only the ``psd`` dataset.

    Parameters
    ----------
    h5f : `str`
        path of the pycbc_live file to test

    ifo : `str`, optional
        prefix for the interferometer of interest (e.g. ``'L1'``),
        include this for a more robust test of 'emptiness'

    Returns
    -------
    empty : `bool`
        `True` if the file looks to have no content, otherwise `False`
    """
    # the decorator opens the HDF5 file for us, so h5f is guaranteed to
    # be an h5py.Group object
    h5f = h5f.file
    if list(h5f) == []:
        return True
    if ifo is not None and (ifo not in h5f or list(h5f[ifo]) == ['psd']):
        return True
    return False
0.001153
def if_sqlserver_disable_constraints_triggers(session: SqlASession,
                                              tablename: str) -> None:
    """
    If we're running under SQL Server, disable triggers AND constraints for
    the specified table while the resource is held.

    Args:
        session: SQLAlchemy :class:`Session`
        tablename: table name
    """
    with if_sqlserver_disable_constraints(session, tablename):
        with if_sqlserver_disable_triggers(session, tablename):
            yield
0.001949
def rollforward(self, dt):
    """
    Roll provided date forward to next offset only if not on offset.
    """
    if not self.onOffset(dt):
        if self.n >= 0:
            return self._next_opening_time(dt)
        else:
            return self._prev_opening_time(dt)
    return dt
0.006192
def restart_with_reloader(self): """Spawn a new Python interpreter with the same arguments as this one, but running the reloader thread. """ while 1: _log("info", " * Restarting with %s" % self.name) args = _get_args_for_reloading() # a weird bug on windows. sometimes unicode strings end up in the # environment and subprocess.call does not like this, encode them # to latin1 and continue. if os.name == "nt" and PY2: new_environ = {} for key, value in iteritems(os.environ): if isinstance(key, text_type): key = key.encode("iso-8859-1") if isinstance(value, text_type): value = value.encode("iso-8859-1") new_environ[key] = value else: new_environ = os.environ.copy() new_environ["WERKZEUG_RUN_MAIN"] = "true" exit_code = subprocess.call(args, env=new_environ, close_fds=False) if exit_code != 3: return exit_code
0.001753
def extract_value(self, data): """Extract the id key and validate the request structure.""" errors = [] if 'id' not in data: errors.append('Must have an `id` field') if 'type' not in data: errors.append('Must have a `type` field') elif data['type'] != self.type_: errors.append('Invalid `type` specified') if errors: raise ValidationError(errors) # If ``attributes`` is set, we've folded included data into this # relationship. Unserialize it if we have a schema set; otherwise we # fall back below to old behaviour of only IDs. if 'attributes' in data and self.__schema: result = self.schema.load({'data': data, 'included': self.root.included_data}) return result.data if _MARSHMALLOW_VERSION_INFO[0] < 3 else result id_value = data.get('id') if self.__schema: id_value = self.schema.fields['id'].deserialize(id_value) return id_value
0.002918
def load(s, **kwargs):
    """Load yaml file"""
    try:
        return loads(s, **kwargs)
    except TypeError:
        return loads(s.read(), **kwargs)
0.006536
def load_csv_to_model(path, model, field_names=None, delimiter=None, batch_len=10000, dialect=None, num_header_rows=1, mode='rUb', strip=True, clear=False, dry_run=True, ignore_errors=True, verbosity=2): '''Bulk create database records from batches of rows in a csv file.''' reader_kwargs = {} errors = collections.Counter() if delimiter or dialect: reader_kwargs['dialect'] = dialect or 'excel' if delimiter: reader_kwargs['delimiter'] = delimiter reader_kwargs['delimiter'] = str(reader_kwargs['delimiter'][0]) delimiter = reader_kwargs['delimiter'] path = path or './' if not delimiter: for d in ',', '|', '\t', ';': try: return load_csv_to_model(path=path, model=model, field_names=field_names, delimiter=d, batch_len=batch_len, dialect=dialect, num_header_rows=num_header_rows, strip=strip, clear=clear, dry_run=dry_run, ignore_errors=ignore_errors, verbosity=verbosity) except: pass return None if clear: clear_model(model, dry_run=dry_run, verbosity=verbosity) M = 0 with open(path, mode) as f: reader = csv.reader(f, **reader_kwargs) header_rows = [] i = 0 while len(header_rows) < num_header_rows and i < 100: row = reader.next() i += 1 if not row or any(compiled_regex.match(row[0]) for compiled_regex in header_rows_to_ignore): if verbosity > 1: print 'IGNORED: %r' % row else: header_rows += [row] if verbosity > 2: print 'HEADER: %r' % header_rows if verbosity > 0: N = count_lines(path, mode) - i + 10 # + 10 fudge factor in case multiple newlines in a single csv row widgets = [pb.Counter(), '/%d lines: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()] i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start() if verbosity > 3: print 'Generating all the batches before iterating may take a while...' for batch_num, batch_of_rows in enumerate(util.generate_batches(reader, batch_len)): if verbosity > 2: print print i batch_of_objects = [] for j, row in enumerate(batch_of_rows): if verbosity > 3: print j, row if not row or all(not el for el in row): if verbosity > 2: print 'IGNORED: %r' % row continue if verbosity or not ignore_errors: M = M or len(row) if len(row) != M: print 'ERROR importing row #%d in batch_num=%d which is row #%d overall. The row had %d columns, but previous rows had %d.' % (j + 1, batch_num + 1, i + j + 1, len(row), M) print 'Erroneously parsed row:' print repr(row) if not ignore_errors: raise ValueError('ERROR importing row #%d which had %d columns, but previous rows had %d.' % (i + j + 1, len(row), M)) try: obj, row_errors = django_object_from_row(row, model=model, field_names=field_names, strip=strip, verbosity=verbosity) batch_of_objects += [obj] errors += row_errors except: if verbosity > 0: print 'Error importing row #%d' % (i + j + 1) print_exc() if not ignore_errors: raise if verbosity > 0: try: pbar.update(i + j) except: print_exc() if not ignore_errors: raise i += len(batch_of_rows) if not dry_run: model.objects.bulk_create(batch_of_objects) elif verbosity > 0: print "DRY_RUN: NOT bulk creating batch of %d records in %r" % (len(batch_of_objects), model) if verbosity > 0: pbar.finish() return i
0.004946
def get_kernel_string(kernel_source, params=None): """ retrieve the kernel source and return as a string This function processes the passed kernel_source argument, which could be a function, a string with a filename, or just a string with code already. If kernel_source is a function, the function is called with instance parameters in 'params' as the only argument. If kernel_source looks like filename, the file is read in, but if the file does not exist, it is assumed that the string is not a filename after all. :param kernel_source: One of the sources for the kernel, could be a function that generates the kernel code, a string containing a filename that points to the kernel source, or just a string that contains the code. :type kernel_source: string or callable :param params: Dictionary containing the tunable parameters for this specific kernel instance, only needed when kernel_source is a generator. :type param: dict :returns: A string containing the kernel code. :rtype: string """ #logging.debug('get_kernel_string called with %s', str(kernel_source)) logging.debug('get_kernel_string called') kernel_string = None if callable(kernel_source): kernel_string = kernel_source(params) elif isinstance(kernel_source, str): if looks_like_a_filename(kernel_source): kernel_string = read_file(kernel_source) or kernel_source else: kernel_string = kernel_source else: raise TypeError("Error kernel_source is not a string nor a callable function") return kernel_string
0.00303
def setRect(self, rect):
    """ Sets the window bounds from a tuple of (x,y,w,h) """
    self.x, self.y, self.w, self.h = rect
0.047244
def fuzzer(buffer, fuzz_factor=101):
    """Fuzz given buffer.

    Take a buffer of bytes, create a copy, and replace some bytes with random
    values. Number of bytes to modify depends on fuzz_factor.
    This code is taken from Charlie Miller's fuzzer code.

    :param buffer: the data to fuzz.
    :type buffer: byte array
    :param fuzz_factor: degree of fuzzing.
    :type fuzz_factor: int
    :return: fuzzed buffer.
    :rtype: byte array
    """
    buf = deepcopy(buffer)
    num_writes = number_of_bytes_to_modify(len(buf), fuzz_factor)
    for _ in range(num_writes):
        random_byte = random.randrange(256)
        random_position = random.randrange(len(buf))
        buf[random_position] = random_byte
    return buf
0.001357
def channels_open(self, room_id, **kwargs):
    """Adds the channel back to the user’s list of channels."""
    return self.__call_api_post('channels.open', roomId=room_id, kwargs=kwargs)
0.015385
def initial(self, request, *args, **kwargs):
    """Disallow users other than the user whose email is being reset."""
    email = request.data.get('email')
    if request.user.is_authenticated() and email != request.user.email:
        raise PermissionDenied()
    return super(ResendConfirmationEmail, self).initial(
        request, *args, **kwargs
    )
0.00489
def chrono(ctx, app_id, sentence_file, json_flag, sentence, doc_time, request_id): # type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA """Extract expression expressing date and time and normalize its value """ app_id = clean_app_id(app_id) sentence = clean_sentence(sentence, sentence_file) api = GoolabsAPI(app_id) ret = api.chrono( sentence=sentence, doc_time=doc_time, request_id=request_id, ) if json_flag: click.echo(format_json(api.response.json())) return for pair in ret['datetime_list']: click.echo(u'{0}: {1}'.format(text(pair[0]), pair[1]))
0.001453
def excepthook(type, value, tb):
    """Report an exception."""
    if (issubclass(type, Error) or issubclass(type, lib50.Error)) and str(value):
        for line in str(value).split("\n"):
            cprint(str(line), "yellow")
    else:
        cprint(_("Sorry, something's wrong! Let [email protected] know!"), "yellow")

    if excepthook.verbose:
        traceback.print_exception(type, value, tb)

    cprint(_("Submission cancelled."), "red")
0.006536
def _convert(self, desired_type: Type[T], source_obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T: """ Delegates to the user-provided method. Passes the appropriate part of the options according to the function name. :param desired_type: :param source_obj: :param logger: :param options: :return: """ try: if self.unpack_options: opts = self.get_applicable_options(options) if self.function_args is not None: return self.conversion_method(desired_type, source_obj, logger, **self.function_args, **opts) else: return self.conversion_method(desired_type, source_obj, logger, **opts) else: if self.function_args is not None: return self.conversion_method(desired_type, source_obj, logger, options, **self.function_args) else: return self.conversion_method(desired_type, source_obj, logger, options) except TypeError as e: raise CaughtTypeError.create(self.conversion_method, e)
0.006803
def make_converters(data_types) -> dict:
    """
    Return a mapping between data type names, and casting functions, or
    class definitions to convert text into its Python object.

    Parameters
    ----------
    data_types: dict-like
        data field name str: python primitive type or class.

    Example
    -------
    >> make_converters({'student': str, 'score': float, 'grade': Grade)
        -> {'student_name': passthrough, 'score': parse_float, 'grade': Grade)
    """
    return {k: TYPE_CASTERS.get(v, v) for k, v in data_types.items()}
0.001821
def adjoint(self): r"""Return the (right) adjoint. Notes ----- Due to technicalities of operators from a real space into a complex space, this does not satisfy the usual adjoint equation: .. math:: \langle Ax, y \rangle = \langle x, A^*y \rangle Instead it is an adjoint in a weaker sense as follows: .. math:: \langle A^*Ax, y \rangle = \langle Ax, Ay \rangle Examples -------- The adjoint satisfies the adjoint equation for complex spaces >>> c3 = odl.cn(3) >>> op = ComplexEmbedding(c3, scalar=1j) >>> x = c3.element([1 + 1j, 2 + 2j, 3 + 3j]) >>> y = c3.element([3 + 1j, 2 + 2j, 3 + 1j]) >>> Axy = op(x).inner(y) >>> xAty = x.inner(op.adjoint(y)) >>> Axy == xAty True For real domains, it only satisfies the (right) adjoint equation >>> r3 = odl.rn(3) >>> op = ComplexEmbedding(r3, scalar=1j) >>> x = r3.element([1, 2, 3]) >>> y = r3.element([3, 2, 3]) >>> AtAxy = op.adjoint(op(x)).inner(y) >>> AxAy = op(x).inner(op(y)) >>> AtAxy == AxAy True """ if self.domain.is_real: # Real domain # Optimizations for simple cases. if self.scalar.real == self.scalar: return self.scalar.real * RealPart(self.range) elif 1j * self.scalar.imag == self.scalar: return self.scalar.imag * ImagPart(self.range) else: # General case return (self.scalar.real * RealPart(self.range) + self.scalar.imag * ImagPart(self.range)) else: # Complex domain return ComplexEmbedding(self.range, self.scalar.conjugate())
0.001085
def containerIsRunning(name_or_id): '''Check if container with the given name or ID (str) is running. No side effects. Idempotent. Returns True if running, False if not.''' require_str("name_or_id", name_or_id) try: container = getContainer(name_or_id) # Refer to the latest status list here: https://docs.docker.com/engine/ # api/v1.33/#operation/ContainerList if container: if container.status == 'created': return False elif container.status == 'restarting': return True elif container.status == 'running': return True elif container.status == 'removing': return False elif container.status == 'paused': return False elif container.status == 'exited': return False elif container.status == 'dead': return False else: return False except NotFound as exc: return False return False
0.008507
def df_random(num_numeric=3, num_categorical=3, num_rows=100):
    """Generate a dataframe with random data. This is a general method
       to easily generate a random dataframe, for more control of the
       random 'distributions' use the column methods (df_numeric_column,
       df_categorical_column). For other distributions you can use numpy
       methods directly (see example at bottom of this file)

       Args:
           num_numeric (int): The number of numeric columns (default = 3)
           num_categorical (int): The number of categorical columns (default = 3)
           num_rows (int): The number of rows to generate (default = 100)
    """
    # Construct DataFrame
    df = pd.DataFrame()
    column_names = string.ascii_lowercase

    # Create numeric columns
    for name in column_names[:num_numeric]:
        df[name] = df_numeric_column(num_rows=num_rows)

    # Create categorical columns
    for name in column_names[num_numeric:num_numeric+num_categorical]:
        df[name] = df_categorical_column(['foo', 'bar', 'baz'], num_rows=num_rows)

    # Return the dataframe
    return df
0.0045
def transform(self, X, y=None): """Transform data by adding two virtual features. Parameters ---------- X: numpy ndarray, {n_samples, n_components} New data, where n_samples is the number of samples and n_components is the number of components. y: None Unused Returns ------- X_transformed: array-like, shape (n_samples, n_features) The transformed feature set """ X = check_array(X) n_features = X.shape[1] X_transformed = np.copy(X) non_zero_vector = np.count_nonzero(X_transformed, axis=1) non_zero = np.reshape(non_zero_vector, (-1, 1)) zero_col = np.reshape(n_features - non_zero_vector, (-1, 1)) X_transformed = np.hstack((non_zero, X_transformed)) X_transformed = np.hstack((zero_col, X_transformed)) return X_transformed
0.00216
def isscalar(cls, dataset, dim):
    """
    Tests if dimension is scalar in each subpath.
    """
    if not dataset.data:
        return True
    ds = cls._inner_dataset_template(dataset)
    isscalar = []
    for d in dataset.data:
        ds.data = d
        isscalar.append(ds.interface.isscalar(ds, dim))
    return all(isscalar)
0.005277
def extrusion(target, throat_length='throat.length', throat_area='throat.area'):
    r"""
    Calculate throat volume from the throat area and the throat length. This
    method is useful for abnormal shaped throats.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    throat_length and throat_area : strings
        The dictionary keys containing the arrays with the throat area and
        length values.

    Notes
    -----
    At present this model does NOT account for the volume represented by the
    intersection of the throat with a spherical pore body.
    """
    leng = target[throat_length]
    area = target[throat_area]
    value = leng*area
    return value
0.001134
def decode(self, envelope, session, target=None, modification_code=None, **kwargs): """ Methods checks envelope for 'modification_code' existence and removes it. :param envelope: original envelope :param session: original session :param target: flag, that specifies whether code must be searched and removed at the start or at the end :param modification_code: code to search/remove :param kwargs: additional arguments :return: WMessengerTextEnvelope or WMessengerBytesEnvelope (depends on the original envelope) """ self.__args_check(envelope, target, modification_code) message = envelope.message() if len(message) < len(modification_code): raise ValueError('Invalid message length') if isinstance(envelope, WMessengerTextEnvelope): target_envelope_cls = WMessengerTextEnvelope else: # isinstance(envelope, WMessengerBytesEnvelope) target_envelope_cls = WMessengerBytesEnvelope if target == WMessengerFixedModificationLayer.Target.head: if message[:len(modification_code)] != modification_code: raise ValueError('Invalid header in message') return target_envelope_cls(message[len(modification_code):], meta=envelope) else: # target == WMessengerFixedModificationLayer.Target.tail if message[-len(modification_code):] != modification_code: raise ValueError('Invalid tail in message') return target_envelope_cls(message[:-len(modification_code)], meta=envelope)
0.020322
def _From(self, t):
    """ Handle "from xyz import foo, bar as baz".
    """
    # fixme: Are From and ImportFrom handled differently?
    self._fill("from ")
    self._write(t.modname)
    self._write(" import ")
    for i, (name, asname) in enumerate(t.names):
        if i != 0:
            self._write(", ")
        self._write(name)
        if asname is not None:
            self._write(" as " + asname)
0.006593
def decode_from_file(estimator, filename, hparams, decode_hp, decode_to_file=None, checkpoint_path=None):
  """Compute predictions on entries in filename and write them out."""
  if not decode_hp.batch_size:
    decode_hp.batch_size = 32
    tf.logging.info(
        "decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)

  # Inputs vocabulary is set to targets if there are no inputs in the problem,
  # e.g., for language models where the inputs are just a prefix of targets.
  p_hp = hparams.problem_hparams
  has_input = "inputs" in p_hp.vocabulary
  inputs_vocab_key = "inputs" if has_input else "targets"
  inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
  targets_vocab = p_hp.vocabulary["targets"]
  problem_name = FLAGS.problem
  filename = _add_shard_to_filename(filename, decode_hp)
  tf.logging.info("Performing decoding from file (%s)." % filename)
  if has_input:
    sorted_inputs, sorted_keys = _get_sorted_inputs(
        filename, decode_hp.delimiter)
  else:
    sorted_inputs = _get_language_modeling_inputs(
        filename, decode_hp.delimiter, repeat=decode_hp.num_decodes)
    sorted_keys = range(len(sorted_inputs))
  num_sentences = len(sorted_inputs)
  num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1

  if estimator.config.use_tpu:
    length = getattr(hparams, "length", 0) or hparams.max_length
    batch_ids = []
    for line in sorted_inputs:
      if has_input:
        ids = inputs_vocab.encode(line.strip()) + [1]
      else:
        ids = targets_vocab.encode(line)
      if len(ids) < length:
        ids.extend([0] * (length - len(ids)))
      else:
        ids = ids[:length]
      batch_ids.append(ids)
    np_ids = np.array(batch_ids, dtype=np.int32)

    def input_fn(params):
      batch_size = params["batch_size"]
      dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids})
      dataset = dataset.map(
          lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))})
      dataset = dataset.batch(batch_size)
      return dataset
  else:

    def input_fn():
      input_gen = _decode_batch_input_fn(
          num_decode_batches, sorted_inputs,
          inputs_vocab, decode_hp.batch_size,
          decode_hp.max_input_size,
          task_id=decode_hp.multiproblem_task_id, has_input=has_input)
      gen_fn = make_input_fn_from_generator(input_gen)
      example = gen_fn()
      return _decode_input_tensor_to_features_dict(example, hparams)

  decodes = []
  result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)

  start_time = time.time()
  total_time_per_step = 0
  total_cnt = 0

  def timer(gen):
    while True:
      try:
        start_time = time.time()
        item = next(gen)
        elapsed_time = time.time() - start_time
        yield elapsed_time, item
      except StopIteration:
        break

  for elapsed_time, result in timer(result_iter):
    if decode_hp.return_beams:
      beam_decodes = []
      beam_scores = []
      output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
      scores = None
      if "scores" in result:
        if np.isscalar(result["scores"]):
          result["scores"] = result["scores"].reshape(1)
        scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
      for k, beam in enumerate(output_beams):
        tf.logging.info("BEAM %d:" % k)
        score = scores and scores[k]
        _, decoded_outputs, _ = log_decode_results(
            result["inputs"],
            beam,
            problem_name,
            None,
            inputs_vocab,
            targets_vocab,
            log_results=decode_hp.log_results,
            skip_eos_postprocess=decode_hp.skip_eos_postprocess)
        beam_decodes.append(decoded_outputs)
        if decode_hp.write_beam_scores:
          beam_scores.append(score)
      if decode_hp.write_beam_scores:
        decodes.append("\t".join([
            "\t".join([d, "%.2f" % s])
            for d, s in zip(beam_decodes, beam_scores)
        ]))
      else:
        decodes.append("\t".join(beam_decodes))
    else:
      _, decoded_outputs, _ = log_decode_results(
          result["inputs"],
          result["outputs"],
          problem_name,
          None,
          inputs_vocab,
          targets_vocab,
          log_results=decode_hp.log_results,
          skip_eos_postprocess=decode_hp.skip_eos_postprocess)
      decodes.append(decoded_outputs)
    total_time_per_step += elapsed_time
    total_cnt += result["outputs"].shape[-1]

  duration = time.time() - start_time
  tf.logging.info("Elapsed Time: %5.5f" % duration)
  tf.logging.info("Averaged Single Token Generation Time: %5.7f "
                  "(time %5.7f count %d)" %
                  (total_time_per_step / total_cnt,
                   total_time_per_step, total_cnt))
  if decode_hp.batch_size == 1:
    tf.logging.info("Inference time %.4f seconds "
                    "(Latency = %.4f ms/sentence)" %
                    (duration, 1000.0 * duration / num_sentences))
  else:
    tf.logging.info("Inference time %.4f seconds "
                    "(Throughput = %.4f sentences/second)" %
                    (duration, num_sentences / duration))

  # If decode_to_file was provided use it as the output filename without change
  # (except for adding shard_id if using more shards for decoding).
  # Otherwise, use the input filename plus model, hp, problem, beam, alpha.
  decode_filename = decode_to_file if decode_to_file else filename
  if not decode_to_file:
    decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
  else:
    decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
  tf.logging.info("Writing decodes into %s" % decode_filename)
  outfile = tf.gfile.Open(decode_filename, "w")
  for index in range(len(sorted_inputs)):
    outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
  outfile.flush()
  outfile.close()

  output_dir = os.path.join(estimator.model_dir, "decode")
  tf.gfile.MakeDirs(output_dir)

  run_postdecode_hooks(DecodeHookArgs(
      estimator=estimator,
      problem=hparams.problem,
      output_dirs=[output_dir],
      hparams=hparams,
      decode_hparams=decode_hp,
      predictions=list(result_iter)
  ), None)
0.011959
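The per-item timing wrapper around `estimator.predict` in the snippet above is a reusable pattern on its own. The sketch below is illustrative only: the helper name `timed` and the dummy generator standing in for the prediction iterator are made up here.

import time

def timed(gen):
    """Yield (elapsed_seconds, item) for each item pulled from `gen`."""
    while True:
        try:
            start = time.time()
            item = next(gen)
            yield time.time() - start, item
        except StopIteration:
            break

def slow_numbers():  # stand-in for estimator.predict(...)
    for n in range(3):
        time.sleep(0.01)
        yield n

for elapsed, value in timed(slow_numbers()):
    print("%.4fs -> %r" % (elapsed, value))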
def Bernoulli(p, tag=None):
    """
    A Bernoulli random variate

    Parameters
    ----------
    p : scalar
        The probability of success
    """
    assert 0 < p < 1, (
        'Bernoulli probability "p" must be between zero and one, non-inclusive')
    return uv(ss.bernoulli(p), tag=tag)
0.006472
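In the snippet above, `ss` and `uv` are names imported by the host module; `ss` appears to be `scipy.stats` and `uv` the module's uncertain-variable wrapper. The sketch below only exercises the underlying frozen distribution with plain SciPy and does not reproduce that wrapper.

import scipy.stats as ss

p = 0.3
rv = ss.bernoulli(p)        # frozen Bernoulli distribution, as wrapped by uv() above
print(rv.mean(), rv.var())  # 0.3 0.21
print(rv.rvs(size=5))       # e.g. [0 1 0 0 1]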
def randomwif(prefix, num):
    """ Obtain a random private/public key pair """
    from bitsharesbase.account import PrivateKey

    t = [["wif", "pubkey"]]
    for n in range(0, num):
        wif = PrivateKey()
        t.append([str(wif), format(wif.pubkey, prefix)])
    print_table(t)
0.003425
def stop(self):
    """
    Instructs the kernel process to stop channels and the kernel manager
    to then shut down the process.
    """
    logger.debug('Stopping kernel')
    self.kc.stop_channels()
    self.km.shutdown_kernel(now=True)
    del self.km
0.006944
def calf(self, spec):
    """
    Typical safe usage is this, which sets up everything that could be
    problematic. Requires the filename to which everything will be
    produced.
    """
    if not isinstance(spec, Spec):
        raise TypeError('spec must be of type Spec')

    if not spec.get(BUILD_DIR):
        tempdir = realpath(mkdtemp())
        spec.advise(CLEANUP, shutil.rmtree, tempdir)
        build_dir = join(tempdir, 'build')
        mkdir(build_dir)
        spec[BUILD_DIR] = build_dir
    else:
        build_dir = self.realpath(spec, BUILD_DIR)
        if not isdir(build_dir):
            logger.error("build_dir '%s' is not a directory", build_dir)
            raise_os_error(errno.ENOTDIR, build_dir)

    self.realpath(spec, EXPORT_TARGET)

    # Finally, handle setup which may set up the deferred advices,
    # as all the toolchain (and its runtime and/or its parent
    # runtime and related toolchains) spec advises should have been
    # done.
    spec.handle(SETUP)

    try:
        process = ('prepare', 'compile', 'assemble', 'link', 'finalize')
        for p in process:
            spec.handle('before_' + p)
            getattr(self, p)(spec)
            spec.handle('after_' + p)
        spec.handle(SUCCESS)
    except ToolchainCancel:
        # quietly handle the issue and move on out of here.
        pass
    finally:
        spec.handle(CLEANUP)
0.001301
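The phase loop in `calf` (dispatching `before_*`, the phase method itself, then `after_*`) can be shown in isolation. The sketch below is a toy stand-in: a plain dict replaces the real `Spec` event system, only two phases are modelled, and the class name is invented.

class MiniToolchain:
    """Toy illustration of the before_/after_ phase dispatch used in calf()."""

    def prepare(self, spec):
        spec['log'].append('prepare')

    def compile(self, spec):
        spec['log'].append('compile')

    def run(self, spec):
        spec.setdefault('log', [])
        for phase in ('prepare', 'compile'):
            spec['log'].append('before_' + phase)
            getattr(self, phase)(spec)      # dispatch the phase method by name
            spec['log'].append('after_' + phase)
        return spec['log']

print(MiniToolchain().run({}))
# ['before_prepare', 'prepare', 'after_prepare',
#  'before_compile', 'compile', 'after_compile']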
def get_subreddit(self, subreddit_name, *args, **kwargs):
    """Return a Subreddit object for the subreddit_name specified.

    The additional parameters are passed directly into the
    :class:`.Subreddit` constructor.
    """
    sr_name_lower = subreddit_name.lower()
    if sr_name_lower == 'random':
        return self.get_random_subreddit()
    elif sr_name_lower == 'randnsfw':
        return self.get_random_subreddit(nsfw=True)
    return objects.Subreddit(self, subreddit_name, *args, **kwargs)
0.00365
def axis_updated(self, event: InputEvent, prefix=None):
    """
    Called to process an absolute axis event from evdev, this is called
    internally by the controller implementations

    :internal:

    :param event:
        The evdev event to process
    :param prefix:
        If present, a named prefix that should be applied to the event code
        when searching for the axis
    """
    if prefix is not None:
        axis = self.axes_by_code.get(prefix + str(event.code))
    else:
        axis = self.axes_by_code.get(event.code)
    if axis is not None:
        axis.receive_device_value(event.value)
    else:
        logger.debug('Unknown axis code {} ({}), value {}'.format(
            event.code, prefix, event.value))
0.006443
def chk_edges(self):
    """Check that all edge nodes exist in local subset."""
    goids = set(self.go2obj)
    self.chk_edges_nodes(self.edges, goids, "is_a")
    for reltype, edges in self.edges_rel.items():
        self.chk_edges_nodes(edges, goids, reltype)
0.007092
def fieldinfo(self):
    """Retrieve info about all vdata fields.

    Args::

      no argument

    Returns::

      list where each element describes a field of the vdata;
      each field is described by a 7-element tuple containing
      the following elements:

      - field name
      - field data type (one of HC.xxx constants)
      - field order
      - number of attributes attached to the field
      - field index number
      - field external size
      - field internal size

    C library equivalent : no equivalent
    """
    lst = []
    for n in range(self._nfields):
        fld = self.field(n)
        lst.append((fld._name,
                    fld._type,
                    fld._order,
                    fld._nattrs,
                    fld._index,
                    fld._esize,
                    fld._isize))
    return lst
0.001976
def _generate_initial_model(self):
    """Creates the initial model for the optimisation.

    Raises
    ------
    TypeError
        Raised if the model failed to build. This could be due to
        parameters being passed to the specification in the wrong format.
    """
    initial_parameters = [p.current_value for p in self.current_parameters]
    try:
        initial_model = self.specification(*initial_parameters)
    except TypeError:
        raise TypeError(
            'Failed to build initial model. Make sure that the input '
            'parameters match the number and order of arguments '
            'expected by the input specification.')
    initial_model.pack_new_sequences(self.sequences)
    self.current_energy = self.eval_function(initial_model)
    self.best_energy = copy.deepcopy(self.current_energy)
    self.best_parameters = copy.deepcopy(self.current_parameters)
    self.best_model = initial_model
    return
0.001921
def quote_edge(identifier):
    """Return DOT edge statement node_id from string, quote if needed.

    >>> quote_edge('spam')
    'spam'

    >>> quote_edge('spam spam:eggs eggs')
    '"spam spam":"eggs eggs"'

    >>> quote_edge('spam:eggs:s')
    'spam:eggs:s'
    """
    node, _, rest = identifier.partition(':')
    parts = [quote(node)]
    if rest:
        port, _, compass = rest.partition(':')
        parts.append(quote(port))
        if compass:
            parts.append(compass)
    return ':'.join(parts)
0.001931
def copyfile(source, destination, force=True):
    '''copy a file from a source to its destination.
    '''
    if os.path.exists(destination) and force is True:
        os.remove(destination)
    shutil.copyfile(source, destination)
    return destination
0.003906
def Stat(self, urns):
    """Returns metadata about all urns.

    Currently the metadata includes the type and last update time.

    Args:
      urns: The urns of the objects to open.

    Yields:
      A dict of metadata.

    Raises:
      ValueError: A string was passed instead of an iterable.
    """
    if isinstance(urns, string_types):
        raise ValueError("Expected an iterable, not string.")
    for subject, values in data_store.DB.MultiResolvePrefix(
            urns, ["aff4:type", "metadata:last"]):
        res = dict(urn=rdfvalue.RDFURN(subject))
        for v in values:
            if v[0] == "aff4:type":
                res["type"] = v
            elif v[0] == "metadata:last":
                res["last"] = rdfvalue.RDFDatetime(v[1])
        yield res
0.010767
def report(self, item_id, report_format="json"):
    """Retrieves the specified report for the analyzed item, referenced by item_id.

    Available formats include: json, html, all, dropped, package_files.

    :type item_id: int
    :param item_id: Task ID number
    :type report_format: str
    :param report_format: Return format
    :rtype: dict
    :return: Dictionary representing the JSON parsed data or raw, for
             other formats / JSON parsing failure.
    """
    report_format = report_format.lower()
    response = self._request("tasks/report/{id}/{format}".format(
        id=item_id, format=report_format))

    # if response is JSON, return it as an object
    if report_format == "json":
        try:
            return json.loads(response.content.decode('utf-8'))
        except ValueError:
            pass

    # otherwise, return the raw content.
    return response.content
0.004032
def has_external_dependency(name):
    'Check that a non-Python dependency is installed.'
    for directory in os.environ['PATH'].split(':'):
        if os.path.exists(os.path.join(directory, name)):
            return True
    return False
0.004167
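A widely used alternative to scanning PATH by hand is `shutil.which` from the standard library (Python 3.3+); unlike the snippet above it also checks the executable bit and handles Windows path separators. Whether that behaviour is wanted here is a judgment call, so this is only an alternative sketch, not a drop-in correction.

import shutil

def has_external_dependency(name):
    'Check that a non-Python dependency is installed.'
    # shutil.which returns the full path of the executable, or None.
    return shutil.which(name) is not None

print(has_external_dependency('git'))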
def get_id2parents(objs):
    """Get all parent item IDs for each item in dict keys."""
    id2parents = {}
    for obj in objs:
        _get_id2parents(id2parents, obj.item_id, obj)
    return id2parents
0.004902
def get(self, key, default=None, with_age=False):
    " Return the value for key if key is in the dictionary, else default. "
    try:
        return self.__getitem__(key, with_age)
    except KeyError:
        if with_age:
            return default, None
        else:
            return default
0.006079
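The `with_age` calling convention above (presumably from an expiring-dict style container) can be mimicked with a small stand-in class; everything below, including the class name, is invented for illustration.

import time

class AgingDict(dict):
    """Toy dict that stores (value, insertion_time) and can report an item's age."""

    def __setitem__(self, key, value):
        super().__setitem__(key, (value, time.time()))

    def __getitem__(self, key, with_age=False):
        value, born = super().__getitem__(key)
        return (value, time.time() - born) if with_age else value

    def get(self, key, default=None, with_age=False):
        " Return the value for key if key is in the dictionary, else default. "
        try:
            return self.__getitem__(key, with_age)
        except KeyError:
            return (default, None) if with_age else default

d = AgingDict()
d['x'] = 1
print(d.get('x', with_age=True))        # (1, <seconds since insertion>)
print(d.get('missing', with_age=True))  # (None, None)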
def expert_to_gates(self):
    """Gate values corresponding to the examples in the per-expert `Tensor`s.

    Returns:
      a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
      and shapes `[expert_batch_size_i]`
    """
    return tf.split(
        self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts)
0.002849
def get_oauth_url(self):
    """ Returns the URL with OAuth params """
    params = OrderedDict()

    if "?" in self.url:
        url = self.url[:self.url.find("?")]
        for key, value in parse_qsl(urlparse(self.url).query):
            params[key] = value
    else:
        url = self.url

    params["oauth_consumer_key"] = self.consumer_key
    params["oauth_timestamp"] = self.timestamp
    params["oauth_nonce"] = self.generate_nonce()
    params["oauth_signature_method"] = "HMAC-SHA256"
    params["oauth_signature"] = self.generate_oauth_signature(params, url)

    query_string = urlencode(params)

    return "%s?%s" % (url, query_string)
0.002805
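The first half of `get_oauth_url` just splits off and re-parses any existing query string before appending the `oauth_*` fields. That round trip can be checked with the standard library alone; the URL and consumer key below are placeholders and no signature is computed.

from collections import OrderedDict
from urllib.parse import parse_qsl, urlparse, urlencode

full_url = "https://example.com/api/orders?status=processing&page=2"

params = OrderedDict()
base_url = full_url
if "?" in full_url:
    base_url = full_url[:full_url.find("?")]
    for key, value in parse_qsl(urlparse(full_url).query):
        params[key] = value

params["oauth_consumer_key"] = "ck_example"  # placeholder; real code also signs the request
print("%s?%s" % (base_url, urlencode(params)))
# https://example.com/api/orders?status=processing&page=2&oauth_consumer_key=ck_example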
def principal_axis_system(self):
    """
    Returns a chemical shielding tensor aligned to the principal axis system
    so that only the 3 diagonal components are non-zero.
    """
    return ChemicalShielding(np.diag(np.sort(np.linalg.eigvals(self))))
0.011029
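`principal_axis_system` amounts to sorting the eigenvalues of the (symmetric) shielding tensor onto a diagonal. The NumPy one-liner can be exercised on a hand-made symmetric 3x3 array; the values are arbitrary and the `ChemicalShielding` wrapper is not reproduced here.

import numpy as np

sigma = np.array([[10.0,  2.0,  0.0],
                  [ 2.0, 20.0,  1.0],
                  [ 0.0,  1.0, 30.0]])

# Eigenvalues of a symmetric tensor are real; sort them onto the diagonal.
pas = np.diag(np.sort(np.linalg.eigvals(sigma)))
print(np.round(pas, 3))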