Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def _hydrate_pivot_relation(self, models):
    """
    Hydrate the pivot table relationship on the models.

    :type models: list
    """
    for model in models:
        pivot = self.new_existing_pivot(self._clean_pivot_attributes(model))
        model.set_relation("pivot", pivot)
0.009615
def learn(self, objects):
    """
    Learns all provided objects

    :param objects: dict mapping object name to array of sensations, where
                    each sensation is composed of location and feature SDR
                    for each column. For example:
                    {'obj1' : [[[1,1,1],[101,205,523, ..., 1021]],...], ...}
                    Note: Each column must have the same number of sensations
                    as the other columns.
    :type objects: dict[str, array]
    """
    self.setLearning(True)
    for objectName, sensationList in objects.iteritems():
        self.sendReset()
        print "Learning :", objectName

        prevLoc = [None] * self.numColumns
        numFeatures = len(sensationList[0])
        displacement = [0] * self.dimensions

        for sensation in xrange(numFeatures):
            for col in xrange(self.numColumns):
                location = np.array(sensationList[col][sensation][0])
                feature = sensationList[col][sensation][1]

                # Compute displacement from previous location
                if prevLoc[col] is not None:
                    displacement = location - prevLoc[col]
                prevLoc[col] = location

                # learn each pattern multiple times
                for _ in xrange(self.repeat):
                    # Sense feature at location
                    self.motorInput[col].addDataToQueue(displacement)
                    self.sensorInput[col].addDataToQueue(feature, False, 0)
                    # Only move to the location on the first sensation.
                    displacement = [0] * self.dimensions

        self.network.run(self.repeat * numFeatures)

        # update L2 representations for the object
        self.learnedObjects[objectName] = self.getL2Representations()
0.010551
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to
    # determine which of the list of tzinfo instances for this zone
    # to use in order to restore the state of any datetime instances using
    # it correctly.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The particular state requested in this timezone no longer exists.
        # This indicates a corrupt pickle, or the timezone database has been
        # corrected violently enough to make this particular
        # (utcoffset, dstoffset) no longer exist in the zone, or the
        # abbreviation has been changed.
        pass

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
0.000444
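A minimal round-trip sketch of the behaviour the unpickler above has to support, assuming a standard pytz install (the zone and date are arbitrary): pickling an aware datetime captures the specific (utcoffset, dstoffset, tzname) state, and unpickling must resolve it back to the cached tzinfo instance for that zone.

import pickle
from datetime import datetime

import pytz

eastern = pytz.timezone("US/Eastern")
aware = eastern.localize(datetime(2020, 7, 1, 12, 0))  # resolves to the EDT state

restored = pickle.loads(pickle.dumps(aware))
# The factory maps the stored offsets back onto tz._tzinfos, so the
# restored datetime should carry the same cached tzinfo instance.
assert restored.tzinfo is aware.tzinfo
print(restored.tzname())  # EDT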
def make_report(plots, path):
    '''
    Creates a fat html report based on the previously created files.

    plots is a list of Plot objects defined by a path and title.
    The stats saved earlier to <path>NanoStats.txt are parsed to a table
    (rather dodgy).
    '''
    logging.info("Writing html report.")
    html_head = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
table, th, td {
    text-align: left;
    padding: 2px;
    /* border: 1px solid black;
    border-collapse: collapse; */
}
h2 {
    line-height: 0pt;
}
</style>
<title>NanoComp Report</title>
</head>"""
    html_content = ["\n<body>\n<h1>NanoComp report</h1>"]
    html_content.append("<h2>Summary statistics</h2>")
    html_content.append(utils.stats2html(path + "NanoStats.txt"))
    html_content.append('\n<br>\n<br>\n<br>\n<br>')
    html_content.append("<h2>Plots</h2>")
    for plot in plots:
        html_content.append("\n<h3>" + plot.title + "</h3>\n" + plot.encode())
        html_content.append('\n<br>\n<br>\n<br>\n<br>')
    html_body = '\n'.join(html_content) + "</body></html>"
    html_str = html_head + html_body
    with open(path + "NanoComp-report.html", "w") as html_file:
        html_file.write(html_str)
    return path + "NanoComp-report.html"
0.000691
def echo_with_markers(text, marker='=', marker_color='blue', text_color=None):
    """Print a text to the screen with markers surrounding it.

    The output looks like:

        ======== text ========

    with marker='=' right now. In the event that the terminal window is
    too small, the text is printed without markers.

    :param str text: the text to echo
    :param str marker: the marker to surround the text
    :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' |
        'blue' | 'magenta' | 'cyan' | 'white')
    :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' |
        'blue' | 'magenta' | 'cyan' | 'white')
    """
    text = ' ' + text + ' '
    width, _ = click.get_terminal_size()
    if len(text) >= width:
        click.echo(text)  # this is probably never the case
    else:
        leftovers = width - len(text)
        # Integer division: str * float raises TypeError under true division.
        click.secho(marker * (leftovers // 2), fg=marker_color, nl=False)
        click.secho(text, nl=False, fg=text_color)
        click.secho(marker * (leftovers // 2 + leftovers % 2), fg=marker_color)
0.002825
def load_template_help(builtin):
    """Loads the help for a given template"""
    help_file = "templates/%s-help.yml" % builtin
    help_file = resource_filename(__name__, help_file)
    help_obj = {}
    if os.path.exists(help_file):
        help_data = yaml.safe_load(open(help_file))
        if 'name' in help_data:
            help_obj['name'] = help_data['name']
        if 'help' in help_data:
            help_obj['help'] = help_data['help']
        if 'args' in help_data:
            help_obj['args'] = help_data['args']
    return help_obj
0.001805
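A hedged sketch of the YAML shape the loader above reads; the template name and fields here are hypothetical, and only PyYAML is required.

import yaml

help_text = """
name: webapp
help: Scaffold a basic web application
args:
  - name: project_name
    help: Name of the generated project
"""

help_data = yaml.safe_load(help_text)
# Mirror the key-by-key copy done in load_template_help().
help_obj = {key: help_data[key] for key in ('name', 'help', 'args')
            if key in help_data}
print(help_obj['name'])  # webapp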
def get_ip_interface_output_interface_vrf(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_ip_interface = ET.Element("get_ip_interface")
    config = get_ip_interface
    output = ET.SubElement(get_ip_interface, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    vrf = ET.SubElement(interface, "vrf")
    vrf.text = kwargs.pop('vrf')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.002535
def _HandleLegacy(self, args, token=None):
    """Retrieves the clients for a hunt."""
    hunt_urn = args.hunt_id.ToURN()
    hunt_obj = aff4.FACTORY.Open(
        hunt_urn, aff4_type=implementation.GRRHunt, token=token)

    clients_by_status = hunt_obj.GetClientsByStatus()
    hunt_clients = clients_by_status[args.client_status.name]
    total_count = len(hunt_clients)

    if args.count:
        hunt_clients = sorted(hunt_clients)[args.offset:args.offset + args.count]
    else:
        hunt_clients = sorted(hunt_clients)[args.offset:]

    flow_id = "%s:hunt" % hunt_urn.Basename()
    results = [
        ApiHuntClient(client_id=c.Basename(), flow_id=flow_id)
        for c in hunt_clients
    ]
    return ApiListHuntClientsResult(items=results, total_count=total_count)
0.003856
def respond(self, data):
    """ Respond to the connection accepted in this object """
    self.push("%s%s" % (data, TERMINATOR))
    if self.temporary:
        self.close_when_done()
0.009259
def download_archive(self, archive):
    """Download an archive's :attr:`~LegendasTVArchive.content`.

    :param archive: the archive to download :attr:`~LegendasTVArchive.content` of.
    :type archive: :class:`LegendasTVArchive`
    """
    logger.info('Downloading archive %s', archive.id)
    r = self.session.get(self.server_url + 'downloadarquivo/{}'.format(archive.id))
    r.raise_for_status()

    # open the archive
    archive_stream = io.BytesIO(r.content)
    if is_rarfile(archive_stream):
        logger.debug('Identified rar archive')
        archive.content = RarFile(archive_stream)
    elif is_zipfile(archive_stream):
        logger.debug('Identified zip archive')
        archive.content = ZipFile(archive_stream)
    else:
        raise ValueError('Not a valid archive')
0.00464
def tone_marks():
    """Keep tone-modifying punctuation by matching following character.

    Assumes the `tone_marks` pre-processor was run for cases where there
    might not be any space after a tone-modifying punctuation mark.
    """
    return RegexBuilder(
        pattern_args=symbols.TONE_MARKS,
        pattern_func=lambda x: u"(?<={}).".format(x)).regex
0.00274
def save(self, force_insert=False, force_update=False, *args, **kwargs):
    """Makes sure we are in sync with the User field"""
    self.first_name = self.user.first_name
    self.last_name = self.user.last_name
    self.email = self.user.email
    full_name = '%s %s' % (self.first_name, self.last_name)
    theslug = slugify(full_name)
    if not theslug.strip():
        theslug = str(self.user.pk)
    while self.__class__.objects.filter(slug=theslug).exclude(pk=self.pk).count():
        theslug = "%s_" % theslug
    if self.slug != theslug:
        self.slug = theslug
    self.slug = self.slug[:50]
    super(BaseStaffMember, self).save(
        force_insert, force_update, *args, **kwargs
    )
0.003817
def get_fastq_2(job, patient_id, sample_type, fastq_1):
    """
    For a path to a fastq_1 file, return a fastq_2 file with the same prefix
    and naming scheme.

    :param str patient_id: The patient_id
    :param str sample_type: The sample type of the file
    :param str fastq_1: The path to the fastq_1 file
    :return: The path to the fastq_2 file
    :rtype: str
    """
    prefix, extn = fastq_1, 'temp'
    final_extn = ''
    while extn:
        prefix, extn = os.path.splitext(prefix)
        final_extn = extn + final_extn
        if prefix.endswith('1'):
            prefix = prefix[:-1]
            job.fileStore.logToMaster('"%s" prefix for "%s" determined to be %s'
                                      % (sample_type, patient_id, prefix))
            break
    else:
        raise ParameterError('Could not determine prefix from provided fastq (%s). Is it '
                             'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1)
    if final_extn not in ['.fastq', '.fastq.gz', '.fq', '.fq.gz']:
        raise ParameterError('If a _2 fastq path is not specified, only .fastq, .fq or '
                             'their gzipped extensions are accepted. Could not process '
                             '%s:%s.' % (patient_id, sample_type + '_fastq_1'))
    return ''.join([prefix, '2', final_extn])
0.005236
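The prefix detection above is easy to exercise in isolation. A standalone sketch of the same splitext loop, with a hypothetical filename and without the Toil job machinery:

import os.path

def mate_pair(fastq_1):
    # Peel extensions until the remaining stem ends in '1', then swap in '2'.
    prefix, extn = fastq_1, 'temp'
    final_extn = ''
    while extn:
        prefix, extn = os.path.splitext(prefix)
        final_extn = extn + final_extn
        if prefix.endswith('1'):
            return prefix[:-1] + '2' + final_extn
    raise ValueError('no <prefix>1 component found in %s' % fastq_1)

print(mate_pair('sample_1.fastq.gz'))  # sample_2.fastq.gz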
def to_segmentation_map(self, image_shape, size_lines=1, size_points=0,
                        raise_if_out_of_image=False):
    """
    Generate a segmentation map object from the line string.

    This is similar to
    :func:`imgaug.augmentables.lines.LineString.draw_mask`.
    The result is wrapped in a ``SegmentationMapOnImage`` object
    instead of just an array.

    Parameters
    ----------
    image_shape : tuple of int
        The shape of the image onto which to draw the line mask.

    size_lines : int, optional
        Thickness of the line.

    size_points : int, optional
        Size of the points in pixels.

    raise_if_out_of_image : bool, optional
        Whether to raise an error if the line string is fully outside of
        the image. If set to False, no error will be raised and only the
        parts inside the image will be drawn.

    Returns
    -------
    imgaug.augmentables.segmaps.SegmentationMapOnImage
        Segmentation map object containing drawn line string.

    """
    from .segmaps import SegmentationMapOnImage
    return SegmentationMapOnImage(
        self.draw_mask(
            image_shape, size_lines=size_lines, size_points=size_points,
            raise_if_out_of_image=raise_if_out_of_image),
        shape=image_shape
    )
0.00212
def Transit(time, t0=0., dur=0.1, per=3.56789, depth=0.001, **kwargs):
    '''
    A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_
    transit model, but with the depth and the duration as primary
    input variables.

    :param numpy.ndarray time: The time array
    :param float t0: The time of first transit in units of \
           :py:obj:`BJD` - 2454833.
    :param float dur: The transit duration in days. Don't go too crazy on \
           this one -- very small or very large values will break the \
           inverter. Default 0.1
    :param float per: The orbital period in days. Default 3.56789
    :param float depth: The fractional transit depth. Default 0.001
    :param dict kwargs: Any additional keyword arguments, passed directly \
           to :py:func:`pysyzygy.Transit`

    :returns tmod: The transit model evaluated at the same times as the \
           :py:obj:`time` array

    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")
    # Note that rhos can affect RpRs, so we should really do this iteratively,
    # but the effect is pretty negligible!
    RpRs = Get_RpRs(depth, t0=t0, per=per, **kwargs)
    rhos = Get_rhos(dur, t0=t0, per=per, **kwargs)
    return ps.Transit(t0=t0, per=per, RpRs=RpRs, rhos=rhos, **kwargs)(time)
0.00076
def _set_contents(self, force=False):
    """
    Encodes all child objects into the contents for this object

    :param force:
        Ensure all contents are in DER format instead of possibly using
        cached BER-encoded data
    """
    if self.children is None:
        self._parse_children()

    contents = BytesIO()
    for child in self:
        contents.write(child.dump(force=force))
    self._contents = contents.getvalue()

    self._header = None
    if self._trailer != b'':
        self._trailer = b''
0.003442
def display_dp_matrix_attr(dp_matrix, attr_name):
    """
    Show the value associated with an attribute for each
    DataProperty instance in the dp_matrix.
    """
    print()
    print("---------- {:s} ----------".format(attr_name))
    for dp_list in dp_matrix:
        print([getattr(dp, attr_name) for dp in dp_list])
0.003096
def load_object_at_path(path):
    """Load an object from disk at explicit path"""
    with open(path, 'r') as f:
        data = _deserialize(f.read())
    return aadict(data)
0.005587
def init(cfg):
    """
    Initialize na3x

    :param cfg: db, triggers, environment variables configuration
    """
    global na3x_cfg
    with open(cfg[NA3X_DB]) as db_cfg_file:
        na3x_cfg[NA3X_DB] = json.load(db_cfg_file, strict=False)
    with open(cfg[NA3X_TRIGGERS]) as triggers_cfg_file:
        na3x_cfg[NA3X_TRIGGERS] = json.load(triggers_cfg_file, strict=False)
    with open(cfg[NA3X_ENV]) as env_cfg_file:
        na3x_cfg[NA3X_ENV] = json.load(env_cfg_file, strict=False)
0.002033
def deserialize(cls, buf, byteorder='@'):
    '''
    Deserialize a lean MinHash from a buffer.

    Args:
        buf (buffer): `buf` must implement the `buffer`_ interface.
            One such example is the built-in `bytearray`_ class.
        byteorder (str, optional): This is the byte order of the serialized
            data. Use one of the `byte order characters
            <https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
            ``@``, ``=``, ``<``, ``>``, and ``!``.
            Default is ``@`` -- the native order.

    Return:
        datasketch.LeanMinHash: The deserialized lean MinHash

    Example:
        To deserialize a lean MinHash from a buffer.

        .. code-block:: python

            lean_minhash = LeanMinHash.deserialize(buf)
    '''
    fmt_seed_size = "%sqi" % byteorder
    fmt_hash = byteorder + "%dI"
    try:
        seed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0)
    except TypeError:
        seed, num_perm = struct.unpack_from(fmt_seed_size, buffer(buf), 0)
    offset = struct.calcsize(fmt_seed_size)
    try:
        hashvalues = struct.unpack_from(fmt_hash % num_perm, buf, offset)
    except TypeError:
        hashvalues = struct.unpack_from(fmt_hash % num_perm, buffer(buf), offset)

    lmh = object.__new__(LeanMinHash)
    lmh._initialize_slots(seed, hashvalues)
    return lmh
0.002672
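The buffer that deserialize() unpacks is plain struct data: a seed (int64) and a permutation count (int32), followed by num_perm uint32 hash values. A self-contained sketch of that layout using only the struct module; no datasketch objects are involved, and the values are made up.

import struct

byteorder = '<'
seed, hashvalues = 42, [7, 11, 13]

fmt_seed_size = '%sqi' % byteorder
fmt_hash = '%s%dI' % (byteorder, len(hashvalues))
offset = struct.calcsize(fmt_seed_size)

buf = bytearray(offset + struct.calcsize(fmt_hash))
struct.pack_into(fmt_seed_size, buf, 0, seed, len(hashvalues))
struct.pack_into(fmt_hash, buf, offset, *hashvalues)

# Read it back the same way LeanMinHash.deserialize does.
rseed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0)
rhashes = struct.unpack_from('%s%dI' % (byteorder, num_perm), buf, offset)
assert (rseed, list(rhashes)) == (seed, hashvalues)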
def t_intnumber(self, t):
    r'-?\d+'
    t.value = int(t.value)
    t.type = 'NUMBER'
    return t
0.017241
def new_freeform_sp(shape_id, name, x, y, cx, cy):
    """Return new `p:sp` element tree configured as freeform shape.

    The returned shape has a `a:custGeom` subtree but no paths in its
    path list.
    """
    tmpl = CT_Shape._freeform_sp_tmpl()
    xml = tmpl % (shape_id, name, x, y, cx, cy)
    sp = parse_xml(xml)
    return sp
0.005405
def plotDataframe(table, title, plotPath):
    """
    Plot a Pandas dataframe.

    :param table: Pandas dataframe returned by :func:`analyzeWeightPruning`
    :type table: :class:`pandas.DataFrame`
    :param title: Plot title
    :type title: str
    :param plotPath: Plot full path
    :type plotPath: str
    """
    plt.figure()
    axes = table.T.plot(subplots=True, sharex=True, grid=True, legend=True,
                        title=title, figsize=(8, 11))

    # Use fixed scale for "accuracy"
    accuracy = next(ax for ax in axes if ax.lines[0].get_label() == 'accuracy')
    accuracy.set_ylim(0.0, 1.0)

    plt.savefig(plotPath)
    plt.close()
0.014516
def _set_version(self, v, load=False):
    """
    Setter method for version, mapped from YANG variable
    /rbridge_id/openflow/logical_instance/version (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_version is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_version() directly.

    YANG Description: OpenFlow version
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType(
                "version_name", version.version, yang_name="version",
                rest_name="version", parent=self, is_container='list',
                user_ordered=False, path_helper=self._path_helper,
                yang_keys='version-name',
                extensions={u'tailf-common': {
                    u'callpoint': u'OpenFlowSupportedVersions',
                    u'info': u'OpenFlow version',
                    u'cli-suppress-mode': None}}),
            is_container='list', yang_name="version", rest_name="version",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {
                u'callpoint': u'OpenFlowSupportedVersions',
                u'info': u'OpenFlow version',
                u'cli-suppress-mode': None}},
            namespace='urn:brocade.com:mgmt:brocade-openflow',
            defining_module='brocade-openflow', yang_type='list',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """version must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("version_name",version.version, yang_name="version", rest_name="version", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='version-name', extensions={u'tailf-common': {u'callpoint': u'OpenFlowSupportedVersions', u'info': u'OpenFlow version', u'cli-suppress-mode': None}}), is_container='list', yang_name="version", rest_name="version", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'OpenFlowSupportedVersions', u'info': u'OpenFlow version', u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)""",
        })

    self.__version = t
    if hasattr(self, '_set'):
        self._set()
0.004281
def async_state_change_callback(self, addr, state, value):
    """Log the state change."""
    _LOGGING.info('Device %s state %s value is changed to %s',
                  addr, state, value)
0.009852
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    return distros_for_location(
        normalize_path(filename), os.path.basename(filename), metadata
    )
0.004167
def start_update(self, draw=None, queues=None):
    """
    Conduct the formerly registered updates

    This method conducts the updates that have been registered via the
    :meth:`update` method. You can call this method if the
    :attr:`no_auto_update` attribute of this instance and the `auto_update`
    parameter in the :meth:`update` method has been set to False.

    Parameters
    ----------
    draw: bool or None
        Boolean to control whether the figure of this array shall be drawn
        at the end. If None, it defaults to the ``'auto_draw'`` parameter
        in the :attr:`psyplot.rcParams` dictionary
    queues: list of :class:`Queue.Queue` instances
        The queues that are passed to the
        :meth:`psyplot.plotter.Plotter.start_update` method to ensure a
        thread-safe update. It can be None if only one single plotter is
        updated at the same time. The number of jobs that are taken from
        the queue is determined by the :meth:`_njobs` attribute. Note that
        this parameter is automatically configured when updating from a
        :class:`~psyplot.project.Project`.

    Returns
    -------
    bool
        A boolean indicating whether a redrawing is necessary or not

    See Also
    --------
    :attr:`no_auto_update`, update
    """
    if self.plotter is not None:
        return self.plotter.start_update(draw=draw, queues=queues)
0.001312
def update_frame(self, key, ranges=None, plot=None):
    """
    Updates an existing plot with data corresponding to the key.
    """
    element = self._get_frame(key)
    self._get_title_div(key, '12pt')

    # Cache frame object id to skip updating data if unchanged
    previous_id = self.handles.get('previous_id', None)
    current_id = element._plot_id
    self.handles['previous_id'] = current_id
    self.static_source = (self.dynamic and (current_id == previous_id))
    if (element is None or (not self.dynamic and self.static) or
            (self.streaming and
             self.streaming[0].data is self.current_frame.data and
             not self.streaming[0]._triggering) or
            self.static_source):
        return

    source = self.handles['source']
    style = self.lookup_options(element, 'style')[self.cyclic_index]
    data, _, style = self.get_data(element, ranges, style)
    columns = self._get_columns(element, data)
    self.handles['table'].columns = columns
    self._update_datasource(source, data)
0.00275
def startswith(self, prefix, ignore_case=False, options=None):
    """
    A query to check if a field starts with a given prefix string

    **Example**: ``session.query(Spell).filter(Spells.name.startswith("abra", ignore_case=True))``

    .. note:: This is a shortcut to .regex('^' + re.escape(prefix)).
        MongoDB optimises such prefix expressions to use indexes
        appropriately. As the prefix contains no further regex, this
        will be optimized by matching only against the prefix.
    """
    return self.regex('^' + re.escape(prefix), ignore_case=ignore_case,
                      options=options)
0.006173
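What the shortcut above expands to, demonstrated with Python's re module alone; the MongoDB-side index optimisation the docstring describes is not exercised here.

import re

pattern = re.compile('^' + re.escape('abra'), re.IGNORECASE)
assert pattern.match('Abracadabra')
assert not pattern.match('dabra')  # anchored: no mid-string matches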
def publish():
    """Publish this package to PyPI (aka "the Cheeseshop")."""
    long_description = make_long_description()
    if long_description != read(RST_DESCRIPTION_PATH):
        print("""\
Description file not up-to-date: %s
Run the following command and commit the changes--

    python setup.py %s
""" % (RST_DESCRIPTION_PATH, PREP_COMMAND))
        sys.exit()
    print("Description up-to-date: %s" % RST_DESCRIPTION_PATH)
    answer = raw_input("Are you sure you want to publish to PyPI (yes/no)?")
    if answer != "yes":
        exit("Aborted: nothing published")
    os.system('python setup.py sdist upload')
0.001587
def insertComments(self, comment=None):
    """
    Inserts comments into the editor based on the current selection.
    If no comment string is supplied, then the comment from the language
    will be used.

    :param comment | <str> || None

    :return <bool> | success
    """
    if not comment:
        lang = self.language()
        if lang:
            comment = lang.lineComment()

    if not comment:
        return False

    startline, startcol, endline, endcol = self.getSelection()
    line, col = self.getCursorPosition()

    for lineno in range(startline, endline + 1):
        self.setCursorPosition(lineno, 0)
        self.insert(comment)

    self.setSelection(startline, startcol, endline, endcol)
    self.setCursorPosition(line, col)
    return True
0.020386
def diff(s1, s2):
    """
    Return a normalised Levenshtein distance between two strings.

    Distance is normalised by dividing the Levenshtein distance of the
    two strings by the max(len(s1), len(s2)).

    Examples:
        >>> text.diff("foo", "foo")
        0.0
        >>> text.diff("foo", "fooo")
        0.25
        >>> text.diff("foo", "")
        1.0
        >>> text.diff("1234", "1 34")
        0.25

    Arguments:
        s1 (str): Argument A.
        s2 (str): Argument B.

    Returns:
        float: Normalised distance between the two strings.
    """
    return levenshtein(s1, s2) / max(len(s1), len(s2))
0.001595
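A self-contained sketch of the normalisation above, using a hypothetical dynamic-programming levenshtein() stand-in (the snippet imports its own implementation from elsewhere in its package).

def levenshtein(s1, s2):
    # Classic two-row edit-distance DP.
    prev = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1, 1):
        cur = [i]
        for j, c2 in enumerate(s2, 1):
            cur.append(min(prev[j] + 1,                # deletion
                           cur[j - 1] + 1,             # insertion
                           prev[j - 1] + (c1 != c2)))  # substitution
        prev = cur
    return prev[-1]

def diff(s1, s2):
    return levenshtein(s1, s2) / max(len(s1), len(s2))

print(diff('1234', '1 34'))  # 0.25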
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
        formatter_mediator (FormatterMediator): mediates the interactions
            between formatters and other components, such as storage and
            Windows EventLog resources.
        event (EventObject): event.

    Returns:
        tuple(str, str): formatted message string and short message string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
            event.data_type))

    event_values = event.CopyToDict()

    security = event_values.get('security', None)
    if security:
        security_flags = []
        for flag, description in iter(self._SECURITY_VALUES.items()):
            if security & flag:
                security_flags.append(description)

        security_string = '0x{0:08x}: {1:s}'.format(
            security, ','.join(security_flags))
        event_values['security'] = security_string

    for key, value in iter(event_values.items()):
        if isinstance(value, py2to3.BYTES_TYPE):
            event_values[key] = repr(value)

    return self._ConditionalFormatMessages(event_values)
0.006178
def set_cursor_y(self, y):
    """ Set Screen Cursor Y Position """
    if 0 <= y <= self.server.server_info.get("screen_height"):
        self.cursor_y = y
        self.server.request("screen_set %s cursor_y %i"
                            % (self.ref, self.cursor_y))
0.011364
def update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None,
                ConditionalOperator=None, ReturnValues=None,
                ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None,
                UpdateExpression=None, ConditionExpression=None,
                ExpressionAttributeNames=None, ExpressionAttributeValues=None):
    """
    Edits an existing item's attributes, or adds a new item to the table if it
    does not already exist. You can put, delete, or add attribute values. You
    can also perform a conditional update on an existing item (insert a new
    attribute name-value pair if it doesn't exist, or replace an existing
    name-value pair if it has certain expected attribute values).

    You can also return the item's attribute values in the same UpdateItem
    operation using the ReturnValues parameter.

    See also: AWS API Documentation

    Examples:
    This example updates an item in the Music table. It adds a new attribute
    (Year) and modifies the AlbumTitle attribute. All of the attributes in the
    item, as they appear after the update, are returned in the response.
    Expected Output:

    :example: response = client.update_item(
        TableName='string',
        Key={
            'string': {
                'S': 'string',
                'N': 'string',
                'B': b'bytes',
                'SS': ['string'],
                'NS': ['string'],
                'BS': [b'bytes'],
                'M': {'string': {'... recursive ...'}},
                'L': [{'... recursive ...'}],
                'NULL': True|False,
                'BOOL': True|False
            }
        },
        AttributeUpdates={
            'string': {
                'Value': {
                    'S': 'string',
                    'N': 'string',
                    'B': b'bytes',
                    'SS': ['string'],
                    'NS': ['string'],
                    'BS': [b'bytes'],
                    'M': {'string': {'... recursive ...'}},
                    'L': [{'... recursive ...'}],
                    'NULL': True|False,
                    'BOOL': True|False
                },
                'Action': 'ADD'|'PUT'|'DELETE'
            }
        },
        Expected={
            'string': {
                'Value': {
                    'S': 'string',
                    'N': 'string',
                    'B': b'bytes',
                    'SS': ['string'],
                    'NS': ['string'],
                    'BS': [b'bytes'],
                    'M': {'string': {'... recursive ...'}},
                    'L': [{'... recursive ...'}],
                    'NULL': True|False,
                    'BOOL': True|False
                },
                'Exists': True|False,
                'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
                'AttributeValueList': [
                    {
                        'S': 'string',
                        'N': 'string',
                        'B': b'bytes',
                        'SS': ['string'],
                        'NS': ['string'],
                        'BS': [b'bytes'],
                        'M': {'string': {'... recursive ...'}},
                        'L': [{'... recursive ...'}],
                        'NULL': True|False,
                        'BOOL': True|False
                    },
                ]
            }
        },
        ConditionalOperator='AND'|'OR',
        ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
        ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
        ReturnItemCollectionMetrics='SIZE'|'NONE',
        UpdateExpression='string',
        ConditionExpression='string',
        ExpressionAttributeNames={'string': 'string'},
        ExpressionAttributeValues={
            'string': {
                'S': 'string',
                'N': 'string',
                'B': b'bytes',
                'SS': ['string'],
                'NS': ['string'],
                'BS': [b'bytes'],
                'M': {'string': {'... recursive ...'}},
                'L': [{'... recursive ...'}],
                'NULL': True|False,
                'BOOL': True|False
            }
        }
    )

    :type TableName: string
    :param TableName: [REQUIRED]
        The name of the table containing the item to update.

    :type Key: dict
    :param Key: [REQUIRED]
        The primary key of the item to be updated. Each element consists of an
        attribute name and a value for that attribute.
        For the primary key, you must provide all of the attributes. For
        example, with a simple primary key, you only need to provide a value
        for the partition key. For a composite primary key, you must provide
        values for both the partition key and the sort key.

        (string) --
        (dict) -- Represents the data for an attribute. Each attribute value
        is described as a name-value pair. The name is the data type, and the
        value is the data itself. For more information, see Data Types in the
        Amazon DynamoDB Developer Guide.
        - S (string) -- An attribute of type String. For example: 'S': 'Hello'
        - N (string) -- An attribute of type Number. For example: 'N': '123.45'
          Numbers are sent across the network to DynamoDB as strings, to
          maximize compatibility across languages and libraries. However,
          DynamoDB treats them as number type attributes for mathematical
          operations.
        - B (bytes) -- An attribute of type Binary. For example:
          'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
        - SS (list) -- An attribute of type String Set. For example:
          'SS': ['Giraffe', 'Hippo', 'Zebra']
        - NS (list) -- An attribute of type Number Set. For example:
          'NS': ['42.2', '-19', '7.5', '3.14']
          Numbers are sent across the network to DynamoDB as strings, to
          maximize compatibility across languages and libraries. However,
          DynamoDB treats them as number type attributes for mathematical
          operations.
        - BS (list) -- An attribute of type Binary Set. For example:
          'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
        - M (dict) -- An attribute of type Map. For example:
          'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
        - L (list) -- An attribute of type List. For example:
          'L': ['Cookies', 'Coffee', 3.14159]
        - NULL (boolean) -- An attribute of type Null. For example:
          'NULL': true
        - BOOL (boolean) -- An attribute of type Boolean. For example:
          'BOOL': true

    :type AttributeUpdates: dict
    :param AttributeUpdates:
        This is a legacy parameter. Use UpdateExpression instead. For more
        information, see AttributeUpdates in the Amazon DynamoDB Developer
        Guide.

        (string) --
        (dict) -- For the UpdateItem operation, represents the attributes to
        be modified, the action to perform on each, and the new value for
        each.
        Note: You cannot use UpdateItem to update any primary key attributes.
        Instead, you will need to delete the item, and then use PutItem to
        create a new item with new attributes.
        Attribute values cannot be null; string and binary type attributes
        must have lengths greater than zero; and set type attributes must not
        be empty. Requests with empty values will be rejected with a
        ValidationException exception.

        Value (dict) -- Represents the data for an attribute, in the same
        S/N/B/SS/NS/BS/M/L/NULL/BOOL forms described under Key above.

        Action (string) -- Specifies how to perform the update. Valid values
        are PUT (default), DELETE, and ADD. The behavior depends on whether
        the specified primary key already exists in the table.
        If an item with the specified *Key* is found in the table:
        - PUT - Adds the specified attribute to the item. If the attribute
          already exists, it is replaced by the new value.
        - DELETE - If no value is specified, the attribute and its value are
          removed from the item. The data type of the specified value must
          match the existing value's data type. If a set of values is
          specified, then those values are subtracted from the old set. For
          example, if the attribute value was the set [a,b,c] and the DELETE
          action specified [a,c], then the final attribute value would be [b].
          Specifying an empty set is an error.
        - ADD - If the attribute does not already exist, then the attribute
          and its values are added to the item. If the attribute does exist,
          then the behavior of ADD depends on the data type of the attribute:
          - If the existing attribute is a number, and if Value is also a
            number, then the Value is mathematically added to the existing
            attribute. If Value is a negative number, then it is subtracted
            from the existing attribute.
            Note: If you use ADD to increment or decrement a number value for
            an item that doesn't exist before the update, DynamoDB uses 0 as
            the initial value. In addition, if you use ADD to update an
            existing item, and intend to increment or decrement an attribute
            value which does not yet exist, DynamoDB uses 0 as the initial
            value. For example, suppose that the item you want to update does
            not yet have an attribute named itemcount, but you decide to ADD
            the number 3 to this attribute anyway, even though it currently
            does not exist. DynamoDB will create the itemcount attribute, set
            its initial value to 0, and finally add 3 to it. The result will
            be a new itemcount attribute in the item, with a value of 3.
          - If the existing data type is a set, and if the Value is also a
            set, then the Value is added to the existing set. (This is a set
            operation, not mathematical addition.) For example, if the
            attribute value was the set [1,2], and the ADD action specified
            [3], then the final attribute value would be [1,2,3]. An error
            occurs if an ADD action is specified for a set attribute and the
            attribute type specified does not match the existing set type.
            Both sets must have the same primitive data type. For example, if
            the existing data type is a set of strings, the Value must also
            be a set of strings.
          This action is only valid for an existing attribute whose data type
          is number or is a set. Do not use ADD for any other data types.
        If no item with the specified *Key* is found:
        - PUT - DynamoDB creates a new item with the specified primary key,
          and then adds the attribute.
        - DELETE - Nothing happens; there is no attribute to delete.
        - ADD - DynamoDB creates an item with the supplied primary key and
          number (or set of numbers) for the attribute value. The only data
          types allowed are number and number set; no other data types can be
          specified.

    :type Expected: dict
    :param Expected:
        This is a legacy parameter. Use ConditionExpression instead. For more
        information, see Expected in the Amazon DynamoDB Developer Guide.

        (string) --
        (dict) -- Represents a condition to be compared with an attribute
        value. This condition can be used with DeleteItem, PutItem or
        UpdateItem operations; if the comparison evaluates to true, the
        operation succeeds; if not, the operation fails. You can use
        ExpectedAttributeValue in one of two different ways:
        - Use AttributeValueList to specify one or more values to compare
          against an attribute. Use ComparisonOperator to specify how you
          want to perform the comparison. If the comparison evaluates to
          true, then the conditional operation succeeds.
        - Use Value to specify a value that DynamoDB will compare against an
          attribute. If the values match, then ExpectedAttributeValue
          evaluates to true and the conditional operation succeeds.
          Optionally, you can also set Exists to false, indicating that you
          do not expect to find the attribute value in the table. In this
          case, the conditional operation succeeds only if the comparison
          evaluates to false.
        Value and Exists are incompatible with AttributeValueList and
        ComparisonOperator. Note that if you use both sets of parameters at
        once, DynamoDB will return a ValidationException exception.

        Value (dict) -- Represents the data for the expected attribute, in
        the same S/N/B/SS/NS/BS/M/L/NULL/BOOL forms described under Key above.

        Exists (boolean) -- Causes DynamoDB to evaluate the value before
        attempting a conditional operation:
        - If Exists is true, DynamoDB will check to see if that attribute
          value already exists in the table. If it is found, then the
          operation succeeds. If it is not found, the operation fails with a
          ConditionalCheckFailedException.
        - If Exists is false, DynamoDB assumes that the attribute value does
          not exist in the table. If in fact the value does not exist, then
          the assumption is valid and the operation succeeds. If the value is
          found, despite the assumption that it does not exist, the operation
          fails with a ConditionalCheckFailedException.
        The default setting for Exists is true. If you supply a Value all by
        itself, DynamoDB assumes the attribute exists: You don't have to set
        Exists to true, because it is implied.
        DynamoDB returns a ValidationException if:
        - Exists is true but there is no Value to check. (You expect a value
          to exist, but don't specify what that value is.)
        - Exists is false but you also provide a Value. (You cannot expect an
          attribute to have a value, while also expecting it not to exist.)

        ComparisonOperator (string) -- A comparator for evaluating attributes
        in the AttributeValueList. For example, equals, greater than, less
        than, etc. The following comparison operators are available:
        EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS |
        NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
        The following are descriptions of each comparison operator.
        - EQ : Equal. EQ is supported for all data types, including lists and
          maps. AttributeValueList can contain only one AttributeValue
          element of type String, Number, Binary, String Set, Number Set, or
          Binary Set. If an item contains an AttributeValue element of a
          different type than the one provided in the request, the value does
          not match. For example, {'S':'6'} does not equal {'N':'6'}. Also,
          {'N':'6'} does not equal {'NS':['6', '2', '1']}.
        - NE : Not equal. NE is supported for all data types, including lists
          and maps. AttributeValueList can contain only one AttributeValue of
          type String, Number, Binary, String Set, Number Set, or Binary Set.
          If an item contains an AttributeValue of a different type than the
          one provided in the request, the value does not match. For example,
          {'S':'6'} does not equal {'N':'6'}. Also, {'N':'6'} does not equal
          {'NS':['6', '2', '1']}.
        - LE : Less than or equal. AttributeValueList can contain only one
          AttributeValue element of type String, Number, or Binary (not a set
          type). If an item contains an AttributeValue element of a different
          type than the one provided in the request, the value does not
          match. For example, {'S':'6'} does not equal {'N':'6'}. Also,
          {'N':'6'} does not compare to {'NS':['6', '2', '1']}.
        - LT : Less than. AttributeValueList can contain only one
          AttributeValue of type String, Number, or Binary (not a set type).
          If an item contains an AttributeValue element of a different type
          than the one provided in the request, the value does not match. For
          example, {'S':'6'} does not equal {'N':'6'}. Also, {'N':'6'} does
          not compare to {'NS':['6', '2', '1']}.
        - GE : Greater than or equal. AttributeValueList can contain only one
          AttributeValue element of type String, Number, or Binary (not a set
          type). If an item contains an AttributeValue element of a different
          type than the one provided in the request, the value does not
          match. For example, {'S':'6'} does not equal {'N':'6'}. Also,
          {'N':'6'} does not compare to {'NS':['6', '2', '1']}.
        - GT : Greater than. AttributeValueList can contain only one
          AttributeValue element of type String, Number, or Binary (not a set
          type). If an item contains an AttributeValue element of a different
          type than the one provided in the request, the value does not
          match. For example, {'S':'6'} does not equal {'N':'6'}. Also,
          {'N':'6'} does not compare to {'NS':['6', '2', '1']}.
        - NOT_NULL : The attribute exists. NOT_NULL is supported for all data
          types, including lists and maps.
          Note: This operator tests for the existence of an attribute, not
          its data type. If the data type of attribute 'a' is null, and you
          evaluate it using NOT_NULL, the result is a Boolean true. This
          result is because the attribute 'a' exists; its data type is not
          relevant to the NOT_NULL comparison operator.
        - NULL : The attribute does not exist. NULL is supported for all data
          types, including lists and maps.
          Note: This operator tests for the nonexistence of an attribute, not
          its data type. If the data type of attribute 'a' is null, and you
          evaluate it using NULL, the result is a Boolean false. This is
          because the attribute 'a' exists; its data type is not relevant to
          the NULL comparison operator.
        - CONTAINS : Checks for a subsequence, or value in a set.
          AttributeValueList can contain only one AttributeValue element of
          type String, Number, or Binary (not a set type). If the target
          attribute of the comparison is of type String, then the operator
          checks for a substring match. If the target attribute of the
          comparison is of type Binary, then the operator looks for a
          subsequence of the target that matches the input. If the target
          attribute of the comparison is a set ('SS', 'NS', or 'BS'), then
          the operator evaluates to true if it finds an exact match with any
          member of the set. CONTAINS is supported for lists: When evaluating
          'a CONTAINS b', 'a' can be a list; however, 'b' cannot be a set, a
          map, or a list.
        - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a
          value in a set. AttributeValueList can contain only one
          AttributeValue element of type String, Number, or Binary (not a set
          type). If the target attribute of the comparison is a String, then
          the operator checks for the absence of a substring match. If the
          target attribute of the comparison is Binary, then the operator
          checks for the absence of a subsequence of the target that matches
          the input. If the target attribute of the comparison is a set
          ('SS', 'NS', or 'BS'), then the operator evaluates to true if it
          does not find an exact match with any member of the set.
          NOT_CONTAINS is supported for lists: When evaluating
          'a NOT CONTAINS b', 'a' can be a list; however, 'b' cannot be a
          set, a map, or a list.
        - BEGINS_WITH : Checks for a prefix. AttributeValueList can contain
          only one AttributeValue of type String or Binary (not a Number or a
          set type). The target attribute of the comparison must be of type
          String or Binary (not a Number or a set type).
        - IN : Checks for matching elements in a list. AttributeValueList can
          contain one or more AttributeValue elements of type String, Number,
          or Binary. These attributes are compared against an existing
          attribute of an item. If any elements of the input are equal to the
          item attribute, the expression evaluates to true.
        - BETWEEN : Greater than or equal to the first value, and less than
          or equal to the second value. AttributeValueList must contain two
          AttributeValue elements of the same type, either String, Number, or
          Binary (not a set type). A target attribute matches if the target
          value is greater than, or equal to, the first element and less
          than, or equal to, the second element. If an item contains an
          AttributeValue element of a different type than the one provided in
          the request, the value does not match. For example, {'S':'6'} does
          not compare to {'N':'6'}. Also, {'N':'6'} does not compare to
          {'NS':['6', '2', '1']}

        AttributeValueList (list) -- One or more values to evaluate against
        the supplied attribute. The number of values in the list depends on
        the ComparisonOperator being used.
        For type Number, value comparisons are numeric.
        String value comparisons for greater than, equals, or less than are
        based on ASCII character code values. For example, a is greater than
        A, and a is greater than B. For a list of code values, see
        http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
        For Binary, DynamoDB treats each byte of the binary data as unsigned
        when it compares binary values.
        For information on specifying data types in JSON, see JSON Data
        Format in the Amazon DynamoDB Developer Guide.
        (dict) -- Represents the data for an attribute, in the same
        S/N/B/SS/NS/BS/M/L/NULL/BOOL forms described under Key above.

    :type ConditionalOperator: string
    :param ConditionalOperator:
        This is a legacy parameter. Use ConditionExpression instead. For more
        information, see ConditionalOperator in the Amazon DynamoDB Developer
        Guide.

    :type ReturnValues: string
    :param ReturnValues:
        Use ReturnValues if you want to get the item attributes as they
        appeared either before or after they were updated. For UpdateItem,
        the valid values are:
        - NONE - If ReturnValues is not specified, or if its value is NONE,
          then nothing is returned. (This setting is the default for
          ReturnValues.)
        - ALL_OLD - Returns all of the attributes of the item, as they
          appeared before the UpdateItem operation.
        - UPDATED_OLD - Returns only the updated attributes, as they appeared
          before the UpdateItem operation.
        - ALL_NEW - Returns all of the attributes of the item, as they appear
          after the UpdateItem operation.
        - UPDATED_NEW - Returns only the updated attributes, as they appear
          after the UpdateItem operation.
        There is no additional cost associated with requesting a return value
        aside from the small network and processing overhead of receiving a
        larger response. No Read Capacity Units are consumed.
        Values returned are strongly consistent.

    :type ReturnConsumedCapacity: string
    :param ReturnConsumedCapacity:
        Determines the level of detail about provisioned throughput
        consumption that is returned in the response:
        - INDEXES - The response includes the aggregate ConsumedCapacity for
          the operation, together with ConsumedCapacity for each table and
          secondary index that was accessed. Note that some operations, such
          as GetItem and BatchGetItem, do not access any indexes at all. In
          these cases, specifying INDEXES will only return ConsumedCapacity
          information for table(s).
        - TOTAL - The response includes only the aggregate ConsumedCapacity
          for the operation.
        - NONE - No ConsumedCapacity details are included in the response.

    :type ReturnItemCollectionMetrics: string
    :param ReturnItemCollectionMetrics:
        Determines whether item collection metrics are returned. If set to
        SIZE, the response includes statistics about item collections, if
        any, that were modified during the operation are returned in the
        response. If set to NONE (the default), no statistics are returned.

    :type UpdateExpression: string
    :param UpdateExpression:
        An expression that defines one or more attributes to be updated, the
        action to be performed on them, and new value(s) for them.
        The following action values are available for UpdateExpression.
        - SET - Adds one or more attributes and values to an item. If any of
          these attributes already exist, they are replaced by the new
          values. You can also use SET to add or subtract from an attribute
          that is of type Number. For example: SET myNum = myNum + :val
          SET supports the following functions:
          - if_not_exists (path, operand) - if the item does not contain an
            attribute at the specified path, then if_not_exists evaluates to
            operand; otherwise, it evaluates to path. You can use this
            function to avoid overwriting an attribute that may already be
            present in the item.
          - list_append (operand, operand) - evaluates to a list with a new
            element added to it. You can append the new element to the start
            or the end of the list by reversing the order of the operands.
          These function names are case-sensitive.
        - REMOVE - Removes one or more attributes from an item.
        - ADD - Adds the specified value to the item, if the attribute does
          not already exist. If the attribute does exist, then the behavior
          of ADD depends on the data type of the attribute:
          - If the existing attribute is a number, and if Value is also a
            number, then Value is mathematically added to the existing
            attribute. If Value is a negative number, then it is subtracted
            from the existing attribute.
            Note: If you use ADD to increment or decrement a number value for
            an item that doesn't exist before the update, DynamoDB uses 0 as
            the initial value. Similarly, if you use ADD for an existing item
            to increment or decrement an attribute value that doesn't exist
            before the update, DynamoDB uses 0 as the initial value. For
            example, suppose that the item you want to update doesn't have an
            attribute named itemcount, but you decide to ADD the number 3 to
            this attribute anyway. DynamoDB will create the itemcount
            attribute, set its initial value to 0, and finally add 3 to it.
            The result will be a new itemcount attribute in the item, with a
            value of 3.
          - If the existing data type is a set and if Value is also a set,
            then Value is added to the existing set. For example, if the
            attribute value is the set [1,2], and the ADD action specified
            [3], then the final attribute value is [1,2,3]. An error occurs
            if an ADD action is specified for a set attribute and the
            attribute type specified does not match the existing set type.
            Both sets must have the same primitive data type. For example, if
            the existing data type is a set of strings, the Value must also
            be a set of strings.
          Warning: The ADD action only supports Number and set data types. In
          addition, ADD can only be used on top-level attributes, not nested
          attributes.
        - DELETE - Deletes an element from a set. If a set of values is
          specified, then those values are subtracted from the old set. For
          example, if the attribute value was the set [a,b,c] and the DELETE
          action specifies [a,c], then the final attribute value is [b].
          Specifying an empty set is an error.
          Warning: The DELETE action only supports set data types. In
          addition, DELETE can only be used on top-level attributes, not
          nested attributes.
        You can have many actions in a single expression, such as the
        following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
        For more information on update expressions, see Modifying Items and
        Attributes in the Amazon DynamoDB Developer Guide.

    :type ConditionExpression: string
    :param ConditionExpression:
        A condition that must be satisfied in order for a conditional update
        to succeed.
        An expression can contain any of the following:
        - Functions: attribute_exists | attribute_not_exists | attribute_type
          | contains | begins_with | size
          These function names are case-sensitive.
        - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
        - Logical operators: AND | OR | NOT
        For more information on condition expressions, see Specifying
        Conditions in the Amazon DynamoDB Developer Guide.

    :type ExpressionAttributeNames: dict
    :param ExpressionAttributeNames:
        One or more substitution tokens for attribute names in an expression.
        The following are some use cases for using ExpressionAttributeNames:
        - To access an attribute whose name conflicts with a DynamoDB
          reserved word.
        - To create a placeholder for repeating occurrences of an attribute
          name in an expression.
        - To prevent special characters in an attribute name from being
          misinterpreted in an expression.
        Use the # character in an expression to dereference an attribute
        name. For example, consider the following attribute name: Percentile
        The name of this attribute conflicts with a reserved word, so it
        cannot be used directly in an expression. (For the complete list of
        reserved words, see Reserved Words in the Amazon DynamoDB Developer
        Guide). To work around this, you could specify the following for
        ExpressionAttributeNames: {'#P':'Percentile'}
        You could then use this substitution in an expression, as in this
        example: #P = :val
        Note: Tokens that begin with the : character are expression attribute
        values, which are placeholders for the actual value at runtime.
        For more information on expression attribute names, see Accessing
        Item Attributes in the Amazon DynamoDB Developer Guide.
        (string) -- (string) --

    :type ExpressionAttributeValues: dict
    :param ExpressionAttributeValues:
        One or more values that can be substituted in an expression.
        Use the : (colon) character in an expression to dereference an
        attribute value. For example, suppose that you wanted to check
        whether the value of the ProductStatus attribute was one of the
        following: Available | Backordered | Discontinued
        You would first need to specify ExpressionAttributeValues as follows:
        { ':avail':{'S':'Available'}, ':back':{'S':'Backordered'},
        ':disc':{'S':'Discontinued'} }
        You could then use these values in an expression, such as this:
        ProductStatus IN (:avail, :back, :disc)
        For more information on expression attribute values, see Specifying
        Conditions in the Amazon DynamoDB Developer Guide.
        (string) --
        (dict) -- Represents the data for an attribute, in the same
        S/N/B/SS/NS/BS/M/L/NULL/BOOL forms described under Key above.

    :rtype: dict
    :return: {
        'Attributes': {
            'string': {
                'S': 'string',
                'N': 'string',
                'B': b'bytes',
                'SS': ['string'],
                'NS': ['string'],
                'BS': [b'bytes'],
                'M': {'string': {'... recursive ...'}},
                'L': [{'... recursive ...'}],
                'NULL': True|False,
                'BOOL': True|False
            }
        },
        'ConsumedCapacity': {
            'TableName': 'string',
            'CapacityUnits': 123.0,
            'Table': {'CapacityUnits': 123.0},
            'LocalSecondaryIndexes': {'string': {'CapacityUnits': 123.0}},
            'GlobalSecondaryIndexes': {'string': {'CapacityUnits': 123.0}}
        },
        'ItemCollectionMetrics': {
            'ItemCollectionKey': {
                'string': {
                    'S': 'string',
                    'N': 'string',
                    'B': b'bytes',
                    'SS': ['string'],
                    'NS': ['string'],
                    'BS': [b'bytes'],
                    'M': {'string': {'... recursive ...'}},
                    'L': [{'... recursive ...'}],
                    'NULL': True|False,
                    'BOOL': True|False
                }
            },
            'SizeEstimateRangeGB': [123.0]
        }
    }

    :returns:
        (string) --
    """
    pass
0.003785
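A minimal usage sketch for the update-expression documentation above, assuming a boto3 DynamoDB client; the table name 'Products', key, and attribute names are hypothetical, but the update_item parameters follow the documented API.

import boto3

# Hypothetical table and attribute names; parameter shapes follow the
# update_item documentation above.
client = boto3.client('dynamodb')
client.update_item(
    TableName='Products',
    Key={'ProductId': {'S': 'p-100'}},
    # SET a new price and ADD 3 to a counter that may not exist yet
    # (DynamoDB initializes it to 0 first, as described above).
    UpdateExpression='SET #P = :price ADD itemcount :inc',
    # Only update while the product is in one of the listed states.
    ConditionExpression='ProductStatus IN (:avail, :back)',
    # '#' tokens dereference attribute names (placeholder usage).
    ExpressionAttributeNames={'#P': 'Price'},
    # ':' tokens dereference attribute values.
    ExpressionAttributeValues={
        ':price': {'N': '19.99'},
        ':inc': {'N': '3'},
        ':avail': {'S': 'Available'},
        ':back': {'S': 'Backordered'},
    },
)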
def do_args(self, modargs, send, nick, target, source, name, msgtype): """Handle the various args that modules need.""" realargs = {} args = { 'nick': nick, 'handler': self, 'db': None, 'config': self.config, 'source': source, 'name': name, 'type': msgtype, 'botnick': self.connection.real_nickname, 'target': target if target[0] == "#" else "private", 'do_kick': lambda target, nick, msg: self.do_kick(send, target, nick, msg), 'is_admin': lambda nick: self.is_admin(send, nick), 'abuse': lambda nick, limit, cmd: self.abusecheck(send, nick, target, limit, cmd) } for arg in modargs: if arg in args: realargs[arg] = args[arg] else: raise Exception("Invalid Argument: %s" % arg) return realargs
0.004246
def from_attribute(attr):
    """
    Converts an attribute into a shadow attribute.

    :param attr: :class:`MispAttribute` instance to be converted
    :returns: Converted :class:`MispShadowAttribute`
    :example:

    >>> server = MispServer()
    >>> event = server.events.get(12)
    >>> attr = event.attributes[0]
    >>> prop = MispShadowAttribute.from_attribute(attr)
    """
    # The argument must be a MispAttribute instance, not the class itself.
    assert isinstance(attr, MispAttribute)
    prop = MispShadowAttribute()
    prop.distribution = attr.distribution
    prop.type = attr.type
    prop.comment = attr.comment
    prop.value = attr.value
    prop.category = attr.category
    prop.to_ids = attr.to_ids
    return prop
0.002703
def cursor(self, user=None, configuration=None, convert_types=True,
           dictify=False, fetch_error=True):
    """Get a cursor from the HiveServer2 (HS2) connection.

    Parameters
    ----------
    user : str, optional
    configuration : dict of str keys and values, optional
        Configuration overlay for the HS2 session.
    convert_types : bool, optional
        When `False`, timestamps and decimal values will not be converted
        to Python `datetime` and `Decimal` values. (These conversions are
        expensive.) Only applies when using HS2 protocol versions > 6.
    dictify : bool, optional
        When `True` cursor will return key value pairs instead of rows.
    fetch_error : bool, optional
        In versions of Impala prior to 2.7.0, when an operation fails and
        the impalad returns an error state, the error message is not
        always returned. In these cases the error message can be retrieved
        by a subsequent fetch rpc call, but this has the side effect of
        invalidating the query handle and causing any further operations
        against it to fail, e.g. calling log() or profile(). When set to
        `True`, impyla will attempt to fetch the error message. When set
        to `False`, impyla will not attempt to fetch the message with a
        fetch call; in this case the query handle remains valid and impyla
        will raise an exception with a message of
        "Operation is in ERROR_STATE". The default is `True`.

    Returns
    -------
    HiveServer2Cursor
        A `Cursor` object (DB API 2.0-compliant).
    """
    # PEP 249
    log.debug('Getting a cursor (Impala session)')

    if user is None:
        user = getpass.getuser()

    log.debug('.cursor(): getting new session_handle')

    session = self.service.open_session(user, configuration)

    log.debug('HiveServer2Cursor(service=%s, session_handle=%s, '
              'default_config=%s, hs2_protocol_version=%s)',
              self.service, session.handle, session.config,
              session.hs2_protocol_version)

    cursor_class = HiveServer2DictCursor if dictify else HiveServer2Cursor

    cursor = cursor_class(session, convert_types=convert_types,
                          fetch_error=fetch_error)

    if self.default_db is not None:
        log.info('Using database %s as default', self.default_db)
        cursor.execute('USE %s' % self.default_db)
    return cursor
0.001443
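A short usage sketch for the cursor options above, assuming an impyla connection created with impala.dbapi.connect; the host and port are placeholders.

from impala.dbapi import connect

# Placeholder host/port. convert_types=False skips the expensive
# timestamp/Decimal conversion; dictify=True makes fetches return dicts.
conn = connect(host='impala-host', port=21050)
cur = conn.cursor(user='analyst', convert_types=False, dictify=True)
cur.execute('SELECT 1 AS one')
print(cur.fetchall())
cur.close()
conn.close()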
def list_jobs(self, state=None, limit=None, marker=None): """ListJobs https://apidocs.joyent.com/manta/api.html#ListJobs Limitation: at this time `list_jobs` doesn't support paging through more than the default response num results. (TODO) @param state {str} Only return jobs in the given state, e.g. "running", "done", etc. @param limit TODO @param marker TODO @returns jobs {list} """ log.debug('ListJobs') path = "/%s/jobs" % self.account query = {} if state: query["state"] = state if limit: query["limit"] = limit if marker: query["marker"] = marker res, content = self._request(path, "GET", query=query) if res["status"] != "200": raise errors.MantaAPIError(res, content) lines = content.splitlines(False) jobs = [] for line in lines: if not line.strip(): continue try: jobs.append(json.loads(line)) except ValueError: raise errors.MantaError('invalid job entry: %r' % line) return jobs
0.001656
def progress(self, enumerable, task_progress_object=None): """ Enables the object to be used as an iterator. Each iteration will produce a progress update in the logger. :param enumerable: Collection to iterate over. :param task_progress_object: [Optional] TaskProgress object holding the progress bar information. :return: The logger instance. """ self.list = enumerable self.list_length = len(enumerable) self.task_id = uuid.uuid4() self.index = 0 if task_progress_object: # Force total attribute task_progress_object.total = self.list_length else: task_progress_object = TaskProgress(total=self.list_length, display_time=True, prefix='Progress') # Create a task progress self.set_task_object(task_id=self.task_id, task_progress_object=task_progress_object) return self
0.005319
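A sketch of how the progress wrapper above would be driven; 'log' is assumed to be an instance of the surrounding logger class, and the per-item work is hypothetical. Per the docstring, the returned object is iterable and each iteration emits a progress update.

# 'log' and 'handle' are assumed/hypothetical names for illustration only.
files = ['a.csv', 'b.csv', 'c.csv']
for path in log.progress(files):
    handle(path)  # one progress update is logged per iteration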
def create(self, instance, errors): """ Create an instance of a model. :param instance: The created model instance. :param errors: Any errors. :return: The created model instance, or a dictionary of errors. """ if errors: return self.errors(errors) return self.created(instance)
0.005634
def getDisplayName(self):
    """Provides a name for display purposes"""
    displayName = self.name + "("
    if self.isAsync:
        displayName = "async " + displayName
    first = True
    for arg in self.arguments:
        if first:
            displayName += str(arg)
            first = False
        else:
            displayName += ", " + str(arg)
    displayName += ")"
    if self.returnAnnotation is not None:
        displayName += ' -> ' + self.returnAnnotation
    return displayName
0.003584
def local_position_ned_send(self, time_boot_ms, x, y, z, vx, vy, vz, force_mavlink1=False): ''' The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) x : X Position (float) y : Y Position (float) z : Z Position (float) vx : X Speed (float) vy : Y Speed (float) vz : Z Speed (float) ''' return self.send(self.local_position_ned_encode(time_boot_ms, x, y, z, vx, vy, vz), force_mavlink1=force_mavlink1)
0.005139
def _url(self): """Get the URL for the resource""" if self.ID_NAME not in self.route.keys() and "id" in self.data.keys(): self.route[self.ID_NAME] = self.data["id"] return self.config.BASE + self.PATH.format(**self.route)
0.007782
def install_program(self): """ install supervisor program config file """ text = templ_program.render(**self.options) config = Configuration(self.buildout, self.program + '.conf', { 'deployment': self.deployment_name, 'directory': os.path.join(self.options['etc-directory'], 'conf.d'), 'text': text}) return [config.install()]
0.004878
def introspectRemoteObject(self, busName, objectPath, replaceKnownInterfaces=False): """ Calls org.freedesktop.DBus.Introspectable.Introspect @type busName: C{string} @param busName: Name of the bus containing the object @type objectPath: C{string} @param objectPath: Object Path to introspect @type replaceKnownInterfaces: C{bool} @param replaceKnownInterfaces: If True (defaults to False), the content of the introspected XML will override any pre-existing definitions of the contained interfaces. @rtype: L{twisted.internet.defer.Deferred} @returns: a Deferred to a list of L{interface.DBusInterface} instances created from the content of the introspected XML description of the object's interface. """ d = self.callRemote( objectPath, 'Introspect', interface='org.freedesktop.DBus.Introspectable', destination=busName, ) def ok(xml_str): return introspection.getInterfacesFromXML( xml_str, replaceKnownInterfaces ) def err(e): raise error.IntrospectionFailed( 'Introspection Failed: ' + e.getErrorMessage() ) d.addCallbacks(ok, err) return d
0.002125
def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath): """Writes a .git file containing a (preferably) relative path to the actual git module repository. It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir :note: will overwrite existing files ! :note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed if it becomes one :param working_tree_dir: directory to write the .git file into :param module_abspath: absolute path to the bare repository """ git_file = osp.join(working_tree_dir, '.git') rela_path = osp.relpath(module_abspath, start=working_tree_dir) if is_win: if osp.isfile(git_file): os.remove(git_file) with open(git_file, 'wb') as fp: fp.write(("gitdir: %s" % rela_path).encode(defenc)) with GitConfigParser(osp.join(module_abspath, 'config'), read_only=False, merge_includes=False) as writer: writer.set_value('core', 'worktree', to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath)))
0.005058
def asnumpy(self): """Copy the data from TCMPS into a new numpy ndarray""" # Create C variables that will serve as out parameters for TCMPS. data_ptr = _ctypes.POINTER(_ctypes.c_float)() # float* data_ptr shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)() # size_t* shape_ptr dim = _ctypes.c_size_t() # size_t dim # Obtain pointers into memory owned by the C++ object self.handle. # Note that this may trigger synchronization with another thread # producing the data. status_code = self._LIB.TCMPSReadFloatArray( self.handle, _ctypes.byref(data_ptr), _ctypes.byref(shape_ptr), _ctypes.byref(dim)) assert status_code == 0, "Error calling TCMPSReadFloatArray" return _numpy_array_from_ctypes(data_ptr, shape_ptr, dim)
0.002339
def aggregate_tree(l_tree): """Walk a py-radix tree and aggregate it. Arguments l_tree -- radix.Radix() object """ def _aggregate_phase1(tree): # phase1 removes any supplied prefixes which are superfluous because # they are already included in another supplied prefix. For example, # 2001:67c:208c:10::/64 would be removed if 2001:67c:208c::/48 was # also supplied. n_tree = radix.Radix() for prefix in tree.prefixes(): if tree.search_worst(prefix).prefix == prefix: n_tree.add(prefix) return n_tree def _aggregate_phase2(tree): # phase2 identifies adjacent prefixes that can be combined under a # single, shorter-length prefix. For example, 2001:67c:208c::/48 and # 2001:67c:208d::/48 can be combined into the single prefix # 2001:67c:208c::/47. n_tree = radix.Radix() for rnode in tree: p = text(ip_network(text(rnode.prefix)).supernet()) r = tree.search_covered(p) if len(r) == 2: if r[0].prefixlen == r[1].prefixlen == rnode.prefixlen: n_tree.add(p) else: n_tree.add(rnode.prefix) else: n_tree.add(rnode.prefix) return n_tree l_tree = _aggregate_phase1(l_tree) if len(l_tree.prefixes()) == 1: return l_tree while True: r_tree = _aggregate_phase2(l_tree) if l_tree.prefixes() == r_tree.prefixes(): break else: l_tree = r_tree del r_tree return l_tree
0.000608
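A worked example of the two aggregation phases above, assuming the py-radix package and that aggregate_tree is importable from its module: phase 1 drops a covered /26, phase 2 merges the two adjacent /25s.

import radix

tree = radix.Radix()
for prefix in ('192.0.2.0/25', '192.0.2.128/25', '192.0.2.64/26'):
    tree.add(prefix)

aggregated = aggregate_tree(tree)
# phase1 drops 192.0.2.64/26 (already covered by 192.0.2.0/25);
# phase2 merges the two adjacent /25s under their common supernet.
print(aggregated.prefixes())  # ['192.0.2.0/24']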
def map(self, function, *iterables, **kwargs): """Returns an iterator equivalent to map(function, iterables). *chunksize* controls the size of the chunks the iterable will be broken into before being passed to the function. If None the size will be controlled by the Pool. """ self._check_pool_state() timeout = kwargs.get('timeout') chunksize = kwargs.get('chunksize', 1) if chunksize < 1: raise ValueError("chunksize must be >= 1") futures = [self.schedule(process_chunk, args=(function, chunk)) for chunk in iter_chunks(chunksize, *iterables)] map_future = MapFuture(futures) if not futures: map_future.set_result(MapResults(futures)) return map_future def done_map(_): if not map_future.done(): map_future.set_result(MapResults(futures, timeout=timeout)) for future in futures: future.add_done_callback(done_map) setattr(future, 'map_future', map_future) return map_future
0.001799
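A usage sketch for the pool map above, assuming pebble's ProcessPool; result() on the returned map future yields the results in input order.

from pebble import ProcessPool

def square(x):
    return x * x

if __name__ == '__main__':  # guard required for process-based pools on some platforms
    with ProcessPool() as pool:
        future = pool.map(square, range(6), chunksize=2)
        print(list(future.result()))  # [0, 1, 4, 9, 16, 25]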
def check_size_all(self):
    """ Get the size of each home directory and update the data on the server """
    accounts = self.rpc_srv.get_all_account(self.token)
    for it in accounts:
        size = getFolderSize(it["path"])
        self.rpc_srv.set_account_size(self.token, it["id"], size)
0.005714
def buy(self, quantity, **kwargs): """ Shortcut for ``instrument.order("BUY", ...)`` and accepts all of its `optional parameters <#qtpylib.instrument.Instrument.order>`_ :Parameters: quantity : int Order quantity """ self.parent.order("BUY", self, quantity=quantity, **kwargs)
0.008696
def to_xdr_object(self): """Creates an XDR Operation object that represents this :class:`Operation`. """ try: source_account = [account_xdr_object(self.source)] except StellarAddressInvalidError: source_account = [] return Xdr.types.Operation(source_account, self.body)
0.005848
def valid(self): """ ``True`` if credentials are valid, ``False`` if expired. """ if self.expiration_time: return self.expiration_time > int(time.time()) else: return True
0.008475
def show( self, filename: Optional[str] = None, show_link: bool = True, auto_open: bool = True, detect_notebook: bool = True, ) -> None: """Display the chart. Parameters ---------- filename : str, optional Save plot to this filename, otherwise it's saved to a temporary file. show_link : bool, optional Show link to plotly. auto_open : bool, optional Automatically open the plot (in the browser). detect_notebook : bool, optional Try to detect if we're running in a notebook. """ kargs = {} if detect_notebook and _detect_notebook(): py.init_notebook_mode() plot = py.iplot else: plot = py.plot if filename is None: filename = NamedTemporaryFile(prefix='plotly', suffix='.html', delete=False).name kargs['filename'] = filename kargs['auto_open'] = auto_open plot(self, show_link=show_link, **kargs)
0.004643
def init_optimizer(self):
    """
    Initializes query optimizer state.

    There are 4 internal hash tables:
    1. from type to declarations
    2. from type to declarations for non-recursive queries
    3. from type to name to declarations
    4. from type to name to declarations for non-recursive queries

    Almost every query includes declaration type information, and another
    very common query is to search for declarations by name or full name.
    These hash tables make such lookups very fast.
    """
    if self.name == '::':
        self._logger.debug(
            "preparing data structures for query optimizer - started")
    start_time = timeit.default_timer()

    self.clear_optimizer()

    for dtype in scopedef_t._impl_all_decl_types:
        self._type2decls[dtype] = []
        self._type2decls_nr[dtype] = []
        self._type2name2decls[dtype] = {}
        self._type2name2decls_nr[dtype] = {}

    self._all_decls_not_recursive = self.declarations
    self._all_decls = make_flatten(
        self._all_decls_not_recursive)
    for decl in self._all_decls:
        types = self.__decl_types(decl)
        for type_ in types:
            self._type2decls[type_].append(decl)
            name2decls = self._type2name2decls[type_]
            if decl.name not in name2decls:
                name2decls[decl.name] = []
            name2decls[decl.name].append(decl)
            if self is decl.parent:
                self._type2decls_nr[type_].append(decl)
                name2decls_nr = self._type2name2decls_nr[type_]
                if decl.name not in name2decls_nr:
                    name2decls_nr[decl.name] = []
                name2decls_nr[decl.name].append(decl)

    for decl in self._all_decls_not_recursive:
        if isinstance(decl, scopedef_t):
            decl.init_optimizer()
    if self.name == '::':
        self._logger.debug((
            "preparing data structures for query optimizer - " +
            "done( %f seconds ). "),
            (timeit.default_timer() - start_time))
    self._optimized = True
0.000885
def gffselect(args): """ %prog gffselect gmaplocation.bed expectedlocation.bed translated.ids tag Try to match up the expected location and gmap locations for particular genes. translated.ids was generated by fasta.translate --ids. tag must be one of "complete|pseudogene|partial". """ from jcvi.formats.bed import intersectBed_wao p = OptionParser(gffselect.__doc__) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) gmapped, expected, idsfile, tag = args data = get_tags(idsfile) completeness = dict((a.replace("mrna", "path"), c) \ for (a, b, c) in data) seen = set() idsfile = expected.rsplit(".", 1)[0] + ".ids" fw = open(idsfile, "w") cnt = 0 for a, b in intersectBed_wao(expected, gmapped): if b is None: continue aname, bbname = a.accn, b.accn bname = bbname.split(".")[0] if completeness[bbname] != tag: continue if aname == bname: if bname in seen: continue seen.add(bname) print(bbname, file=fw) cnt += 1 fw.close() logging.debug("Total {0} records written to `{1}`.".format(cnt, idsfile))
0.002349
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) if geometry.is_empty: return if not geometry.is_valid: raise ValueError("no valid geometry: %s" % geometry.type) if geometry.geom_type == "Point": yield self.tile_from_xy(geometry.x, geometry.y, zoom) elif geometry.geom_type == "MultiPoint": for point in geometry: yield self.tile_from_xy(point.x, point.y, zoom) elif geometry.geom_type in ( "LineString", "MultiLineString", "Polygon", "MultiPolygon", "GeometryCollection" ): prepared_geometry = prep(clip_geometry_to_srs_bounds(geometry, self)) for tile in self.tiles_from_bbox(geometry, zoom): if prepared_geometry.intersects(tile.bbox()): yield tile
0.00295
def create_namespace(self, name, ignore_errors=False):
    """creates a namespace if it does not exist

    args:
        name: the name of the namespace
        ignore_errors(bool): Will ignore if a namespace already exists
                or there is an error creating the namespace

    returns:
        True if the namespace was created, or if it already exists and
        ignore_errors is set; raises RuntimeError if the namespace
        already exists and ignore_errors is not set
    """
    if not self.has_namespace(name):
        self.namespaces[name] = ConjunctiveGraph()
        return True
    elif ignore_errors:
        return True
    else:
        raise RuntimeError("namespace '%s' already exists" % name)
0.002789
def remove_repositories(repositories, default_repositories):
    """
    Remove non-default repositories, keeping only those that appear
    in the default list
    """
    repos = []
    for repo in repositories:
        if repo in default_repositories:
            repos.append(repo)
    return repos
0.004082
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions): """reqScannerSubscription(EClient self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)""" return _swigibpy.EClient_reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions)
0.014245
def skos_topConcept(rdf): """Infer skos:topConceptOf/skos:hasTopConcept (S8) and skos:inScheme (S7).""" for s, o in rdf.subject_objects(SKOS.hasTopConcept): rdf.add((o, SKOS.topConceptOf, s)) for s, o in rdf.subject_objects(SKOS.topConceptOf): rdf.add((o, SKOS.hasTopConcept, s)) for s, o in rdf.subject_objects(SKOS.topConceptOf): rdf.add((s, SKOS.inScheme, o))
0.004975
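A small rdflib sketch of the S7/S8 inference above, assuming skos_topConcept is importable; the scheme and concept URIs are made up.

from rdflib import Graph, URIRef
from rdflib.namespace import SKOS

g = Graph()
scheme = URIRef('http://example.org/scheme')    # hypothetical URIs
concept = URIRef('http://example.org/concept')
g.add((scheme, SKOS.hasTopConcept, concept))

skos_topConcept(g)
# The rule adds the inverse property and the subsumed inScheme triple:
print((concept, SKOS.topConceptOf, scheme) in g)  # True
print((concept, SKOS.inScheme, scheme) in g)      # True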
def conv_cy(self, cy_cl): """Convert cycles (cy/CL) to other units, such as FLOP/s or It/s.""" if not isinstance(cy_cl, PrefixedUnit): cy_cl = PrefixedUnit(cy_cl, '', 'cy/CL') clock = self.machine['clock'] element_size = self.kernel.datatypes_size[self.kernel.datatype] elements_per_cacheline = int(self.machine['cacheline size']) // element_size it_s = clock/cy_cl*elements_per_cacheline it_s.unit = 'It/s' flops_per_it = sum(self.kernel._flops.values()) performance = it_s*flops_per_it performance.unit = 'FLOP/s' cy_it = cy_cl*elements_per_cacheline cy_it.unit = 'cy/It' return {'It/s': it_s, 'cy/CL': cy_cl, 'cy/It': cy_it, 'FLOP/s': performance}
0.003681
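A worked numeric example of the conversion above, with made-up machine and kernel parameters (plain floats stand in for PrefixedUnit): a 2.5 GHz clock, a 64-byte cache line of 8-byte doubles, and 2 FLOP per iteration.

# Assumed, illustrative parameters only:
clock = 2.5e9                      # Hz
cy_cl = 4.0                        # cycles per cache line
elements_per_cacheline = 64 // 8   # 8 iterations per cache line
flops_per_it = 2                   # e.g. one multiply and one add

it_s = clock / cy_cl * elements_per_cacheline  # 5.0e9 It/s
flop_s = it_s * flops_per_it                   # 1.0e10 FLOP/s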
def get_select_fields(self, select_fields=None): """ :return: string specifying the attributes to return """ return self.heading.as_sql if select_fields is None else self.heading.project(select_fields).as_sql
0.0125
def compose_arrays(a1, a2, firstfield='etag'): """ Compose composite arrays by generating an extended datatype containing all the fields. The two arrays must have the same length. """ assert len(a1) == len(a2), (len(a1), len(a2)) if a1.dtype.names is None and len(a1.shape) == 1: # the first array is not composite, but it is one-dimensional a1 = numpy.array(a1, numpy.dtype([(firstfield, a1.dtype)])) fields1 = [(f, a1.dtype.fields[f][0]) for f in a1.dtype.names] if a2.dtype.names is None: # the second array is not composite assert len(a2.shape) == 2, a2.shape width = a2.shape[1] fields2 = [('value%d' % i, a2.dtype) for i in range(width)] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for i in range(width): composite['value%d' % i] = a2[:, i] return composite fields2 = [(f, a2.dtype.fields[f][0]) for f in a2.dtype.names] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for f2 in dict(fields2): composite[f2] = a2[f2] return composite
0.000805
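A quick demonstration of the composition above with numpy, assuming compose_arrays is importable; two small structured arrays of equal length are merged into one extended dtype.

import numpy

a1 = numpy.zeros(2, dtype=[('etag', 'U8')])
a1['etag'] = ['e1', 'e2']
a2 = numpy.zeros(2, dtype=[('x', float), ('y', float)])
a2['x'] = [1.0, 2.0]

out = compose_arrays(a1, a2)
print(out.dtype.names)  # ('etag', 'x', 'y')
print(out['x'])         # [1. 2.]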
def remove(self, other): """Remove a particular factor from a tensor product space.""" if other is FullSpace: return TrivialSpace if other is TrivialSpace: return self if isinstance(other, ProductSpace): oops = set(other.operands) else: oops = {other} return ProductSpace.create( *sorted(set(self.operands).difference(oops)))
0.004577
def p_joinx(self, t):  # todo: support join types http://www.postgresql.org/docs/9.4/static/queries-table-expressions.html#QUERIES-JOIN
    """joinx : fromtable jointype fromtable
             | fromtable jointype fromtable kw_on expression
             | fromtable jointype fromtable kw_using '(' namelist ')'
    """
    if len(t) == 4:
        t[0] = JoinX(t[1], t[3], None, t[2])
    elif len(t) == 6:
        t[0] = JoinX(t[1], t[3], t[5], t[2])
    else:
        raise NotImplementedError('todo: join .. using')
0.028866
def segment(self, document):
    """
    document: list[str]
    return list[int],
        the i-th element denotes whether a boundary exists right before
        paragraph i (0-indexed)
    """
    # ensure document is not empty and every element is an instance of str
    assert(len(document) > 0 and len([d for d in document if not isinstance(d, str)]) == 0)

    # step 1, do preprocessing
    n = len(document)
    # integer division keeps the window size an int (required by range below)
    self.window = max(min(self.window, n // 3), 1)
    cnts = [Counter(self.tokenizer.tokenize(document[i])) for i in range(n)]

    # step 2, calculate gap score
    gap_score = [0 for _ in range(n)]
    for i in range(n):
        sz = min(min(i + 1, n - i - 1), self.window)
        lcnt, rcnt = Counter(), Counter()
        for j in range(i - sz + 1, i + 1):
            lcnt += cnts[j]
        for j in range(i + 1, i + sz + 1):
            rcnt += cnts[j]
        gap_score[i] = cosine_sim(lcnt, rcnt)

    # step 3, calculate depth score
    depth_score = [0 for _ in range(n)]
    for i in range(n):
        if i < self.window or i + self.window >= n:
            continue
        ptr = i - 1
        while ptr >= 0 and gap_score[ptr] >= gap_score[ptr + 1]:
            ptr -= 1
        lval = gap_score[ptr + 1]
        ptr = i + 1
        while ptr < n and gap_score[ptr] >= gap_score[ptr - 1]:
            ptr += 1
        rval = gap_score[ptr - 1]
        depth_score[i] = lval + rval - 2 * gap_score[i]

    # step 4, smooth depth score with fixed window size 3
    smooth_dep_score = [0 for _ in range(n)]
    for i in range(n):
        if i - 1 < 0 or i + 1 >= n:
            smooth_dep_score[i] = depth_score[i]
        else:
            smooth_dep_score[i] = np.average(depth_score[(i - 1):(i + 2)])

    # step 5, determine boundaries
    boundaries = [0 for _ in range(n)]
    avg = np.average(smooth_dep_score)
    stdev = np.std(smooth_dep_score)
    cutoff = avg - stdev / 2.0

    depth_tuples = list(zip(smooth_dep_score, list(range(len(smooth_dep_score)))))
    depth_tuples.sort()
    depth_tuples.reverse()
    hp = [x for x in depth_tuples if (x[0] > cutoff)]
    for dt in hp:
        boundaries[dt[1]] = 1
        for i in range(dt[1] - 4, dt[1] + 4 + 1):
            if i != dt[1] and i >= 0 and i < n and boundaries[i] == 1:
                boundaries[dt[1]] = 0
                break
    return [1] + boundaries[:-1]
0.002325
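The segmenter above depends on a cosine_sim helper over term-frequency Counters that is not shown; a minimal sketch of what it is assumed to compute:

import math
from collections import Counter

def cosine_sim(c1, c2):
    """Cosine similarity between two sparse term-frequency Counters (assumed helper)."""
    dot = sum(cnt * c2[token] for token, cnt in c1.items())
    norm1 = math.sqrt(sum(v * v for v in c1.values()))
    norm2 = math.sqrt(sum(v * v for v in c2.values()))
    if norm1 == 0.0 or norm2 == 0.0:
        return 0.0
    return dot / (norm1 * norm2)

print(cosine_sim(Counter('aab'), Counter('abb')))  # 0.8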
def set_permissions(self, object, replace=False):
    """
    Sets the S3 ACL grants for the given object to the appropriate
    value based on the type of Distribution.  If the Distribution
    is serving private content the ACL will be set to include the
    Origin Access Identity associated with the Distribution.  If
    the Distribution is serving public content the content will
    be set up with "public-read".

    :type object: :class:`boto.cloudfront.object.Object`
    :param object: The Object whose ACL is being set

    :type replace: bool
    :param replace: If False, the Origin Access Identity will be
                    appended to the existing ACL for the object.
                    If True, the ACL for the object will be
                    completely replaced with one that grants
                    READ permission to the Origin Access Identity.
    """
    if isinstance(self.config.origin, S3Origin):
        if self.config.origin.origin_access_identity:
            id = self.config.origin.origin_access_identity.split('/')[-1]
            oai = self.connection.get_origin_access_identity_info(id)
            policy = object.get_acl()
            if replace:
                policy.acl = ACL()
            policy.acl.add_user_grant('READ', oai.s3_user_id)
            object.set_acl(policy)
        else:
            object.set_canned_acl('public-read')
0.001338
def gzip(f, *args, **kwargs): """GZip Flask Response Decorator.""" data = f(*args, **kwargs) if isinstance(data, Response): content = data.data else: content = data gzip_buffer = BytesIO() gzip_file = gzip2.GzipFile( mode='wb', compresslevel=4, fileobj=gzip_buffer ) gzip_file.write(content) gzip_file.close() gzip_data = gzip_buffer.getvalue() if isinstance(data, Response): data.data = gzip_data data.headers['Content-Encoding'] = 'gzip' data.headers['Content-Length'] = str(len(data.data)) return data return gzip_data
0.001543
def _pyxb_from_norm_perm_list(self, norm_perm_list): """Return an AccessPolicy PyXB representation of ``norm_perm_list``""" # Using accessPolicy() instead of AccessPolicy() and accessRule() instead of # AccessRule() gives PyXB the type information required for using this as a # root element. access_pyxb = d1_common.types.dataoneTypes.accessPolicy() for perm_str, subj_list in norm_perm_list: rule_pyxb = d1_common.types.dataoneTypes.accessRule() rule_pyxb.permission.append(perm_str) for subj_str in subj_list: rule_pyxb.subject.append(subj_str) access_pyxb.allow.append(rule_pyxb) if len(access_pyxb.allow): return access_pyxb
0.005256
def _get_pattern(self, pattern_id): """Get pattern item by id.""" for key in ('PATTERNS1', 'PATTERNS2', 'PATTERNS3'): if key in self.tagged_blocks: data = self.tagged_blocks.get_data(key) for pattern in data: if pattern.pattern_id == pattern_id: return pattern return None
0.005208
def _move(self, speed=0, steering=0, seconds=None): """Move robot.""" self.drive_queue.put((speed, steering)) if seconds is not None: time.sleep(seconds) self.drive_queue.put((0, 0)) self.drive_queue.join()
0.007634
def set_context(self, cell_type): """Set protein expression amounts from CCLE as initial conditions. This method uses :py:mod:`indra.databases.context_client` to get protein expression levels for a given cell type and set initial conditions for Monomers in the model accordingly. Parameters ---------- cell_type : str Cell type name for which expression levels are queried. The cell type name follows the CCLE database conventions. Example: LOXIMVI_SKIN, BT20_BREAST """ if self.model is None: return monomer_names = [m.name for m in self.model.monomers] res = context_client.get_protein_expression(monomer_names, [cell_type]) amounts = res.get(cell_type) if not amounts: logger.warning('Could not get context for %s cell type.' % cell_type) self.add_default_initial_conditions() return self.set_expression(amounts)
0.001929
def get_advances_declines(self, as_json=False):
    """
    :return: a list of dictionaries with advance decline data
    :raises: URLError, HTTPError
    """
    url = self.advances_declines_url
    req = Request(url, None, self.headers)
    # raises URLError or HTTPError
    resp = self.opener.open(req)
    # for py3 compat, convert the byte file-like object to a
    # string file-like object
    resp = byte_adaptor(resp)
    resp_dict = json.load(resp)
    resp_list = [self.clean_server_response(item)
                 for item in resp_dict['data']]
    return self.render_response(resp_list, as_json)
0.003026
def sort_image_tiles(image, size, sorting_args, tile_size, tile_density=1.0, randomize_tiles=False): """ Sorts an image by taking various tiles and sorting them individually. :param image: The image to be modified :param size: The size of the image, as (width, height) :param sorting_args: Arguments that would be passed to sort_pixels for each tile :param tile_size: The size of each tile as (width, height) :param tile_density: What fraction of the image is covered in tiles. :param randomize_tiles: Whether tiles should be distributed randomly :return: The modified image """ out_image = list(image) width, height = size tile_width, tile_height = tile_size i = 0 total_tiles = ceil(height / float(tile_height)) * ceil(width / float(tile_width)) tiles_completed = 0 pixels_per_tiles = tile_width * tile_height for y in range(0, height, tile_height): for x in range(0, width, tile_width): # logging tiles_completed += 1 if tiles_completed % (200000 / pixels_per_tiles) == 0: logger.info("Completed %d / %d tiles... (%2.2f%%)" % (tiles_completed, total_tiles, 100.0 * tiles_completed / total_tiles)) i += 1 if randomize_tiles: # if using randomized tiles, skip a tile with probability 1 - density r = random() if r >= tile_density: continue else: # if tiles are not randomized, add a tile once every 1/density times if tile_density == 0 or i < 1.0 / tile_density: continue else: i -= 1.0 / tile_density # extract a tile, sort it, and copy it back to the image tile, current_tile_size = get_tile_from_image(image, size, (x, y), tile_size) sorted_tile = sort_image(tile, current_tile_size, **sorting_args) apply_tile_to_image(out_image, size, sorted_tile, current_tile_size, (x, y)) return out_image
0.004278
def parse(str_, lsep=",", avsep=":", vssep=",", avssep=";"):
    """Generic parser: dispatches to an attribute-list, list, or
    single-value parse depending on which separators appear in the input.

    :param str_: string to parse
    :param lsep: list item separator
    :param avsep: attribute/value separator
    :param vssep: separator between multiple values
    :param avssep: separator between attribute/value pairs
    """
    if avsep in str_:
        return parse_attrlist(str_, avsep, vssep, avssep)
    if lsep in str_:
        return parse_list(str_, lsep)
    return parse_single(str_)
0.003922
def _path_hash(path, transform, kwargs): """ Generate a hash of source file path + transform + args """ sortedargs = ["%s:%r:%s" % (key, value, type(value)) for key, value in sorted(iteritems(kwargs))] srcinfo = "{path}:{transform}:{{{kwargs}}}".format(path=os.path.abspath(path), transform=transform, kwargs=",".join(sortedargs)) return digest_string(srcinfo)
0.007937
def prediction_error(model, X, y=None, ax=None, alpha=0.75, **kwargs):
    """Quick method:

    Plot the actual targets from the dataset against the
    predicted values generated by our model(s).

    This helper function is a quick wrapper to utilize the PredictionError
    ScoreVisualizer for one-off analysis.

    Parameters
    ----------
    model : the Scikit-Learn estimator (should be a regressor)

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features.

    y : ndarray or Series of length n
        An array or series of target or class values.

    ax : matplotlib Axes
        The axes to plot the figure on.

    shared_limits : bool, default: True
        If shared_limits is True, the range of the X and Y axis limits will
        be identical, creating a square graphic with a true 45 degree line.
        In this form, it is easier to diagnose under- or over-prediction,
        though the figure will become more sparse. To localize points, set
        shared_limits to False, but note that this will distort the figure
        and should be accounted for during analysis.

    bestfit : bool, default: True
        Draw a linear best fit line to estimate the correlation between the
        predicted and measured value of the target variable. The color of
        the bestfit line is determined by the ``line_color`` argument.

    identity : bool, default: True
        Draw the 45 degree identity line, y=x in order to better show the
        relationship or pattern of the residuals. E.g. to estimate if the
        model is over- or under- estimating the given values. The color of
        the identity line is a muted version of the ``line_color`` argument.

    point_color : color
        Defines the color of the error points; can be any matplotlib color.

    line_color : color
        Defines the color of the best fit line; can be any matplotlib color.

    alpha : float, default: 0.75
        Specify a transparency where 1 is completely opaque and 0 is
        completely transparent. This property makes densely clustered
        points more visible.

    kwargs : dict
        Keyword arguments that are passed to the base class and may influence
        the visualization as defined in other Visualizers.

    Returns
    -------
    ax : matplotlib Axes
        Returns the axes that the prediction error plot was drawn on.
    """
    # Instantiate the visualizer
    visualizer = PredictionError(model, ax, alpha=alpha, **kwargs)

    # Create the train and test splits
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    # Fit and transform the visualizer (calls draw)
    visualizer.fit(X_train, y_train, **kwargs)
    visualizer.score(X_test, y_test)
    visualizer.finalize()

    # Return the axes object on the visualizer
    return visualizer.ax
0.000349
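A usage sketch for the quick method above, assuming it is importable alongside scikit-learn; the dataset and estimator are placeholders.

from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

# Placeholder data and estimator; prediction_error splits, fits, scores,
# and returns the matplotlib Axes with the plot drawn on it.
X, y = make_regression(n_samples=200, n_features=5, noise=10, random_state=0)
ax = prediction_error(Lasso(), X, y)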
def vlan_classifier_rule_class_type_proto_proto_proto_val(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan") classifier = ET.SubElement(vlan, "classifier") rule = ET.SubElement(classifier, "rule") ruleid_key = ET.SubElement(rule, "ruleid") ruleid_key.text = kwargs.pop('ruleid') class_type = ET.SubElement(rule, "class-type") proto = ET.SubElement(class_type, "proto") proto = ET.SubElement(proto, "proto") proto_val = ET.SubElement(proto, "proto-val") proto_val.text = kwargs.pop('proto_val') callback = kwargs.pop('callback', self._callback) return callback(config)
0.003793
def fcontext_policy_applied(name, recursive=False): ''' .. versionadded:: 2017.7.0 Checks and makes sure the SELinux policies for a given filespec are applied. ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} changes_text = __salt__['selinux.fcontext_policy_is_applied'](name, recursive) if changes_text == '': ret.update({'result': True, 'comment': 'SElinux policies are already applied for filespec "{0}"'.format(name)}) return ret if __opts__['test']: ret.update({'result': None}) else: apply_ret = __salt__['selinux.fcontext_apply_policy'](name, recursive) if apply_ret['retcode'] != 0: ret.update({'comment': apply_ret}) else: ret.update({'result': True}) ret.update({'changes': apply_ret.get('changes')}) return ret
0.003356
def get_rmse(self, data_x=None, data_y=None):
    """
    Get Root Mean Square Error using self.bestfit_func

    args:
        data_x: array-like, default=self.args["x"]
            x values at which the best fit function is evaluated
        data_y: array-like, default=self.args["y"]
            observed y values to compare against the fit
    """
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    rmse_y = self.bestfit_func(data_x)
    return np.sqrt(np.mean((rmse_y - data_y) ** 2))
0.002567
def find(find_all=False, latest=False, legacy=False, prerelease=False, products=None, prop=None, requires=None, requires_any=False, version=None): """ Call vswhere and return an array of the results. If `find_all` is true, finds all instances even if they are incomplete and may not launch. If `latest` is true, returns only the newest version and last installed. If `legacy` is true, also searches Visual Studio 2015 and older products. Information is limited. This option cannot be used with either products or requires. If `prerelease` is true, also searches prereleases. By default, only releases are searched. `products` is a list of one or more product IDs to find. Defaults to Community, Professional, and Enterprise if not specified. Specify ['*'] by itself to search all product instances installed. See https://aka.ms/vs/workloads for a list of product IDs. `prop` is the name of a property to return instead of the full installation details. Use delimiters '.', '/', or '_' to separate object and property names. Example: 'properties.nickname' will return the 'nickname' property under 'properties'. `requires` is a list of one or more workload component IDs required when finding instances. All specified IDs must be installed unless `requires_any` is True. See https://aka.ms/vs/workloads for a list of workload and component IDs. `version` is a version range for instances to find. Example: '[15.0,16.0)' will find versions 15.*. """ args = [] if find_all: args.append('-all') if latest: args.append('-latest') if legacy: args.append('-legacy') if prerelease: args.append('-prerelease') if products: args.append('-products') args.extend(products) if prop: args.append('-property') args.append(prop) if requires: args.append('-requires') args.extend(requires) if requires_any: args.append('-requiresAny') if version: args.append('-version') args.append(version) return execute(args)
0.004212
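Example invocations of the finder above, using only its documented parameters; the component ID is a real MSBuild workload ID, though what is found depends on the local installation.

# Newest VS instance that has MSBuild, returning only its install path:
paths = find(latest=True,
             requires=['Microsoft.Component.MSBuild'],
             prop='installationPath')

# Every 15.x instance of any product, prereleases included:
instances = find(find_all=True, products=['*'], prerelease=True,
                 version='[15.0,16.0)')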
def determine_encoding(buf):
    """Return the appropriate encoding for the given CSS source, according
    to the CSS charset rules.

    `buf` may be either a string or bytes.
    """
    # The ultimate default is utf8; bravo, W3C
    bom_encoding = 'UTF-8'

    if not buf:
        # Empty input; nothing to examine, so fall back to the default.
        return bom_encoding

    if isinstance(buf, six.text_type):
        # We got a file that, for whatever reason, produces already-decoded
        # text.  Check for the BOM (which is useless now) and believe
        # whatever's in the @charset.
        if buf[0] == '\ufeff':
            buf = buf[1:]

        # This is pretty similar to the code below, but without any encoding
        # double-checking.
        charset_start = '@charset "'
        charset_end = '";'
        if buf.startswith(charset_start):
            start = len(charset_start)
            end = buf.index(charset_end, start)
            return buf[start:end]
        else:
            return bom_encoding

    # BOMs
    if buf[:3] == b'\xef\xbb\xbf':
        bom_encoding = 'UTF-8'
        buf = buf[3:]
    if buf[:4] == b'\x00\x00\xfe\xff':
        bom_encoding = 'UTF-32BE'
        buf = buf[4:]
    elif buf[:4] == b'\xff\xfe\x00\x00':
        bom_encoding = 'UTF-32LE'
        buf = buf[4:]
    if buf[:4] == b'\x00\x00\xff\xfe':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:4] == b'\xfe\xff\x00\x00':
        raise UnicodeError("UTF-32-3412 is not supported")
    elif buf[:2] == b'\xfe\xff':
        bom_encoding = 'UTF-16BE'
        buf = buf[2:]
    elif buf[:2] == b'\xff\xfe':
        bom_encoding = 'UTF-16LE'
        buf = buf[2:]

    # The spec requires exactly this syntax; no escapes or extra spaces or
    # other shenanigans, thank goodness.
    charset_start = '@charset "'.encode(bom_encoding)
    charset_end = '";'.encode(bom_encoding)
    if buf.startswith(charset_start):
        start = len(charset_start)
        end = buf.index(charset_end, start)
        encoded_encoding = buf[start:end]
        encoding = encoded_encoding.decode(bom_encoding)

        # Ensure that decoding with the specified encoding actually produces
        # the same @charset rule
        encoded_charset = buf[:end + len(charset_end)]
        if (encoded_charset.decode(encoding) !=
                encoded_charset.decode(bom_encoding)):
            raise UnicodeError(
                "@charset {0} is incompatible with detected encoding {1}"
                .format(encoding, bom_encoding))
    else:
        # With no @charset, believe the BOM
        encoding = bom_encoding

    return encoding
0.000383
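Two worked inputs for the detection above, assuming the function is importable; both follow the branch logic directly.

# @charset rule with no BOM: the rule's own encoding wins.
print(determine_encoding(b'@charset "iso-8859-1";\nbody{}'))  # 'iso-8859-1'

# UTF-16LE BOM, no @charset: the BOM decides.
print(determine_encoding(b'\xff\xfe' + 'body{}'.encode('utf-16-le')))  # 'UTF-16LE'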
def getParam(self, key): """Returns value of Run-time Database Parameter 'key'. @param key: Run-time parameter name. @return: Run-time parameter value. """ cur = self._conn.cursor() cur.execute("SHOW %s" % key) row = cur.fetchone() return util.parse_value(row[0])
0.011494
def get_sp_metadata(self): """ Gets the SP metadata. The XML representation. :returns: SP metadata (xml) :rtype: string """ metadata = OneLogin_Saml2_Metadata.builder( self.__sp, self.__security['authnRequestsSigned'], self.__security['wantAssertionsSigned'], self.__security['metadataValidUntil'], self.__security['metadataCacheDuration'], self.get_contacts(), self.get_organization() ) add_encryption = self.__security['wantNameIdEncrypted'] or self.__security['wantAssertionsEncrypted'] cert_new = self.get_sp_cert_new() metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert_new, add_encryption) cert = self.get_sp_cert() metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert, add_encryption) # Sign metadata if 'signMetadata' in self.__security and self.__security['signMetadata'] is not False: if self.__security['signMetadata'] is True: # Use the SP's normal key to sign the metadata: if not cert: raise OneLogin_Saml2_Error( 'Cannot sign metadata: missing SP public key certificate.', OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND ) cert_metadata = cert key_metadata = self.get_sp_key() if not key_metadata: raise OneLogin_Saml2_Error( 'Cannot sign metadata: missing SP private key.', OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND ) else: # Use a custom key to sign the metadata: if ('keyFileName' not in self.__security['signMetadata'] or 'certFileName' not in self.__security['signMetadata']): raise OneLogin_Saml2_Error( 'Invalid Setting: signMetadata value of the sp is not valid', OneLogin_Saml2_Error.SETTINGS_INVALID_SYNTAX ) key_file_name = self.__security['signMetadata']['keyFileName'] cert_file_name = self.__security['signMetadata']['certFileName'] key_metadata_file = self.__paths['cert'] + key_file_name cert_metadata_file = self.__paths['cert'] + cert_file_name try: with open(key_metadata_file, 'r') as f_metadata_key: key_metadata = f_metadata_key.read() except IOError: raise OneLogin_Saml2_Error( 'Private key file not readable: %s', OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND, key_metadata_file ) try: with open(cert_metadata_file, 'r') as f_metadata_cert: cert_metadata = f_metadata_cert.read() except IOError: raise OneLogin_Saml2_Error( 'Public cert file not readable: %s', OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND, cert_metadata_file ) signature_algorithm = self.__security['signatureAlgorithm'] digest_algorithm = self.__security['digestAlgorithm'] metadata = OneLogin_Saml2_Metadata.sign_metadata(metadata, key_metadata, cert_metadata, signature_algorithm, digest_algorithm) return metadata
0.002725
def renders(col_name):
    """
        Use this decorator to map your custom Model properties to actual
        Model db properties. As an example::

            class MyModel(Model):
                id = Column(Integer, primary_key=True)
                name = Column(String(50), unique = True, nullable=False)
                custom = Column(Integer(20))

                @renders('custom')
                def my_custom(self):
                    # will render this column as bold on ListWidget
                    return Markup('<b>' + self.custom + '</b>')

            class MyModelView(ModelView):
                datamodel = SQLAInterface(MyTable)
                list_columns = ['name', 'my_custom']

    """
    def wrap(f):
        if not hasattr(f, '_col_name'):
            f._col_name = col_name
        return f
    return wrap
0.00119
def envelope(self, header, body): """ Build the B{<Envelope/>} for a SOAP outbound message. @param header: The SOAP message B{header}. @type header: L{Element} @param body: The SOAP message B{body}. @type body: L{Element} @return: The SOAP envelope containing the body and header. @rtype: L{Element} """ env = Element("Envelope", ns=envns) env.addPrefix(Namespace.xsins[0], Namespace.xsins[1]) env.append(header) env.append(body) return env
0.00361
def convertImages(self):
    """
    run this to turn all folder1 TIFs and JPGs into folder2 data.
    TIFs will be treated as micrographs and converted to JPG with
    enhanced contrast. JPGs will simply be copied over.
    """

    # copy over JPGs (and such)
    exts = ['.jpg', '.png']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID = "UNKNOWN"
        if len(fname) > 8 and fname[:8] in self.IDs:
            ID = fname[:8]
        fname2 = ID + "_jpg_" + fname
        if fname2 not in self.files2:
            self.log.info("copying over [%s]" % fname2)
            shutil.copy(os.path.join(self.folder1, fname), os.path.join(self.folder2, fname2))
        if fname[:8] + ".abf" not in self.files1:
            self.log.error("orphan image: %s", fname)

    # convert TIFs (and such) to JPGs
    exts = ['.tif', '.tiff']
    for fname in [x for x in self.files1 if cm.ext(x) in exts]:
        ID = "UNKNOWN"
        if len(fname) > 8 and fname[:8] in self.IDs:
            ID = fname[:8]
        fname2 = ID + "_tif_" + fname + ".jpg"
        if fname2 not in self.files2:
            self.log.info("converting micrograph [%s]" % fname2)
            imaging.TIF_to_jpg(os.path.join(self.folder1, fname), saveAs=os.path.join(self.folder2, fname2))
        if fname[:8] + ".abf" not in self.files1:
            self.log.error("orphan image: %s", fname)
0.01927
def _handle_autologin(self, event): """Automatic logins for client configurations that allow it""" self.log("Verifying automatic login request") # TODO: Check for a common secret # noinspection PyBroadException try: client_config = objectmodels['client'].find_one({ 'uuid': event.requestedclientuuid }) except Exception: client_config = None if client_config is None or client_config.autologin is False: self.log("Autologin failed:", event.requestedclientuuid, lvl=error) self._fail(event) return try: user_account = objectmodels['user'].find_one({ 'uuid': client_config.owner }) if user_account is None: raise AuthenticationError self.log("Autologin for", user_account.name, lvl=debug) except Exception as e: self.log("No user object due to error: ", e, type(e), lvl=error) self._fail(event) return if user_account.active is False: self.log("Account deactivated.") self._fail(event, 'Account deactivated.') return user_profile = self._get_profile(user_account) self._login(event, user_account, user_profile, client_config) self.log("Autologin successful!", lvl=warn)
0.001368
def add_from_names_to_locals(self, node): """Store imported names to the locals Resort the locals if coming from a delayed node """ _key_func = lambda node: node.fromlineno def sort_locals(my_list): my_list.sort(key=_key_func) for (name, asname) in node.names: if name == "*": try: imported = node.do_import_module() except exceptions.AstroidBuildingError: continue for name in imported.public_names(): node.parent.set_local(name, node) sort_locals(node.parent.scope().locals[name]) else: node.parent.set_local(asname or name, node) sort_locals(node.parent.scope().locals[asname or name])
0.003593
def rsync(config_file, source, target, override_cluster_name, down): """Rsyncs files. Arguments: config_file: path to the cluster yaml source: source dir target: target dir override_cluster_name: set the name of the cluster down: whether we're syncing remote -> local """ config = yaml.load(open(config_file).read()) if override_cluster_name is not None: config["cluster_name"] = override_cluster_name config = _bootstrap_config(config) head_node = _get_head_node( config, config_file, override_cluster_name, create_if_needed=False) provider = get_node_provider(config["provider"], config["cluster_name"]) try: updater = NodeUpdaterThread( node_id=head_node, provider_config=config["provider"], provider=provider, auth_config=config["auth"], cluster_name=config["cluster_name"], file_mounts=config["file_mounts"], initialization_commands=[], setup_commands=[], runtime_hash="", ) if down: rsync = updater.rsync_down else: rsync = updater.rsync_up rsync(source, target, check_error=False) finally: provider.cleanup()
0.000773
def populate(self): """Populates a new cache. """ if self.exists: raise CacheAlreadyExistsException('location: %s' % self.cache_uri) self._populate_setup() with closing(self.graph): with self._download_metadata_archive() as metadata_archive: for fact in self._iter_metadata_triples(metadata_archive): self._add_to_graph(fact)
0.004673
def addStream(self, stream, t1=None, t2=None, limit=None, i1=None, i2=None, transform=None): """Adds the given stream to the query construction. The function supports both stream names and Stream objects.""" params = query_maker(t1, t2, limit, i1, i2, transform) params["stream"] = get_stream(self.cdb, stream) # Now add the stream to the query parameters self.query.append(params)
0.009112
def do_rsync(self, line): """rsync [-m|--mirror] [-n|--dry-run] [-q|--quiet] SRC_DIR DEST_DIR Synchronizes a destination directory tree with a source directory tree. """ args = self.line_to_args(line) src_dir = resolve_path(args.src_dir) dst_dir = resolve_path(args.dst_dir) verbose = not args.quiet pf = print if args.dry_run or verbose else lambda *args : None rsync(src_dir, dst_dir, mirror=args.mirror, dry_run=args.dry_run, print_func=pf, recursed=False, sync_hidden=args.all)
0.008772
def get_notebook(self, notebook_id, format=u'json'): """Get the representation of a notebook in format by notebook_id.""" format = unicode(format) if format not in self.allowed_formats: raise web.HTTPError(415, u'Invalid notebook format: %s' % format) last_modified, nb = self.get_notebook_object(notebook_id) kwargs = {} if format == 'json': # don't split lines for sending over the wire, because it # should match the Python in-memory format. kwargs['split_lines'] = False data = current.writes(nb, format, **kwargs) name = nb.metadata.get('name','notebook') return last_modified, name, data
0.004208
def allowance(self, filename):
    """Preconditions:
    - our agent applies to this entry
    - filename is URL decoded

    Check if given filename is allowed to access this entry.

    @return: True if allowed, else False
    @rtype: bool
    """
    for line in self.rulelines:
        log.debug(LOG_CHECK, "%s %s %s", filename, str(line), line.allowance)
        if line.applies_to(filename):
            log.debug(LOG_CHECK, " ... rule line %s", line)
            return line.allowance
    log.debug(LOG_CHECK, " ... no rule lines of %s applied to %s; allowed.", self.useragents, filename)
    return True
0.007496
def pipe(self, other_task): """ Add a pipe listener to the execution of this task. The output of this task is required to be an iterable. Each item in the iterable will be queued as the sole argument to an execution of the listener task. Can also be written as:: pipeline = task1 | task2 """ other_task._source = self self._listeners.append(PipeListener(other_task)) return other_task
0.004255
def set_gl_state(self, preset=None, **kwargs): """Define the set of GL state parameters to use when drawing Parameters ---------- preset : str Preset to use. **kwargs : dict Keyword arguments to `gloo.set_state`. """ self._vshare.gl_state = kwargs self._vshare.gl_state['preset'] = preset
0.005305
def hcode(*content, sep=' '):
    """
    Make mono-width text (HTML)

    :param content: text parts to join and format
    :param sep: separator placed between the parts
    :return: HTML-quoted, monospace-formatted string
    """
    return _md(quote_html(_join(*content, sep=sep)), symbols=MD_SYMBOLS[6])
0.004926