Columns: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars)
def delete(container, queue): <NEW_LINE> <INDENT> while 1: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> batch = queue.get_nowait() <NEW_LINE> <DEDENT> except Empty: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> for name in batch: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> container.delete_object(name) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> pass
Delete from cloudfiles. Runs in a separate process.
625941b5e8904600ed9f1d14
def setOutputTimesteps(algorithm, timesteps): <NEW_LINE> <INDENT> executive = algorithm.GetExecutive() <NEW_LINE> outInfo = executive.GetOutputInformation(0) <NEW_LINE> outInfo.Remove(executive.TIME_STEPS()) <NEW_LINE> for timestep in timesteps: <NEW_LINE> <INDENT> outInfo.Append(executive.TIME_STEPS(), timestep) <NEW_LINE> <DEDENT> outInfo.Remove(executive.TIME_RANGE()) <NEW_LINE> outInfo.Append(executive.TIME_RANGE(), timesteps[0]) <NEW_LINE> outInfo.Append(executive.TIME_RANGE(), timesteps[-1])
helper routine to set timestep information
625941b5ec188e330fd5a593
def is_integer_n_tuple(expr, n): <NEW_LINE> <INDENT> return isinstance(expr, tuple) and len(expr) == n and all(isinstance(x, int) for x in expr)
Is true when `expr` is an integer tuple of length `n`. :: >>> mathtools.is_integer_n_tuple((19, 20, 21), 3) True Otherwise false: :: >>> mathtools.is_integer_n_tuple((19, 20, 'text'), 3) False Returns boolean.
625941b524f1403a92600955
def expr(self): <NEW_LINE> <INDENT> result = self.term() <NEW_LINE> while self.current_token.type in (PLUS, MINUS): <NEW_LINE> <INDENT> token = self.current_token <NEW_LINE> if token.type == PLUS: <NEW_LINE> <INDENT> self.eat(PLUS) <NEW_LINE> result += self.term() <NEW_LINE> <DEDENT> elif token.type == MINUS: <NEW_LINE> <INDENT> self.eat(MINUS) <NEW_LINE> result -= self.term() <NEW_LINE> <DEDENT> <DEDENT> return result
Arithmetic expression parser / interpreter. calc> 14 + 2 * 3 - 6 / 2 17 expr : term ((PLUS | MINUS) term)* term : factor ((MUL | DIV) factor)* factor : INTEGER
625941b5b830903b967e9703
def start_timer(self): <NEW_LINE> <INDENT> return self.send_command_async("start time")
Starts the timer. Return value is the echoed command. Note that device must already be started to start the timer.
625941b5ab23a570cc24ff6b
def _aquifer_qs(request): <NEW_LINE> <INDENT> query = request.GET <NEW_LINE> qs = Aquifer.objects.all() <NEW_LINE> resources__section__code = query.get("resources__section__code") <NEW_LINE> hydraulic = query.get('hydraulically_connected') <NEW_LINE> search = query.get('search') <NEW_LINE> match_any = query.get('match_any') == 'true' <NEW_LINE> now = timezone.now() <NEW_LINE> filters = [] <NEW_LINE> if hydraulic: <NEW_LINE> <INDENT> filters.append(Q(subtype__code__in=serializers.HYDRAULIC_SUBTYPES)) <NEW_LINE> <DEDENT> if resources__section__code: <NEW_LINE> <INDENT> for code in resources__section__code.split(','): <NEW_LINE> <INDENT> filters.append(Q(resources__section__code=code)) <NEW_LINE> <DEDENT> <DEDENT> if match_any: <NEW_LINE> <INDENT> if len(filters) > 0: <NEW_LINE> <INDENT> disjunction = filters.pop() <NEW_LINE> for filter in filters: <NEW_LINE> <INDENT> disjunction |= filter <NEW_LINE> <DEDENT> qs = qs.filter(disjunction) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for filter in filters: <NEW_LINE> <INDENT> qs = qs.filter(filter) <NEW_LINE> <DEDENT> <DEDENT> if search: <NEW_LINE> <INDENT> disjunction = Q(aquifer_name__icontains=search) <NEW_LINE> if search.isdigit(): <NEW_LINE> <INDENT> disjunction = disjunction | Q(pk=int(search)) <NEW_LINE> <DEDENT> qs = qs.filter(disjunction) <NEW_LINE> <DEDENT> if not request.user.groups.filter(name=AQUIFERS_EDIT_ROLE).exists(): <NEW_LINE> <INDENT> qs = qs.filter(effective_date__lte=now, expiry_date__gt=now) <NEW_LINE> <DEDENT> qs = qs.select_related( 'demand', 'material', 'productivity', 'subtype', 'vulnerability') <NEW_LINE> qs = qs.distinct() <NEW_LINE> return qs
We have a custom search which does a case-insensitive substring match on aquifer_name, an exact match on aquifer_id, and also looks at an array of provided resource attachments, at least one of which must be present if any are specified. The front-end doesn't use DjangoFilterBackend's querystring array syntax, preferring ?a=1,2 over ?a[]=1&a[]=2, so again we need a custom back-end implementation. @param request - the request object
625941b5cdde0d52a9e52e19
def createOutputContext(self): <NEW_LINE> <INDENT> if (self.multicast_address != "" and self.multicast_port > 0): <NEW_LINE> <INDENT> self.out_ctx = core.LiveOutboundContext(core.LiveConnectionType_sdp, self.multicast_address, self.slot, self.multicast_port) <NEW_LINE> self.outgoing_livethread.core.registerOutboundCall(self.out_ctx)
Creates an output context for sending the stream
625941b576e4537e8c351462
def __init__(self, theta_array=None, stack=None, instrument=None): <NEW_LINE> <INDENT> abstract = "simulator_XRF" <NEW_LINE> pyxcel.engine.simulator.generic.Simulator.__init__(self, theta_array, stack, instrument, abstract)
Initialization: forwards to the generic Simulator constructor with the "simulator_XRF" abstract tag.
625941b5de87d2750b85fb79
def find_and_halve_numbers(line): <NEW_LINE> <INDENT> split_line = re.split(r"(\d+)", line) <NEW_LINE> new_line = '' <NEW_LINE> for item in split_line: <NEW_LINE> <INDENT> if item.isdigit(): <NEW_LINE> <INDENT> item = str(int(item)/2) <NEW_LINE> <DEDENT> new_line += item <NEW_LINE> <DEDENT> return new_line
Find all occurrences of numbers in a line, and divide them by 2. Note! We're using regular expressions here to find the groups of numbers. This is complex and you aren't expected to know how to do this. The rest of the function is straightforward, however. Another possible solution would be to split each line word by word with split() and test whether each "word" is a number.
625941b5d18da76e235322bc
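A usage sketch for the row above (illustrative, not part of the dataset; the sample sentence is made up). Note that int/2 is integer division under Python 2 but true division under Python 3:
>>> find_and_halve_numbers("I bought 10 apples and 4 pears")
'I bought 5 apples and 2 pears'    # Python 2; Python 3 would give '5.0' and '2.0'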
def _PrepareValuesForAdd(self, values): <NEW_LINE> <INDENT> if isinstance(values, dict): <NEW_LINE> <INDENT> for col in values: <NEW_LINE> <INDENT> if not col in self._column_set: <NEW_LINE> <INDENT> raise LookupError("Tried adding data to unknown column '%s'" % col) <NEW_LINE> <DEDENT> <DEDENT> for col in self._columns: <NEW_LINE> <INDENT> if not col in values: <NEW_LINE> <INDENT> values[col] = self.EMPTY_CELL <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif isinstance(values, list): <NEW_LINE> <INDENT> if len(values) > len(self._columns): <NEW_LINE> <INDENT> raise LookupError("Tried adding row with too many columns") <NEW_LINE> <DEDENT> if len(values) < len(self._columns): <NEW_LINE> <INDENT> shortage = len(self._columns) - len(values) <NEW_LINE> values.extend([self.EMPTY_CELL] * shortage) <NEW_LINE> <DEDENT> values = dict(zip(self._columns, values)) <NEW_LINE> <DEDENT> return values
Prepare a |values| dict/list to be added as a row. If |values| is a dict, verify that only supported column values are included. Add empty string values for columns not seen in the row. The original dict may be altered. If |values| is a list, translate it to a dict using known column order. Append empty values as needed to match number of expected columns. Return prepared dict.
625941b5377c676e91271f96
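Illustrative behaviour of _PrepareValuesForAdd above, assuming a table whose columns are ('a', 'b', 'c') and whose EMPTY_CELL is '' (both are assumptions for the sketch):
>>> table._PrepareValuesForAdd({'a': 1})   # dict: unknown columns rejected, missing ones filled
{'a': 1, 'b': '', 'c': ''}
>>> table._PrepareValuesForAdd([1, 2])     # list: padded with EMPTY_CELL, zipped with column order
{'a': 1, 'b': 2, 'c': ''}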
def __init__(self): <NEW_LINE> <INDENT> self.Score = None <NEW_LINE> self.RequestId = None
:param Score: Liveness score, in the range [0, 100]. Scores usually fall within [80, 100], and 0 is also a common value. A score above 87 is recommended for judging the subject to be live; the threshold can be adjusted for the specific scenario. :type Score: float :param RequestId: Unique request ID, returned with every request. Provide the RequestId of the request when locating a problem. :type RequestId: str
625941b5e8904600ed9f1d15
def decrement_month(date, amount=1): <NEW_LINE> <INDENT> if date.day != 1: <NEW_LINE> <INDENT> raise ValueError("Input must be truncated to the 1st of the month.") <NEW_LINE> <DEDENT> for _ in range(amount): <NEW_LINE> <INDENT> date = (date - datetime.timedelta(days=1)).replace(day=1) <NEW_LINE> <DEDENT> return date
Given a truncated datetime, return a new one `amount` months in the past (one month by default).
625941b5a8370b771705268d
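A quick worked example of decrement_month above (dates are hypothetical; assumes the code honours the amount parameter as fixed):
>>> import datetime
>>> decrement_month(datetime.date(2020, 3, 1))
datetime.date(2020, 2, 1)
>>> decrement_month(datetime.date(2020, 3, 1), amount=2)
datetime.date(2020, 1, 1)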
def __invert__(self): <NEW_LINE> <INDENT> raise NotImplementedError
works if 1/self is again d-finite.
625941b50c0af96317bb7fd5
def Application_EndRequest(app, e): <NEW_LINE> <INDENT> pass
Code that runs at the end of each request
625941b5d7e4931a7ee9dd07
def command_output(cmd): <NEW_LINE> <INDENT> import os <NEW_LINE> return os.popen(cmd, "r").read()
Capture a command's standard output.
625941b550485f2cf553cb84
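os.popen is a legacy API; a minimal modern equivalent of the row above, sketched with the standard subprocess module (not the author's code):
import subprocess

def command_output(cmd):
    # Run cmd through the shell and capture its standard output as str.
    return subprocess.check_output(cmd, shell=True).decode()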
def get_all_fn_names(): <NEW_LINE> <INDENT> all_names = [] <NEW_LINE> for fn_name, fn_handle in inspect.getmembers(sys.modules[__name__], inspect.isfunction): <NEW_LINE> <INDENT> if fn_name[:4] == "get_": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> all_names.append(fn_name) <NEW_LINE> <DEDENT> return all_names
Return a list of names of all defined activation functions in this file. Returns ------- list of str List of names of activation functions. .. note:: This is mainly used when you need to dynamically switch activation functions for things like hyperparameter smashing via the command line. This is not recommended for production use.
625941b5e1aae11d1e749a9f
@lD.log(logBase + ".gradModel") <NEW_LINE> def lime(logger, img, model, num_samples, num_features, random_seed): <NEW_LINE> <INDENT> temp_img, mask = None, None <NEW_LINE> try: <NEW_LINE> <INDENT> explainer = lime_image.LimeImageExplainer() <NEW_LINE> explanation = explainer.explain_instance( img, classifier_fn=model.predict, top_labels=1, hide_color=0, num_samples=num_samples, random_seed=random_seed, ) <NEW_LINE> temp_img, mask = explanation.get_image_and_mask( label=0, positive_only=True, hide_rest=True, num_features=num_features ) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print(f"Unable to lime!\n{e}") <NEW_LINE> <DEDENT> return temp_img, mask
Explain a model's prediction on a single image with LIME. Parameters ---------- img : {numpy.ndarray} A single image that is a numpy array in the shape of () model : {Keras model instance} The loaded already-trained TensorFlow SavedModel, as returned by the function `loadModel()`. num_samples : {int} The size of the neighborhood to learn the linear model num_features : {int} The number of superpixels to include in the explanation. Returns ------- temp_img : {numpy.ndarray} 3D numpy array of `img`'s explanation. mask : {numpy.ndarray} 2D numpy array that can be used with skimage.segmentation.mark_boundaries.
625941b5498bea3a759b989e
def model_loss(input_real, input_z, out_channel_dim): <NEW_LINE> <INDENT> g_model = generator(input_z, out_channel_dim) <NEW_LINE> d_model_real, d_logits_real = discriminator(input_real) <NEW_LINE> d_model_fake, d_logits_fake = discriminator(g_model, reuse=True) <NEW_LINE> d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * 0.9)) <NEW_LINE> d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake))) <NEW_LINE> g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) <NEW_LINE> d_loss = d_loss_real + d_loss_fake <NEW_LINE> return d_loss, g_loss
Get the loss for the discriminator and generator :param input_real: Images from the real dataset :param input_z: Z input :param out_channel_dim: The number of channels in the output image :return: A tuple of (discriminator loss, generator loss)
625941b54f88993c3716be60
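The 0.9 target on the real logits above is one-sided label smoothing. A hedged sketch of how the two losses are typically wired to separate TF1 optimizers; the 'discriminator'/'generator' variable-scope names and the hyperparameters are assumptions, not taken from the row:
d_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
g_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
d_train_op = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(d_loss, var_list=d_vars)
g_train_op = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(g_loss, var_list=g_vars)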
def save_html(self,response,page): <NEW_LINE> <INDENT> file_name = '{}第{}页.html'.format(self.name,page) <NEW_LINE> with open(file_name,'wb') as f: <NEW_LINE> <INDENT> f.write(response)
Save the data to an HTML file.
625941b50383005118ecf3d0
def set_sketch_params(self, scale=None, length=None, randomness=None): <NEW_LINE> <INDENT> if scale is None: <NEW_LINE> <INDENT> self._sketch = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._sketch = (scale, length or 128.0, randomness or 16.0) <NEW_LINE> <DEDENT> self.stale = True
Set the sketch parameters. Parameters ---------- scale : float, optional The amplitude of the wiggle perpendicular to the source line, in pixels. If scale is `None`, or not provided, no sketch filter will be applied. length : float, optional The length of the wiggle along the line, in pixels (default 128.0) randomness : float, optional The scale factor by which the length is shrunken or expanded (default 16.0) The PGF backend uses this argument as an RNG seed and not as described above. Using the same seed yields the same random shape. .. ACCEPTS: (scale: float, length: float, randomness: float)
625941b591f36d47f21ac2e1
def neighbor_average(values, row, column): <NEW_LINE> <INDENT> if row >= len(values) or column >= len(values[0]): <NEW_LINE> <INDENT> exit("out of range") <NEW_LINE> <DEDENT> a = neighbors(values, row, column) <NEW_LINE> return sum(a) / len(a)
Computes the average of the neighbors of a table cell.
625941b5baa26c4b54cb0f0f
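The row above depends on a neighbors() helper that is not shown; a minimal sketch of what it plausibly does (an assumption, not the original helper):
def neighbors(values, row, column):
    # Collect the up-to-8 in-bounds cells adjacent to (row, column).
    result = []
    for i in range(max(0, row - 1), min(len(values), row + 2)):
        for j in range(max(0, column - 1), min(len(values[0]), column + 2)):
            if (i, j) != (row, column):
                result.append(values[i][j])
    return result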
def test_branched_hooked(self, branched_dummy_context): <NEW_LINE> <INDENT> is_hooked(branched_dummy_context, "bob")
Dummy context should have a SteelToes hook.
625941b53eb6a72ae02ec2c6
def startWorkflow(name, version=None, correlationId=None, body=None): <NEW_LINE> <INDENT> parameterString = "?" <NEW_LINE> if version != None: <NEW_LINE> <INDENT> parameterString += "version=" + str(version) <NEW_LINE> parameterString += "&" <NEW_LINE> <DEDENT> if correlationId != None: <NEW_LINE> <INDENT> parameterString += "correlationId=" + correlationId <NEW_LINE> parameterString += "&" <NEW_LINE> <DEDENT> if body == None: <NEW_LINE> <INDENT> body = '{}' <NEW_LINE> <DEDENT> path = "/api/workflow/" + name + parameterString <NEW_LINE> response, responseData = httpPost(path, body) <NEW_LINE> workflowInstanceId = None <NEW_LINE> if response.status >= 200 and response.status < 300: <NEW_LINE> <INDENT> workflowInstanceId = responseData <NEW_LINE> <DEDENT> return workflowInstanceId
Start a Conductor Workflow with given name Will invoke POST request with format http://<ip>:<port>/api/workflow/<name>?version=<version>&correlationId=<correlationId> Returns workflow instance id
625941b54c3428357757c117
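An illustrative call of startWorkflow above (the workflow name and body are hypothetical):
>>> instance_id = startWorkflow("encode_video", version=1, body='{"fileId": "abc"}')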
def _wait(self, frame=None, callback_function=None, process_control_events=True): <NEW_LINE> <INDENT> while self.is_playing: <NEW_LINE> <INDENT> if _internals.skip_wait_methods: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.present() <NEW_LINE> if frame is not None and self.frame >= frame: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if isinstance(callback_function, FunctionType): <NEW_LINE> <INDENT> rtn_callback = callback_function() <NEW_LINE> if isinstance(rtn_callback, CallbackQuitEvent): <NEW_LINE> <INDENT> return rtn_callback <NEW_LINE> <DEDENT> <DEDENT> if _internals.active_exp.is_initialized: <NEW_LINE> <INDENT> rtn_callback = _internals.active_exp._execute_wait_callback() <NEW_LINE> if isinstance(rtn_callback, CallbackQuitEvent): <NEW_LINE> <INDENT> return rtn_callback <NEW_LINE> <DEDENT> if process_control_events: <NEW_LINE> <INDENT> if _internals.active_exp.mouse.process_quit_event(): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for event in pygame.event.get(pygame.KEYDOWN): <NEW_LINE> <INDENT> if _internals.active_exp.is_initialized and process_control_events and event.type == pygame.KEYDOWN and ( event.key == self.Keyboard.get_quit_key()): <NEW_LINE> <INDENT> self.pause() <NEW_LINE> self.Keyboard.process_control_keys(event, self.stop) <NEW_LINE> self.play()
Wait until the frame was shown or the end of the video, and update the screen. Parameters ---------- frame : int, optional number of the frame to stop after callback_function : function, optional function to repeatedly execute during waiting loop process_control_events : bool, optional process ``io.Keyboard.process_control_keys()`` and ``io.Mouse.process_quit_event()`` (default = True) Notes ------ This will also by default process control events (quit and pause). Thus, keyboard events will be cleared from the queue and cannot be received by a Keyboard().check() anymore! See Also -------- expyriment.design.Experiment.register_wait_callback_function
625941b510dbd63aa1bd299c
def __do_search(self, direction="first", skip=0): <NEW_LINE> <INDENT> if direction == "first": <NEW_LINE> <INDENT> search_space = enumerate(self.torrent_names) <NEW_LINE> <DEDENT> elif direction == "last": <NEW_LINE> <INDENT> search_space = enumerate(self.torrent_names) <NEW_LINE> search_space = list(search_space) <NEW_LINE> search_space = reversed(search_space) <NEW_LINE> <DEDENT> elif direction == "next": <NEW_LINE> <INDENT> search_space = enumerate(self.torrent_names) <NEW_LINE> search_space = list(search_space) <NEW_LINE> search_space = search_space[self.cursel:] <NEW_LINE> <DEDENT> elif direction == "previous": <NEW_LINE> <INDENT> search_space = enumerate(self.torrent_names) <NEW_LINE> search_space = list(search_space)[:self.cursel-1] <NEW_LINE> search_space = reversed(search_space) <NEW_LINE> <DEDENT> search_string = self.search_string.lower() <NEW_LINE> for i,n in search_space: <NEW_LINE> <INDENT> n = n.lower() <NEW_LINE> if n.find(search_string) != -1: <NEW_LINE> <INDENT> if skip > 0: <NEW_LINE> <INDENT> skip -= 1 <NEW_LINE> continue <NEW_LINE> <DEDENT> self.cursel = (i+1) <NEW_LINE> if ((self.curoff + self.rows - 5) < self.cursel): <NEW_LINE> <INDENT> self.curoff = self.cursel - self.rows + 5 <NEW_LINE> <DEDENT> elif ((self.curoff +1) > self.cursel): <NEW_LINE> <INDENT> self.curoff = max(1, self.cursel - 1) <NEW_LINE> <DEDENT> self.search_state = SEARCH_SUCCESS <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> if direction in ["first", "last"]: <NEW_LINE> <INDENT> self.search_state = SEARCH_FAILING <NEW_LINE> <DEDENT> elif direction == "next": <NEW_LINE> <INDENT> self.search_state = SEARCH_END_REACHED <NEW_LINE> <DEDENT> elif direction == "previous": <NEW_LINE> <INDENT> self.search_state = SEARCH_START_REACHED
Performs a search on the visible torrents and sets the cursor to the match :param direction: string, the direction of the search; can be first, last, next or previous :returns: Nothing
625941b59f2886367277a67d
def generate_az_rhi_title(radar, field, azimuth): <NEW_LINE> <INDENT> time_str = generate_radar_time_begin(radar).isoformat() + 'Z' <NEW_LINE> l1 = "%s %s " % (generate_radar_name(radar), time_str) <NEW_LINE> l2 = "Azimuth: %.1f deg" % azimuth <NEW_LINE> field_name = generate_field_name(radar, field) <NEW_LINE> return l1 + '\n' + l2 + '\n' + field_name
Generate a title for a pseudo-RHI from PPI azimuth plot. Parameters ---------- radar : Radar Radar structure. field : str Field plotted. azimuth : float Azimuth plotted. Returns ------- title : str Plot title.
625941b5d6c5a10208143e32
def coord2pixels_from_filestore(f): <NEW_LINE> <INDENT> contour_coord = f.ROIContourSequence[0].ContourData <NEW_LINE> coord = [] <NEW_LINE> for i in range(0, len(contour_coord), 3): <NEW_LINE> <INDENT> coord.append((contour_coord[i], contour_coord[i + 1], contour_coord[i + 2])) <NEW_LINE> <DEDENT> img_ID = f.SOPInstanceUID <NEW_LINE> img_arr = f.pixel_array <NEW_LINE> img_arr = img_arr.astype(np.int16) <NEW_LINE> pixel_coords = [(np.ceil(y), np.ceil(x)) for x, y, _ in coord] <NEW_LINE> rows = [] <NEW_LINE> cols = [] <NEW_LINE> for i, j in list(set(pixel_coords)): <NEW_LINE> <INDENT> rows.append(i) <NEW_LINE> cols.append(j) <NEW_LINE> <DEDENT> contour_arr = csc_matrix((np.ones_like(rows), (rows, cols)), dtype=np.int8, shape=(img_arr.shape[0], img_arr.shape[1])).toarray() <NEW_LINE> return img_arr, contour_arr, img_ID
Given a filestore DICOM dataset that contains both contour data and the corresponding image, return img_arr and contour_arr (2d image and contour pixels). Inputs f: DICOM dataset holding a (3006, 0016) Contour Image Sequence together with the image's pixel data Return img_arr: 2d np.array of image with pixel intensities contour_arr: 2d np.array of contour with 0 and 1 labels img_ID: the image's SOPInstanceUID
625941b597e22403b379cd84
def _update_table_classes(self, table): <NEW_LINE> <INDENT> assert isinstance(table, nodes.table) <NEW_LINE> table['classes'].append('styled-table') <NEW_LINE> header_cols = self.options.get('header-columns') or 0 <NEW_LINE> widths = self.options.get('widths') <NEW_LINE> dividers = self.options.get('column-dividers') <NEW_LINE> if dividers is None: <NEW_LINE> <INDENT> get_divider = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> def get_divider(idx_): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return dividers[idx_] <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> return 'no' <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> EMPTY = () <NEW_LINE> opts = ( self.options.get('column-alignment', EMPTY), self.options.get('column-wrapping', EMPTY), self.options.get('column-classes', EMPTY), ) <NEW_LINE> def locate(cls): <NEW_LINE> <INDENT> for child_ in table.children: <NEW_LINE> <INDENT> if isinstance(child_, cls): <NEW_LINE> <INDENT> return child_ <NEW_LINE> <DEDENT> <DEDENT> return None <NEW_LINE> <DEDENT> tgroup = locate(nodes.tgroup) <NEW_LINE> if not tgroup: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> col = 0 <NEW_LINE> for child in tgroup: <NEW_LINE> <INDENT> if isinstance(child, nodes.colspec): <NEW_LINE> <INDENT> if widths and col < len(widths): <NEW_LINE> <INDENT> child['colwidth'] = widths[col] <NEW_LINE> <DEDENT> if col < header_cols: <NEW_LINE> <INDENT> child['stub'] = 1 <NEW_LINE> <DEDENT> col += 1 <NEW_LINE> continue <NEW_LINE> <DEDENT> assert isinstance(child, (nodes.thead, nodes.tbody)) <NEW_LINE> for row in child.children: <NEW_LINE> <INDENT> assert isinstance(row, nodes.row) <NEW_LINE> for idx, (entry, align, wrap, clist) in enumerate(zip_longest(row, *opts)): <NEW_LINE> <INDENT> if entry is None: <NEW_LINE> <INDENT> raise ValueError('not enough columns for field options') <NEW_LINE> <DEDENT> assert isinstance(entry, nodes.entry) <NEW_LINE> classes = entry['classes'] <NEW_LINE> if align: <NEW_LINE> <INDENT> classes.append(align + '-align') <NEW_LINE> <DEDENT> if wrap is False: <NEW_LINE> <INDENT> classes.append('nowrap') <NEW_LINE> <DEDENT> if clist: <NEW_LINE> <INDENT> classes.extend(clist) <NEW_LINE> <DEDENT> if get_divider: <NEW_LINE> <INDENT> classes.append(get_divider(idx) + '-left-divider') <NEW_LINE> classes.append(get_divider(idx + 1) + '-right-divider')
Apply the directive's styling options (column widths, header columns, column dividers, alignment, wrapping and extra classes) to the given table node.
625941b58e05c05ec3eea15d
def drawImageAndSpectrum(arr, showTimeSec=None, winTitle='Three images') : <NEW_LINE> <INDENT> plt.ion() <NEW_LINE> fig = plt.figure(figsize=(8,8), dpi=80, facecolor='w',edgecolor='w',frameon=True) <NEW_LINE> fig.canvas.set_window_title(winTitle) <NEW_LINE> plt.clf() <NEW_LINE> plt.subplot2grid((10,10), (0,0), rowspan=7, colspan=10) <NEW_LINE> drawImage(arr,'Image and Spectrum') <NEW_LINE> plt.subplot2grid((10,10), (7,0), rowspan=4, colspan=10) <NEW_LINE> drawHistogram(arr) <NEW_LINE> drawOrShow(showTimeSec)
Graphical presentation of a 2D array: the image together with its spectrum (histogram).
625941b59c8ee82313fbb568
def lnfactorial(n, integer=True): <NEW_LINE> <INDENT> assert is_nonneginteger(n), "the argument to lnfactorial must be a non-negative integer!" <NEW_LINE> if integer: <NEW_LINE> <INDENT> fact = factorial(n) <NEW_LINE> lnfact = log(fact) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> lnfact = lngamma(n+1.0) <NEW_LINE> <DEDENT> return lnfact
Computation of the natural logarithm of a factorial using integer arithmetic (integer=True) or floating-point arithmetic via lngamma (integer=False). In both cases a floating-point number is returned, of course...
625941b51b99ca400220a89c
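A worked check of lnfactorial above: ln(5!) = ln 120 ≈ 4.7875, and the lngamma branch agrees because Γ(6) = 5!:
>>> round(lnfactorial(5), 4)
4.7875
>>> round(lnfactorial(5, integer=False), 4)
4.7875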
def interatom_define(self, event=None): <NEW_LINE> <INDENT> uf_store['interatom.define'](wx_wizard_modal=True, spin_id1='@N', spin_id2='@H')
Define the interatomic interactions of the spins via the interatom.define user function. @keyword event: The wx event. @type event: wx event
625941b5b545ff76a8913c0d
def pwm_heater_control_loop(state): <NEW_LINE> <INDENT> GPIO.setmode(GPIO.BCM) <NEW_LINE> GPIO.setup(21, GPIO.OUT, initial=0) <NEW_LINE> GPIO.output(21, 0) <NEW_LINE> time.sleep(20) <NEW_LINE> period = config['heatperiod'] <NEW_LINE> while True: <NEW_LINE> <INDENT> if state['pid'] > 1: <NEW_LINE> <INDENT> print("on") <NEW_LINE> GPIO.output(21, 1) <NEW_LINE> time.sleep(30) <NEW_LINE> <DEDENT> elif state['pid'] <= 0: <NEW_LINE> <INDENT> print("off") <NEW_LINE> GPIO.output(21, 0) <NEW_LINE> time.sleep(30) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> on_sec = state['pid'] * period <NEW_LINE> if on_sec >= period: <NEW_LINE> <INDENT> on_sec = period <NEW_LINE> <DEDENT> if on_sec < 0: <NEW_LINE> <INDENT> on_sec = 0 <NEW_LINE> <DEDENT> print("on for {:3.2f}% ({:3.2f} sec)".format( on_sec / period * 100.0, on_sec)) <NEW_LINE> GPIO.output(21, 1) <NEW_LINE> time.sleep(on_sec) <NEW_LINE> print("off for {:3.2f}% ({:3.2f} sec)".format( (period - on_sec) / period * 100.0, period - on_sec)) <NEW_LINE> GPIO.output(21, 0) <NEW_LINE> time.sleep(period - on_sec)
PID controlled loop using PWM to control heat output
625941b5fff4ab517eb2f225
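Worked duty-cycle numbers for the loop above, with a hypothetical heatperiod of 60 seconds: a fractional PID output of 0.25 gives 15 s on and 45 s off per cycle; outputs above 1 clamp to fully on, and outputs at or below 0 switch the heater off:
>>> period, pid = 60, 0.25
>>> on_sec = pid * period
>>> (on_sec, period - on_sec)
(15.0, 45.0)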
def test_it_can_calculate_other_angles(self): <NEW_LINE> <INDENT> self.assertEqual(sun_angle("12:15"), 93.75)
Variable angles can be calculated too.
625941b51f5feb6acb0c4941
def testWebhookResponse(self): <NEW_LINE> <INDENT> pass
Test WebhookResponse
625941b5925a0f43d2549c5f
def get_sensor_range(self): <NEW_LINE> <INDENT> return self._range
returns the range of the sensor :return float: range of sensor in meters
625941b59f2886367277a67e
def test_case_3(self): <NEW_LINE> <INDENT> a = Square(4, 1, 1, 1) <NEW_LINE> self.assertDictEqual(a.to_dictionary(), { 'id': 1, 'size': 4, 'x': 1, 'y': 1})
test to_dictionary method
625941b523849d37ff7b2e7f
def estimate_nll(self, X): <NEW_LINE> <INDENT> fe = self.free_energy(X) <NEW_LINE> return fe.mean() + self.log_Z
Estimate the :math:`-\log p(x)` using the estimate of :math:`log_Z`. Args: X (T.tensor): data samples. Returns: T.tensor: NLL estimate.
625941b5b7558d58953c4d08
def to_rdkit_mol(mol_repr, molid=None, instantiator=Chem.MolFromSmiles, to2D=False, to3D=False, toPropertyMol=False): <NEW_LINE> <INDENT> if not isinstance(mol_repr, Chem.Mol): <NEW_LINE> <INDENT> mol = instantiator(mol_repr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mol = mol_repr <NEW_LINE> <DEDENT> if mol is None: <NEW_LINE> <INDENT> if molid is None: <NEW_LINE> <INDENT> warning('RDKit cannot create a molecule from %r' % mol_repr) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> warning('RDKit cannot create molecule %s from %r' % (molid, mol_repr)) <NEW_LINE> <DEDENT> return mol <NEW_LINE> <DEDENT> if to3D: <NEW_LINE> <INDENT> AllChem.EmbedMolecule(mol) <NEW_LINE> AllChem.UFFOptimizeMolecule(mol) <NEW_LINE> <DEDENT> elif to2D: <NEW_LINE> <INDENT> AllChem.Compute2DCoords(mol) <NEW_LINE> <DEDENT> if toPropertyMol: <NEW_LINE> <INDENT> return PropertyMol(mol) <NEW_LINE> <DEDENT> return mol
Converts a molecular representation (e.g. smiles string) into an RDKit molecule. Allows to perform common postprocessing operations on the resulting molecule.
625941b56fece00bbac2d527
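Illustrative calls of to_rdkit_mol above, using the ethanol SMILES 'CCO' (example inputs only):
>>> mol = to_rdkit_mol('CCO')                        # molecule from a SMILES string
>>> mol2d = to_rdkit_mol('CCO', to2D=True)           # with 2D coordinates computed
>>> bad = to_rdkit_mol('not-a-smiles', molid='m1')   # warns and returns None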
def recognize(models: dict, test_set: SinglesData): <NEW_LINE> <INDENT> warnings.filterwarnings("ignore", category=DeprecationWarning) <NEW_LINE> probabilities = [] <NEW_LINE> guesses = [] <NEW_LINE> for i in range(len(test_set.wordlist)): <NEW_LINE> <INDENT> sample_frames, sample_length = test_set._hmm_data[i][0], test_set._hmm_data[i][1] <NEW_LINE> sample_dict = {} <NEW_LINE> for word in models: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> sample_dict[word] = models[word].score(sample_frames, sample_length) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> sample_dict[word] = float('-inf') <NEW_LINE> <DEDENT> <DEDENT> probabilities.append(sample_dict) <NEW_LINE> <DEDENT> for sample in probabilities: <NEW_LINE> <INDENT> biggest_prob = float('-inf') <NEW_LINE> best_guess = '' <NEW_LINE> for word, prob in sample.items(): <NEW_LINE> <INDENT> if prob > biggest_prob: <NEW_LINE> <INDENT> biggest_prob = prob <NEW_LINE> best_guess = word <NEW_LINE> <DEDENT> <DEDENT> guesses.append(best_guess) <NEW_LINE> <DEDENT> return probabilities, guesses
Recognize test word sequences from word models set :param models: dict of trained models {'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...} :param test_set: SinglesData object :return: (list, list) as probabilities, guesses both lists are ordered by the test set word_id probabilities is a list of dictionaries where each key is a word and each value is its Log Likelihood [{'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ... }, {'SOMEWORD': LogLvalue, 'SOMEOTHERWORD': LogLvalue, ... }, ] guesses is a list of the best guess words ordered by the test set word_id ['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
625941b50fa83653e4656dab
def test_doubles(): <NEW_LINE> <INDENT> array = [3.1, 2.9, 2.4] <NEW_LINE> bubble.sort(array) <NEW_LINE> assert all(array[x] <= array[x + 1] for x in range(len(array) - 1))
Handling of double values
625941b555399d3f055884a0
def get(self, request, format=None): <NEW_LINE> <INDENT> an_apiview = [ 'Uses HTTP methods as functions (get, post, patch, put, delete)', 'Is similar to a traditional Django View', 'Gives you the most control over your application logic', 'Is mapped manually to URLs', ] <NEW_LINE> return Response({'message': 'Hello!', 'an_apiview': an_apiview})
Returns a list of APIView features
625941b545492302aab5e0ac
def remove_low_intensity_frames(data, parameters): <NEW_LINE> <INDENT> firstIndex = parameters['firstSampleIndex'] - 1 <NEW_LINE> lastIndex = firstIndex + (parameters['numSamples'] * parameters['numTechReps']) <NEW_LINE> temp = data.iloc[:, firstIndex : lastIndex] <NEW_LINE> temp[temp < parameters['intenSignifCutOff']] = 0 <NEW_LINE> data.iloc[:, firstIndex : lastIndex] = temp <NEW_LINE> data.drop_empty_frames('Background correction', parameters)
Discard features (rows) where intensity is below the threshold. The threshold is set by "intenSignifCutOff" parameter. Keyword Arguments: data -- LFDataFrame instance parameters -- LipidFinder's PeakFilter parameters instance
625941b53617ad0b5ed67ceb
def test_reference_query_conversion(self): <NEW_LINE> <INDENT> class Member(Document): <NEW_LINE> <INDENT> user_num = IntField(primary_key=True) <NEW_LINE> <DEDENT> class BlogPost(Document): <NEW_LINE> <INDENT> title = StringField() <NEW_LINE> author = ReferenceField(Member) <NEW_LINE> <DEDENT> Member.drop_collection() <NEW_LINE> BlogPost.drop_collection() <NEW_LINE> m1 = Member(user_num=1) <NEW_LINE> m1.save() <NEW_LINE> m2 = Member(user_num=2) <NEW_LINE> m2.save() <NEW_LINE> post1 = BlogPost(title='post 1', author=m1) <NEW_LINE> post1.save() <NEW_LINE> post2 = BlogPost(title='post 2', author=m2) <NEW_LINE> post2.save() <NEW_LINE> post = BlogPost.objects(author=m1).first() <NEW_LINE> self.assertEqual(post.id, post1.id) <NEW_LINE> post = BlogPost.objects(author=m2).first() <NEW_LINE> self.assertEqual(post.id, post2.id) <NEW_LINE> Member.drop_collection() <NEW_LINE> BlogPost.drop_collection()
Ensure that ReferenceFields can be queried using objects and values of the type of the primary key of the referenced object.
625941b538b623060ff0abdb
def load_data(): <NEW_LINE> <INDENT> pass
Load input data.
625941b5eab8aa0e5d26d94b
def login(request): <NEW_LINE> <INDENT> result = {"status": False, "message": {}} <NEW_LINE> if request.method == "POST": <NEW_LINE> <INDENT> caret_result = Login.creat_user(request,User,result) <NEW_LINE> print(caret_result) <NEW_LINE> return HttpResponse(caret_result) <NEW_LINE> <DEDENT> if request.method == "GET": <NEW_LINE> <INDENT> login_result = Login.user_login(request,result) <NEW_LINE> return HttpResponse(login_result)
Usage: POST submits username, password, email to register a user. GET submits username, password for credential verification; on success, the session is written. OPTION logs the user out.
625941b56e29344779a62403
def on_right(): <NEW_LINE> <INDENT> global FLIP_TIMER_ID <NEW_LINE> still_to_learn_words.remove(NEW_WORD) <NEW_LINE> window.after_cancel(FLIP_TIMER_ID) <NEW_LINE> new_card()
Called when you got the card right: removes the word from the list still to learn and shows a new card.
625941b5bde94217f3682be9
def get_characters_character_id_wallets_with_http_info(self, character_id, **kwargs): <NEW_LINE> <INDENT> all_params = ['character_id', 'datasource', 'token', 'user_agent', 'x_user_agent'] <NEW_LINE> all_params.append('callback') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> all_params.append('_preload_content') <NEW_LINE> all_params.append('_request_timeout') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_characters_character_id_wallets" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('character_id' not in params) or (params['character_id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `character_id` when calling `get_characters_character_id_wallets`") <NEW_LINE> <DEDENT> collection_formats = {} <NEW_LINE> resource_path = '/v1/characters/{character_id}/wallets/'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> if 'character_id' in params: <NEW_LINE> <INDENT> path_params['character_id'] = params['character_id'] <NEW_LINE> <DEDENT> query_params = {} <NEW_LINE> if 'datasource' in params: <NEW_LINE> <INDENT> query_params['datasource'] = params['datasource'] <NEW_LINE> <DEDENT> if 'token' in params: <NEW_LINE> <INDENT> query_params['token'] = params['token'] <NEW_LINE> <DEDENT> if 'user_agent' in params: <NEW_LINE> <INDENT> query_params['user_agent'] = params['user_agent'] <NEW_LINE> <DEDENT> header_params = {} <NEW_LINE> if 'x_user_agent' in params: <NEW_LINE> <INDENT> header_params['X-User-Agent'] = params['x_user_agent'] <NEW_LINE> <DEDENT> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client. select_header_accept(['application/json']) <NEW_LINE> auth_settings = ['evesso'] <NEW_LINE> return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='list[GetCharactersCharacterIdWallets200Ok]', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
List wallets and balances List your wallets and their balances. Characters typically have only one wallet, with wallet_id 1000 being the master wallet. --- This route is cached for up to 120 seconds This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_characters_character_id_wallets_with_http_info(character_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int character_id: An EVE character ID (required) :param str datasource: The server name you would like data from :param str token: Access token to use, if preferred over a header :param str user_agent: Client identifier, takes precedence over headers :param str x_user_agent: Client identifier, takes precedence over User-Agent :return: list[GetCharactersCharacterIdWallets200Ok] If the method is called asynchronously, returns the request thread.
625941b5004d5f362079a124
def main(args): <NEW_LINE> <INDENT> bad_files = list_files(BAD_DIR) <NEW_LINE> logger.debug(f'{len(bad_files)} files found') <NEW_LINE> for bf in bad_files: <NEW_LINE> <INDENT> df = DataFile.objects.get(name=os.path.basename(bf)) <NEW_LINE> new_dir = os.path.join(BASE_OUTPUT_DIR, construct_drs_path(df)) <NEW_LINE> new_path = os.path.join(new_dir, df.name) <NEW_LINE> if not os.path.exists(new_dir): <NEW_LINE> <INDENT> os.makedirs(new_dir) <NEW_LINE> <DEDENT> if os.path.exists(new_path): <NEW_LINE> <INDENT> if os.path.islink(new_path): <NEW_LINE> <INDENT> os.remove(new_path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.error(f'{new_path} is not a link') <NEW_LINE> continue <NEW_LINE> <DEDENT> <DEDENT> os.rename(bf, new_path) <NEW_LINE> df.directory = new_dir <NEW_LINE> df.save()
Main entry point
625941b51b99ca400220a89d
def updateScores(self): <NEW_LINE> <INDENT> root = self.scores.invisibleRootItem() <NEW_LINE> for key, value in self.score_headers.items(): <NEW_LINE> <INDENT> if 'Score:' in key: <NEW_LINE> <INDENT> score_value = value <NEW_LINE> <DEDENT> elif 'Start Time' in key: <NEW_LINE> <INDENT> start_value = value <NEW_LINE> <DEDENT> elif 'Stop Time' in key: <NEW_LINE> <INDENT> stop_value = value <NEW_LINE> <DEDENT> <DEDENT> if hasattr(self.mainWindow, 'lr'): <NEW_LINE> <INDENT> x1, x2 = self.mainWindow.lr.getRegion() <NEW_LINE> for item in self.scores.selectedItems(): <NEW_LINE> <INDENT> item.setText(score_value, self.score.currentText()) <NEW_LINE> item.setText(start_value, str(x1)) <NEW_LINE> item.setText(stop_value, str(x2)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for item in self.scores.selectedItems(): <NEW_LINE> <INDENT> item.setText(score_value, self.score.currentText())
updates the selected scores in the Score Window's TreeWidget
625941b5d53ae8145f87a063
def reduce_resources(self, ram_factor, core_factor): <NEW_LINE> <INDENT> self.reduced_ram = self.ram / ram_factor <NEW_LINE> self.reduced_core = self.core / core_factor
We need to reduce the complexity of the problem in all possible ways. As soon as we deal with flavors, we have to keep in mind that we can compute the greatest common divisor (http://en.wikipedia.org/wiki/Greatest_common_divisor) of ram and core across flavors and reduce complexity when calculating the distribution of flavors over nodes with a dynamic programming technique.
625941b53346ee7daa2b2b55
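The docstring above points at reducing ram/core by their greatest common divisor across all flavors; a hedged sketch of computing those factors (the flavors argument and its ram/core attributes are assumptions for the sketch):
import math
from functools import reduce

def gcd_factors(flavors):
    # GCD of all ram values and of all core counts across the flavors.
    ram_factor = reduce(math.gcd, (f.ram for f in flavors))
    core_factor = reduce(math.gcd, (f.core for f in flavors))
    return ram_factor, core_factor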
def _get(session, baseurl, path=''): <NEW_LINE> <INDENT> url = urljoin(baseurl, path) <NEW_LINE> response = session.get(url) <NEW_LINE> response.raise_for_status() <NEW_LINE> return response
Fetch a URL and return the response. Throws on HTTP 4xx or HTTP 5xx.
625941b5377c676e91271f97
def __set_present(self, value): <NEW_LINE> <INDENT> self.header.set_long(4, value)
Set RadioTap present field bit
625941b531939e2706e4cc5e
def test_integer_sparse_input(self): <NEW_LINE> <INDENT> op = sparse_ops.sparse_cross([ self._sparse_tensor([[11], [333, 5555]]), constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'], ['batch2-FC2-F1', 'batch2-FC2-F2']], dtypes.string), ]) <NEW_LINE> expected_out = self._sparse_tensor( [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [ '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2' ]]) <NEW_LINE> with self.cached_session() as sess: <NEW_LINE> <INDENT> self._assert_sparse_tensor_equals(expected_out, self.evaluate(op))
Tests mixed type sparse and dense inputs.
625941b5a17c0f6771cbde41
def submit(self, func, *args, **kwargs): <NEW_LINE> <INDENT> with self._count_lock: <NEW_LINE> <INDENT> self._wait_count += 1 <NEW_LINE> <DEDENT> def do_task(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with self._app.app_context(): <NEW_LINE> <INDENT> func(*args, **kwargs) <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> LOGGER.exception("Task failed") <NEW_LINE> <DEDENT> with self._count_lock: <NEW_LINE> <INDENT> self._wait_count -= 1 <NEW_LINE> <DEDENT> <DEDENT> return self._thread_pool.submit(do_task)
Schedule a task into the task pool
625941b5e8904600ed9f1d16
def is_valid_score(self, update): <NEW_LINE> <INDENT> username = self.get_first_word(update) <NEW_LINE> return self.score.is_valid_score(username, update)
Return whether the score is valid.
625941b5d53ae8145f87a064
def order_gradebook_results(self, gradebook_search_order): <NEW_LINE> <INDENT> pass
Specify an ordering to the search results. :param gradebook_search_order: gradebook search order :type gradebook_search_order: ``osid.grading.GradebookSearchOrder`` :raise: ``NullArgument`` -- ``gradebook_search_order`` is ``null`` :raise: ``Unsupported`` -- ``gradebook_search_order`` is not of this service *compliance: mandatory -- This method must be implemented.*
625941b515fb5d323cde08f5
def fw(self, i): <NEW_LINE> <INDENT> if self.__position__ + i > len(self.string): <NEW_LINE> <INDENT> raise ValueError("Hops beyond the end of the data") <NEW_LINE> <DEDENT> self.__position__ += i
Jumps forward by the given number of symbols. Args: i (int): the number of symbols to skip;
625941b5627d3e7fe0d68c3b
def get_db_product_count(self): <NEW_LINE> <INDENT> if self.uri: <NEW_LINE> <INDENT> print('\n\t5) Products count from database: {products_count}\t'.format( products_count=self.pyro_obj.get_products_count()) )
Calls database object to return product's count.
625941b51d351010ab855912
def onCameraChanged(self, *args): <NEW_LINE> <INDENT> self.updateLiveScriptText()
Slot that is called when the camera is changed.
625941b52ae34c7f2600cf1f
def chunks(self, size, *args, **kwargs): <NEW_LINE> <INDENT> with self.open(*args, **kwargs) as f: <NEW_LINE> <INDENT> for chunk in iter(lambda: f.read(size) or None, None): <NEW_LINE> <INDENT> yield chunk
Returns a generator yielding chunks of the file, so it can be read piece by piece with a simple for loop. Any argument you pass after `size` will be passed to :meth:`open`. :example: >>> hash = hashlib.md5() >>> for chunk in Path("CHANGES.rst").chunks(8192, mode='rb'): ... hash.update(chunk) This will read the file by chunks of 8192 bytes.
625941b55f7d997b87174888
def __init__(self, name, doccount, fieldlength_totals, fieldlength_maxes, deleted=None): <NEW_LINE> <INDENT> assert isinstance(name, basestring) <NEW_LINE> assert isinstance(doccount, (int, long)) <NEW_LINE> assert fieldlength_totals is None or isinstance(fieldlength_totals, dict), "fl_totals=%r" % fieldlength_totals <NEW_LINE> assert fieldlength_maxes is None or isinstance(fieldlength_maxes, dict), "fl_maxes=%r" % fieldlength_maxes <NEW_LINE> self.name = name <NEW_LINE> self.doccount = doccount <NEW_LINE> self.fieldlength_totals = fieldlength_totals <NEW_LINE> self.fieldlength_maxes = fieldlength_maxes <NEW_LINE> self.deleted = deleted <NEW_LINE> self._filenames = set() <NEW_LINE> for attr, ext in self.EXTENSIONS.iteritems(): <NEW_LINE> <INDENT> fname = "%s.%s" % (self.name, ext) <NEW_LINE> setattr(self, attr + "_filename", fname) <NEW_LINE> self._filenames.add(fname)
:param name: The name of the segment (the Index object computes this from its name and the generation). :param doccount: The maximum document number in the segment. :param term_count: Total count of all terms in all documents. :param fieldlength_totals: A dictionary mapping field numbers to the total number of terms in that field across all documents in the segment. :param deleted: A set of deleted document numbers, or None if no deleted documents exist in this segment.
625941b56aa9bd52df036b8f
def data(self, idx=QModelIndex(), role=None): <NEW_LINE> <INDENT> if role == Qt.DisplayRole: <NEW_LINE> <INDENT> x: int = idx.row() <NEW_LINE> y: int = idx.column() <NEW_LINE> commands_in_game = [command for command in self.commanddata.commands if command.available] <NEW_LINE> if y == 0: <NEW_LINE> <INDENT> return commands_in_game[x].name <NEW_LINE> <DEDENT> if y == 1: <NEW_LINE> <INDENT> return commands_in_game[x].questions <NEW_LINE> <DEDENT> if y == 2: <NEW_LINE> <INDENT> return commands_in_game[x].points <NEW_LINE> <DEDENT> if y == 3: <NEW_LINE> <INDENT> return commands_in_game[x].question_points <NEW_LINE> <DEDENT> if y == 4: <NEW_LINE> <INDENT> return commands_in_game[x].button_id <NEW_LINE> <DEDENT> if y == 5: <NEW_LINE> <INDENT> return commands_in_game[x].bets <NEW_LINE> <DEDENT> <DEDENT> return QVariant()
reimplements data function of Abstract Table model :param idx: index of data (x, y positions) :param role: role of query :return: appropriate data of the selected command (x is the command number; y is 0 for name, 1 for question number, 2 for points, 3 for question points, 4 for button id, 5 for bets)
625941b5d8ef3951e324332a
def make_api_response(data, status=200): <NEW_LINE> <INDENT> api_resp = HttpResponse(json.dumps( data, sort_keys=True, indent=4, separators=(',', ': ')), content_type="application/json; charset=utf-8", status=status) <NEW_LINE> api_resp['Access-Control-Allow-Origin'] = '*' <NEW_LINE> return api_resp
Make API Response
625941b5099cdd3c635f0a4a
def reset_input_buffer(self): <NEW_LINE> <INDENT> if not self.is_open: <NEW_LINE> <INDENT> raise portNotOpenError <NEW_LINE> <DEDENT> if self.logger: <NEW_LINE> <INDENT> self.logger.info('reset_input_buffer()') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> while self.queue.qsize(): <NEW_LINE> <INDENT> self.queue.get_nowait() <NEW_LINE> <DEDENT> <DEDENT> except queue.Empty: <NEW_LINE> <INDENT> pass
Clear input buffer, discarding all that is in the buffer.
625941b515fb5d323cde08f6
def check_blocked_rows(self): <NEW_LINE> <INDENT> for a in range(9): <NEW_LINE> <INDENT> missing_numbers = list({1,2,3,4,5,6,7,8,9}.difference(set(self.squares[a]))) <NEW_LINE> for missing in missing_numbers: <NEW_LINE> <INDENT> rows_allowed = set() <NEW_LINE> for b in range(9): <NEW_LINE> <INDENT> pos = self.pos_from_square(a, b) <NEW_LINE> if missing in self.allowed[pos]: <NEW_LINE> <INDENT> rows_allowed.add(pos[0]) <NEW_LINE> <DEDENT> <DEDENT> if len(rows_allowed) == 1: <NEW_LINE> <INDENT> i = list(rows_allowed)[0] <NEW_LINE> for j in range(9): <NEW_LINE> <INDENT> square, z = self.square_from_pos((i, j)) <NEW_LINE> if square != a and missing in self.allowed[(i, j)]: <NEW_LINE> <INDENT> self.allowed[(i, j)].remove(missing)
Similar functionality to check_blocked_cols, but checks for rows
625941b58e7ae83300e4adb9
def test_008_create_project_with_appid(self): <NEW_LINE> <INDENT> Tns.create(app_name=Settings.AppName.DEFAULT, template=Template.HELLO_WORLD_JS.local_package, app_data=Apps.HELLO_WORLD_JS, update=False, app_id='org.nativescript.MyApp')
Create project with --appid option
625941b58a349b6b435e7f62
def get_journal(id): <NEW_LINE> <INDENT> ses = _session() <NEW_LINE> return ses.query(Journal).get(id)
Get journal entry with id.
625941b576e4537e8c351464
def expire_staff_apply_by_staff(shop_id: int, user_id: int): <NEW_LINE> <INDENT> staff_apply_list = list_staff_apply_by_shop_id_and_user_id(shop_id, user_id, filter_expired=True) <NEW_LINE> for sal in staff_apply_list: <NEW_LINE> <INDENT> sal.expired = StaffApplyExpired.YES <NEW_LINE> sal.save()
Expire a staff member's application records when the staff member is deleted. :param shop_id: :param user_id: :return:
625941b544b2445a33931e8e
def model_fn(features, labels, mode, params): <NEW_LINE> <INDENT> unique_ids = features["unique_ids"] <NEW_LINE> input_ids = features["input_ids"] <NEW_LINE> input_mask = features["input_mask"] <NEW_LINE> segment_ids = features["segment_ids"] <NEW_LINE> is_training = (mode == tf.estimator.ModeKeys.TRAIN) <NEW_LINE> (start_logits, end_logits) = create_model( bert_config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) <NEW_LINE> tvars = tf.trainable_variables() <NEW_LINE> initialized_variable_names = {} <NEW_LINE> scaffold_fn = None <NEW_LINE> if init_checkpoint: <NEW_LINE> <INDENT> (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) <NEW_LINE> if use_tpu: <NEW_LINE> <INDENT> def tpu_scaffold(): <NEW_LINE> <INDENT> tf.train.init_from_checkpoint(init_checkpoint, assignment_map) <NEW_LINE> return tf.train.Scaffold() <NEW_LINE> <DEDENT> scaffold_fn = tpu_scaffold <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tf.train.init_from_checkpoint(init_checkpoint, assignment_map) <NEW_LINE> <DEDENT> <DEDENT> output_spec = None <NEW_LINE> if mode == tf.estimator.ModeKeys.PREDICT: <NEW_LINE> <INDENT> predictions = { "unique_ids": unique_ids, "start_logits": start_logits, "end_logits": end_logits, } <NEW_LINE> output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( "Only TRAIN and PREDICT modes are supported: %s" % (mode)) <NEW_LINE> <DEDENT> return output_spec
The `model_fn` for TPUEstimator.
625941b58e05c05ec3eea15e
def reset(self): <NEW_LINE> <INDENT> if self.getProgramCounter() == 0: <NEW_LINE> <INDENT> va = self.readMemoryPtr(self.IVT_RESET) <NEW_LINE> self.setProgramCounter(va)
triggers the reset interrupt
625941b54a966d76dd550df9
def length_of_words_list(words2): <NEW_LINE> <INDENT> return [len(word2) for word2 in words2]
Defines the function length_of_words_list, which returns the length of each word in the given list using a list comprehension. Parameters: a list of words. Return: a list containing the length of each word in the input.
625941b585dfad0860c3ac46
def post(self, request, format=None): <NEW_LINE> <INDENT> if u'document' in request.data: <NEW_LINE> <INDENT> data = request.data['document'] <NEW_LINE> if 'id' in data: <NEW_LINE> <INDENT> document = Document.objects.filter(id=data['id']).first() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> document = Document() <NEW_LINE> <DEDENT> ds = DocumentSerializer(instance=document, data=data) <NEW_LINE> if ds.is_valid(raise_exception=True): <NEW_LINE> <INDENT> ds.update_document(document) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ParseError("Required parameter 'document' is missing.") <NEW_LINE> <DEDENT> if not document.document_xml: <NEW_LINE> <INDENT> html = "" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> renderer = HTMLRenderer() <NEW_LINE> body_html = renderer.render_xml(document.document_xml) <NEW_LINE> html = render_to_string('html_preview.html', { 'document': document, 'body_html': body_html, }) <NEW_LINE> <DEDENT> return Response({'html': html})
Render a document into HTML. The request MUST include a JSON description of what to render. Parameters: format: "html" (default) document: { ... } To determine what to render, include a document description. If the description has an id, the missing details are filled in from the existing document in the database. { "document": { "title": "A title", "body": "... xml ..." } }
625941b5097d151d1a222c4a
def addAtTail(self, val: int) -> None: <NEW_LINE> <INDENT> if self.len == 0: <NEW_LINE> <INDENT> self.addAtHead(val) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> node = self.head <NEW_LINE> while node.next is not None: <NEW_LINE> <INDENT> node = node.next <NEW_LINE> <DEDENT> node.next = Node(val) <NEW_LINE> self.len += 1
Append a node of value val to the last element of the linked list.
625941b5f548e778e58cd369
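addAtTail above assumes a Node class plus head/len attributes initialized elsewhere; a minimal sketch of that assumed Node:
class Node:
    def __init__(self, val):
        self.val = val     # stored value
        self.next = None   # link to the next node, None at the tail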
def recalculate_percentile(snapshot: models.SnapshotMetadata, cached_adapter: CachedCDIAdapter) -> None: <NEW_LINE> <INDENT> cdi_type = snapshot.cdi_type <NEW_LINE> gender = snapshot.gender <NEW_LINE> individual_words = db_util.load_snapshot_contents(snapshot) <NEW_LINE> snapshot.words_spoken = get_words_spoken( cached_adapter, cdi_type, individual_words ) <NEW_LINE> snapshot.percentile = recalculate_percentile_raw( cached_adapter, cdi_type, gender, snapshot.words_spoken, snapshot.age )
Recalculate the percentile for a snapshot to be modified in place. @param snapshot: The snapshot to modify.
625941b526068e7796caeac5
def generate_new_script(debugger, command, result, internal_dict): <NEW_LINE> <INDENT> command_args = shlex.split(command, posix=False) <NEW_LINE> parser = generateOptionParser() <NEW_LINE> try: <NEW_LINE> <INDENT> (options, args) = parser.parse_args(command_args) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> result.SetError(parser.usage) <NEW_LINE> return <NEW_LINE> <DEDENT> if not args: <NEW_LINE> <INDENT> result.SetError('Expects a filename. Usage: __generate_script filename') <NEW_LINE> return <NEW_LINE> <DEDENT> clean_command = ('').join(args) <NEW_LINE> file_path = str(os.path.splitext(os.path.join( os.path.dirname(__file__), clean_command))[0] + '.py') <NEW_LINE> if os.path.isfile(file_path): <NEW_LINE> <INDENT> result.SetError('There already exists a file named "{}", please remove the file at "{}" first'.format(clean_command, file_path)) <NEW_LINE> return <NEW_LINE> <DEDENT> if options.create_class: <NEW_LINE> <INDENT> script = generate_class_file(clean_command, options) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> script = generate_function_file(clean_command, options) <NEW_LINE> <DEDENT> create_or_touch_filepath(file_path, script) <NEW_LINE> os.system('atom ' + file_path) <NEW_LINE> result.AppendMessage('Opening \"{}\"...'.format(file_path))
Generates a new script in the same directory as this file. Can generate function styled scripts or class styled scripts. Expected Usage: __generate_script cool_command reload_lldbinit cool_command
625941b57047854f462a11fb
def save(self, file_name: str) -> None: <NEW_LINE> <INDENT> with open(file_name, "wb") as f: <NEW_LINE> <INDENT> pickle.dump(self, f)
Serializes this corpus to file with file_name :param file_name: File name to save corpus to :returns: None
625941b515baa723493c3d60
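A natural counterpart to save above, sketched as a module-level loader (the function name is an assumption):
import pickle

def load(file_name: str):
    # Deserialize a corpus previously written by save().
    with open(file_name, "rb") as f:
        return pickle.load(f)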
def __init__(self, items, depth=8, bounding_rect=None): <NEW_LINE> <INDENT> self.nw = self.ne = self.se = self.sw = None <NEW_LINE> depth -= 1 <NEW_LINE> if depth == 0 or not items: <NEW_LINE> <INDENT> self.items = items <NEW_LINE> return <NEW_LINE> <DEDENT> if bounding_rect: <NEW_LINE> <INDENT> bounding_rect = Rect( bounding_rect ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bounding_rect = Rect( items[0] ) <NEW_LINE> for item in items[1:]: <NEW_LINE> <INDENT> bounding_rect.union_ip( item ) <NEW_LINE> <DEDENT> <DEDENT> cx = self.cx = bounding_rect.centerx <NEW_LINE> cy = self.cy = bounding_rect.centery <NEW_LINE> self.items = [] <NEW_LINE> nw_items = [] <NEW_LINE> ne_items = [] <NEW_LINE> se_items = [] <NEW_LINE> sw_items = [] <NEW_LINE> for item in items: <NEW_LINE> <INDENT> in_nw = item.left <= cx and item.top <= cy <NEW_LINE> in_sw = item.left <= cx and item.bottom >= cy <NEW_LINE> in_ne = item.right >= cx and item.top <= cy <NEW_LINE> in_se = item.right >= cx and item.bottom >= cy <NEW_LINE> if in_nw and in_ne and in_se and in_sw: <NEW_LINE> <INDENT> self.items.append(item) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if in_nw: nw_items.append(item) <NEW_LINE> if in_ne: ne_items.append(item) <NEW_LINE> if in_se: se_items.append(item) <NEW_LINE> if in_sw: sw_items.append(item) <NEW_LINE> <DEDENT> <DEDENT> if nw_items: <NEW_LINE> <INDENT> self.nw = QuadTree(nw_items, depth, (bounding_rect.left, bounding_rect.top, cx, cy)) <NEW_LINE> <DEDENT> if ne_items: <NEW_LINE> <INDENT> self.ne = QuadTree(ne_items, depth, (cx, bounding_rect.top, bounding_rect.right, cy)) <NEW_LINE> <DEDENT> if se_items: <NEW_LINE> <INDENT> self.se = QuadTree(se_items, depth, (cx, cy, bounding_rect.right, bounding_rect.bottom)) <NEW_LINE> <DEDENT> if sw_items: <NEW_LINE> <INDENT> self.sw = QuadTree(sw_items, depth, (bounding_rect.left, cy, cx, bounding_rect.bottom))
Creates a quad-tree. @param items: A sequence of items to store in the quad-tree. Note that these items must be a pygame.Rect or have a .rect attribute. @param depth: The maximum recursion depth. @param bounding_rect: The bounding rectangle of all of the items in the quad-tree. For internal use only.
625941b5cc40096d61595741
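An illustrative construction of the QuadTree above, assuming Rect is pygame.Rect (items only need .left/.top/.right/.bottom and union_ip):
>>> import pygame
>>> items = [pygame.Rect(0, 0, 10, 10), pygame.Rect(40, 40, 8, 8)]
>>> tree = QuadTree(items, depth=4)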
def test_queryparser_unstem_iter(): <NEW_LINE> <INDENT> stemmer = xapian.Stem('en') <NEW_LINE> queryparser = xapian.QueryParser() <NEW_LINE> expect([term for term in queryparser.unstemlist('to')], []) <NEW_LINE> expect([term for term in queryparser.unstemlist('question')], []) <NEW_LINE> expect([term for term in queryparser.unstemlist('questions')], []) <NEW_LINE> query = queryparser.parse_query('to question questions') <NEW_LINE> expect([term for term in queryparser.unstemlist('to')], ['to']) <NEW_LINE> expect([term for term in queryparser.unstemlist('question')], ['question']) <NEW_LINE> expect([term for term in queryparser.unstemlist('questions')], ['questions']) <NEW_LINE> expect(str(query), 'Xapian::Query((to:(pos=1) OR question:(pos=2) OR questions:(pos=3)))') <NEW_LINE> queryparser = xapian.QueryParser() <NEW_LINE> queryparser.set_stemmer(stemmer) <NEW_LINE> queryparser.set_stemming_strategy(queryparser.STEM_SOME) <NEW_LINE> expect([term for term in queryparser.unstemlist('Zto')], []) <NEW_LINE> expect([term for term in queryparser.unstemlist('Zquestion')], []) <NEW_LINE> expect([term for term in queryparser.unstemlist('Zquestions')], []) <NEW_LINE> query = queryparser.parse_query('to question questions') <NEW_LINE> expect([term for term in queryparser.unstemlist('Zto')], ['to']) <NEW_LINE> expect([term for term in queryparser.unstemlist('Zquestion')], ['question', 'questions']) <NEW_LINE> expect([term for term in queryparser.unstemlist('Zquestions')], []) <NEW_LINE> expect(str(query), 'Xapian::Query((Zto:(pos=1) OR Zquestion:(pos=2) OR Zquestion:(pos=3)))')
Test QueryParser unstemlist iterator.
625941b5656771135c3eb661
def test_rest_pool_config_put(api_client): <NEW_LINE> <INDENT> user = User.objects.get(username="test") <NEW_LINE> api_client.force_authenticate(user=user) <NEW_LINE> resp = api_client.put("/ec2spotmanager/rest/configurations/1/") <NEW_LINE> assert resp.status_code == requests.codes["method_not_allowed"]
put should not be allowed
625941b5cdde0d52a9e52e1c
def validate(data, log_type): <NEW_LINE> <INDENT> if log_type is LOG_SWEAR: <NEW_LINE> <INDENT> fields = ["date", "time", "song title", "song artist", "song composer", "show name", "report"] <NEW_LINE> for f in fields: <NEW_LINE> <INDENT> if f not in data: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True <NEW_LINE> <DEDENT> return False
Ensure the JSON data has all the required entry fields Args: data (dict): JSON data for swear log log_type (int): Specified log type Returns: (boolean): True if valid, False otherwise.
625941b5dd821e528d63af99
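A hedged usage sketch for the validator; the payload values are placeholders, LOG_SWEAR is the module constant compared against, and the example assumes validate returns True only when every required field is present.

```python
# Illustrative payload only.
entry = {
    "date": "2020-01-01", "time": "12:00:00",
    "song title": "Example", "song artist": "Someone",
    "song composer": "Someone Else", "show name": "Morning Show",
    "report": "details of the incident",
}
print(validate(entry, LOG_SWEAR))                   # True: all fields present
print(validate({"date": "2020-01-01"}, LOG_SWEAR))  # False: missing fields
```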
def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> super().__init__(' ') <NEW_LINE> self.tab_width = 0
Initialize this tab character. Its attribute :attr:`tab_width` is set at a later point in time, when context (:class:`TabStop`) is available.
625941b5d164cc6175782b3b
def sort_data(self): <NEW_LINE> <INDENT> srt_indx = np.argsort( self.betas ) <NEW_LINE> self.betas = [self.betas[indx] for indx in srt_indx] <NEW_LINE> self.Z = [self.Z[indx] for indx in srt_indx]
Sorts the data according to the betas
625941b55fdd1c0f98dc001f
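A standalone illustration of the argsort-based co-sort used by sort_data, with made-up values; self.betas and self.Z are assumed to be parallel lists.

```python
import numpy as np

betas = [0.8, 0.2, 0.5]
Z = ['c', 'a', 'b']

order = np.argsort(betas)            # array([1, 2, 0]): indices in ascending beta order
betas = [betas[i] for i in order]    # [0.2, 0.5, 0.8]
Z = [Z[i] for i in order]            # ['a', 'b', 'c'] -- kept aligned with betas
```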
def resource_name_to_path(self, resource_name): <NEW_LINE> <INDENT> return self.entity_name_to_path(resource_name, 'resource')
TODO :param resource_name: TODO
625941b54e4d5625662d41cc
def __valid_webservice(self, webservice): <NEW_LINE> <INDENT> return True
Validate all information from the webservice and save it if everything goes OK
625941b521a7993f00bc7ad6
def initialAction(self, graph): <NEW_LINE> <INDENT> for tip in graph.listNodes.keys(): <NEW_LINE> <INDENT> for node in graph.listNodes[tip]: <NEW_LINE> <INDENT> if tip == "SubType": <NEW_LINE> <INDENT> node.FILE_ = (node.Type.getValue()[0], 'ATOM3' + node.Type.getValue()[1]) <NEW_LINE> <DEDENT> elif tip == "TypeName": <NEW_LINE> <INDENT> node.FILE_ = node.Name.toString() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> node.FILE_ = None
action to be performed before the graph grammar starts its execution
625941b5d99f1b3c44c6738c
def test_changerole_user(self): <NEW_LINE> <INDENT> print('('+self.test_changerole_user.__name__+')', self.test_changerole_user.__doc__) <NEW_LINE> user = self.connection.change_role_user(USER1_USERNAME,0,0) <NEW_LINE> userobj = self.connection.get_user(user) <NEW_LINE> self.assertEqual(userobj['isAdmin'],'0')
Check that the role of the user with username user1 changes
625941b52c8b7c6e89b355b2
def test_sort_nums_in_list_random_case(rand_ten): <NEW_LINE> <INDENT> result = quick_sort(rand_ten) <NEW_LINE> key = sorted(rand_ten) <NEW_LINE> assert result == key
Test quick sort function.
625941b556b00c62f0f1444a
def get_application(name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationResult: <NEW_LINE> <INDENT> __args__ = dict() <NEW_LINE> __args__['name'] = name <NEW_LINE> if opts is None: <NEW_LINE> <INDENT> opts = pulumi.InvokeOptions() <NEW_LINE> <DEDENT> if opts.version is None: <NEW_LINE> <INDENT> opts.version = _utilities.get_version() <NEW_LINE> <DEDENT> __ret__ = pulumi.runtime.invoke('aws:elasticbeanstalk/getApplication:getApplication', __args__, opts=opts, typ=GetApplicationResult).value <NEW_LINE> return AwaitableGetApplicationResult( appversion_lifecycle=__ret__.appversion_lifecycle, arn=__ret__.arn, description=__ret__.description, id=__ret__.id, name=__ret__.name)
Retrieve information about an Elastic Beanstalk Application. ## Example Usage ```python import pulumi import pulumi_aws as aws example = aws.elasticbeanstalk.get_application(name="example") pulumi.export("arn", example.arn) pulumi.export("description", example.description) ``` :param str name: The name of the application
625941b550812a4eaa59c114
def _force_bytes_parts( self, signature: typing.Union[Blake2Signature, Blake2SignatureDump], ) -> Blake2Signature: <NEW_LINE> <INDENT> return Blake2Signature( data=self._force_bytes(signature.data), signature=self._force_bytes(signature.signature), )
Force given value into bytes, meaning a Blake2Signature container.
625941b526238365f5f0ec57
def chain(funcs, combine=True, stop_on_failure=False, all_or_nothing=True, save_iterator=True): <NEW_LINE> <INDENT> return _Chain(funcs, combine, stop_on_failure, all_or_nothing, save_iterator)
Create a parser that chains a given iterable of parsers together, using the output of one parser as input for the next. If 'combine' is truthy, the resulting 'parsed' window will cover the input between the starting point of the first parser and the ending point of the last one. Note that this might differ from what you'd get by concatenating the individual 'parsed' windows if some of the parsers performed unusual operations on their windows - like 'noconsume' does, for example. If 'stop_on_failure' is truthy, stop parsing instead of failing it when a parser in the chain raises a ParsingFailure exception. Note that this also includes parsers with lookahead, effectively disabling it. If 'all_or_nothing' is truthy (the default), a ParsingEnd exception thrown inside it will not cause the effects gathered so far to be registered and the part of the string parsed so far to be marked as 'parsed'. If the parameter is falsey, partial application of effects and parsers is possible. 'all_or_nothing' is suppressed if 'stop_on_failure' is truthy. If 'save_iterator' is truthy (the default), the elements of the supplied iterator will be saved for future reuse. This avoids the problem of the iterator being exhausted after the first parser run, but leads to higher memory consumption. Disable it only if you're sure that the chain will only be used once, or if you've wrapped the iterator in some reusable iterable, like a list or a deque, or if you've used 'reuse_iter'. Note that chains inherit lookahead mode from the first parser inside them that has the capability.
625941b582261d6c526ab291
def __contains__(self, value): <NEW_LINE> <INDENT> node = self.head <NEW_LINE> for _ in range(len(self)): <NEW_LINE> <INDENT> node = node.next <NEW_LINE> if node.val == value: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False
Implements `val in self`. Returns true if value is found in this list.
625941b5cc40096d61595742
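The method advances past self.head before reading any value, which implies a sentinel head node. A minimal self-contained sketch under that assumption (the class and names here are guesses, not the original implementation):

```python
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

class LinkedList:
    def __init__(self, values=()):
        self.head = Node(None)   # sentinel: real data starts at head.next
        self._size = 0
        tail = self.head
        for v in values:
            tail.next = Node(v)
            tail = tail.next
            self._size += 1

    def __len__(self):
        return self._size

    def __contains__(self, value):   # same body as the entry above
        node = self.head
        for _ in range(len(self)):
            node = node.next
            if node.val == value:
                return True
        return False

lst = LinkedList([1, 2, 3])
print(2 in lst)   # True
print(9 in lst)   # False
```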
def get_name(self): <NEW_LINE> <INDENT> return self._name
Method to get the membership name. Returns a string with the membership name.
625941b5167d2b6e3121898c
def get_format_tool_instance(format_tool): <NEW_LINE> <INDENT> check_param_not_none(format_tool, "format_tool") <NEW_LINE> format_tool_instance = None <NEW_LINE> try: <NEW_LINE> <INDENT> if format_tool.name.lower() == 'file': <NEW_LINE> <INDENT> format_tool_instance = FineFreeFile(format_tool) <NEW_LINE> <DEDENT> elif format_tool.name.lower() == 'droid': <NEW_LINE> <INDENT> format_tool_instance = DROID(format_tool) <NEW_LINE> <DEDENT> elif format_tool.name.lower() == 'fido': <NEW_LINE> <INDENT> format_tool_instance = FIDO(format_tool) <NEW_LINE> <DEDENT> elif format_tool.name.lower() == 'python-magic': <NEW_LINE> <INDENT> format_tool_instance = PythonMagic(format_tool) <NEW_LINE> <DEDENT> elif format_tool.name.lower() == 'apache tika': <NEW_LINE> <INDENT> format_tool_instance = Tika(format_tool) <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return format_tool_instance
Given a format tool instance from the DB, finds and returns the right tool wrapper.
625941b5566aa707497f4369
def read2list(filename, commentchar='#', splitchar=None, skip_empty=True, skip_lines=0, **kwargs): <NEW_LINE> <INDENT> if os.path.splitext(filename)[1] == '.gz' and has_zip: <NEW_LINE> <INDENT> ff = gzip.open(filename) <NEW_LINE> <DEDENT> elif os.path.splitext(filename)[1] == '.gz': <NEW_LINE> <INDENT> print('gzip not installed') <NEW_LINE> return [], [] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ff = open(filename) <NEW_LINE> <DEDENT> data = [] <NEW_LINE> comm = [] <NEW_LINE> line_nr = -1 <NEW_LINE> if splitchar is None or isinstance(splitchar,str): <NEW_LINE> <INDENT> fixwidth = False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> fixwidth = True <NEW_LINE> <DEDENT> while 1: <NEW_LINE> <INDENT> line = ff.readline() <NEW_LINE> if not line: break <NEW_LINE> line_nr += 1 <NEW_LINE> if line_nr<skip_lines: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if skip_empty and line.isspace(): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> line = line.replace('\n','') <NEW_LINE> if line[0] in commentchar: <NEW_LINE> <INDENT> comm.append(line[1:]) <NEW_LINE> continue <NEW_LINE> <DEDENT> if not fixwidth: <NEW_LINE> <INDENT> data.append(line.split(splitchar)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data.append(fw2python(line,splitchar)) <NEW_LINE> <DEDENT> <DEDENT> ff.close() <NEW_LINE> return data,comm
Load an ASCII file to a list of lists. The comments and data go to two different lists. Also opens gzipped files. @param filename: name of file with the data @type filename: string @keyword commentchar: character(s) denoting comment rules @type commentchar: list of str @keyword splitchar: character separating entries in a row (default: whitespace) @type splitchar: str or None @keyword skip_empty: skip empty lines @type skip_empty: bool @keyword skip_lines: number of lines to skip (including comment and empty lines) @type skip_lines: integer @return: list of lists (data rows), list of lists (comment lines without commentchar) @rtype: (list,list)
625941b5fff4ab517eb2f227
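A hypothetical call; the filename and column layout below are made up. Lines starting with '#' land in comm, everything else is whitespace-split into data:

```python
# Read a two-column ASCII table, skipping the first (header) line.
data, comm = read2list('spectrum.dat', commentchar='#', skip_lines=1)
wavelengths = [float(row[0]) for row in data]
fluxes = [float(row[1]) for row in data]
```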
def prepare_iteration_parameters(prepared_parameters): <NEW_LINE> <INDENT> (sess, graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor, image_lists) = prepared_parameters <NEW_LINE> (do_distort_images, distorted_jpeg_data_tensor, distorted_image_tensor) = distort.distort_images(prepared_parameters) <NEW_LINE> num_classes = len(image_lists.keys()) <NEW_LINE> (train_step, cross_entropy, total_loss, bottleneck_input, ground_truth_input, final_tensor, keep_prob) = config.add_final_training_ops(num_classes, flags.final_tensor_name, bottleneck_tensor) <NEW_LINE> prepare_session(sess) <NEW_LINE> evaluation_step = config.add_evaluation_step(final_tensor, ground_truth_input) <NEW_LINE> prediction_step = config.add_prediction_step(final_tensor) <NEW_LINE> return (sess, graph, (do_distort_images, image_lists, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor, jpeg_data_tensor, train_step, bottleneck_input, ground_truth_input, keep_prob, evaluation_step, prediction_step, cross_entropy, total_loss))
Prepares parameters for training iterations Args: prepared_parameters - prepared parameters tuple for training Returns: tuple of - sess - current TensorFlow session graph - network graph do_distort_images - distort flag image_lists - training images list distorted_jpeg_data_tensor - distorted JPEG image as tensor distorted_image_tensor - distorted image as tensor resized_image_tensor - resized input image as tensor bottleneck_tensor - bottleneck layer tensor jpeg_data_tensor - images for iteration train_step - training step descriptor bottleneck_input - input for bottleneck layer ground_truth_input - label for input keep_prob - dropout keep probability evaluation_step - prepared step for evaluation prediction_step - prepared step for prediction cross_entropy - cross-entropy loss total_loss - total training loss
625941b550485f2cf553cb87
def create_new(username, password, first_name=None, last_name=None): <NEW_LINE> <INDENT> logging.debug("Initiated Account creation") <NEW_LINE> if __check_existence(username): <NEW_LINE> <INDENT> raise NameError('User already exists') <NEW_LINE> <DEDENT> user = utils.load_json(env.USER_JSON_PATH) <NEW_LINE> user["login"] = username <NEW_LINE> user["loginUppercase"] = username.upper() <NEW_LINE> user["password"] = password <NEW_LINE> if first_name: <NEW_LINE> <INDENT> user["personalInfo"]["firstName"] = first_name <NEW_LINE> <DEDENT> if last_name: <NEW_LINE> <INDENT> user["personalInfo"]["lastName"] = last_name <NEW_LINE> <DEDENT> if not __raw_create(user): <NEW_LINE> <INDENT> raise RuntimeError('Unable to create account') <NEW_LINE> <DEDENT> logging.debug("Verifying user") <NEW_LINE> try: <NEW_LINE> <INDENT> org_id = utils.get_orgid(username) <NEW_LINE> <DEDENT> except NameError as e: <NEW_LINE> <INDENT> logging.error('[New Account] failed to verify account: {0}'.format(e)) <NEW_LINE> raise RuntimeError('Unable to verify if account creation was ' 'successful') <NEW_LINE> <DEDENT> return org_id
Create new account for Stage Candlepin Loads the env.USER_JSON_PATH JSON with user specification. Then verifies if the user is not already present in the Stage Candlepin. If not, the account is created and verified. :param username: Account's username :param password: Account's password :param first_name: Account's first name :param last_name: Account's last name :return org_id: User's Org ID is returned if the creation was successful
625941b563d6d428bbe442de
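A hedged driver for the account helper; the credentials are placeholders, and the exception handling mirrors the NameError/RuntimeError contract described in the docstring:

```python
import logging

try:
    org_id = create_new('qa-user-01', 's3cret',
                        first_name='QA', last_name='User')
    logging.info('Created account, org id: %s', org_id)
except NameError:
    logging.warning('Account already exists, skipping creation')
except RuntimeError as err:
    logging.error('Account creation failed: %s', err)
```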
def start(self, tid): <NEW_LINE> <INDENT> self._join_transaction(tid) <NEW_LINE> logging.debug("Start activating user interface module") <NEW_LINE> self.__started = True <NEW_LINE> logging.debug("Start button monitoring thread") <NEW_LINE> threading.Thread( target=self.__button_monitor_thread, daemon=True).start() <NEW_LINE> logging.debug("Finish activating user interface module")
Starts working from the current state.
625941b5b545ff76a8913c0f
def remove_punctuation(self, repl=" ", punc=punc): <NEW_LINE> <INDENT> self._text = punc.remove(self._text, repl) <NEW_LINE> return self
:param str repl: replacement for the removed punctuation; if set, the default value will be overwritten. Removes all punctuation from the text.
625941b56e29344779a62404
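Because the method mutates self._text and returns self, calls can be chained. A sketch under that assumption; TextCleaner is a made-up stand-in for whatever class defines the method:

```python
# Only the chaining pattern enabled by `return self` is the point here.
doc = TextCleaner("Hello, world! How's it going?")
cleaned = doc.remove_punctuation(repl=" ")   # punctuation replaced by spaces
assert cleaned is doc                        # method returns self, so chaining works
```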
def _get_sequence(value, n, channel_index, name): <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> value = [1] <NEW_LINE> <DEDENT> elif not isinstance(value, collections_abc.Sized): <NEW_LINE> <INDENT> value = [value] <NEW_LINE> <DEDENT> current_n = len(value) <NEW_LINE> if current_n == n + 2: <NEW_LINE> <INDENT> return value <NEW_LINE> <DEDENT> elif current_n == 1: <NEW_LINE> <INDENT> value = list((value[0],) * n) <NEW_LINE> <DEDENT> elif current_n == n: <NEW_LINE> <INDENT> value = list(value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("{} should be of length 1, {} or {} but was {}".format( name, n, n + 2, current_n)) <NEW_LINE> <DEDENT> if channel_index == 1: <NEW_LINE> <INDENT> return [1, 1] + value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return [1] + value + [1]
Formats a value input for gen_nn_ops.
625941b555399d3f055884a1
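Worked examples of the normalisation, assuming the function above is in scope and the TensorFlow-style convention that channel_index == 1 means channels-first (NCHW) and anything else channels-last (NHWC):

```python
print(_get_sequence(2, 2, 3, 'strides'))             # [1, 2, 2, 1]  (scalar broadcast, NHWC)
print(_get_sequence([1, 2], 2, 1, 'strides'))        # [1, 1, 1, 2]  (length n, NCHW)
print(_get_sequence(None, 2, 3, 'strides'))          # [1, 1, 1, 1]  (default)
print(_get_sequence([1, 2, 2, 1], 2, 3, 'strides'))  # already length n+2 -> unchanged
```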
def quiz_selected(self, quiz_name): <NEW_LINE> <INDENT> if self.quiz_name is not None: <NEW_LINE> <INDENT> write_quiz_stats(self.quiz_name, self.qs) <NEW_LINE> <DEDENT> self.quiz = read_quiz_queries(quiz_name) <NEW_LINE> if len(self.quiz) == 0: <NEW_LINE> <INDENT> self.quiz_name = None <NEW_LINE> self.summary['text'] = 'NIE ZNALEZIONO PYTAN' <NEW_LINE> return <NEW_LINE> <DEDENT> self.quiz_name = quiz_name <NEW_LINE> self.qs = read_quiz_stats(quiz_name, self.quiz) <NEW_LINE> self.start_quiz() <NEW_LINE> return
Callback for selecting a quiz name from the dropdown menu
625941b5435de62698dfda43