code: string, lengths 4 to 4.48k
docstring: string, lengths 1 to 6.45k
_id: string, length 24
def __init__(self, data, _id=None, session=None, date=None, source=None, destination=None):
    if data is None:
        data = ''
    self.data = data
    self.session = session
    if _id is None:
        _id = uuid.uuid4()
    self.id = _id
    if date is None:
        date = time.mktime(time.gmtime())
    self.__date = date
    self.__source = source
    self.__destination = destination
    self.__visualizationFunctions = TypedList(VisualizationFunction)
    self.__metadata = OrderedDict()
    self.__semanticTags = OrderedDict()
:parameter data: the content of the message
:type data: a :class:`object`
:parameter _id: the unique identifier of the message
:type _id: :class:`uuid.UUID`
:keyword session: the session in which the message was captured
:type session: :class:`netzob.Common.Models.Vocabulary.Session.Session`
:parameter date: the timestamp of the message
:type date: a :class:`int`
:parameter source: the optional source address of the message
:type source: a :class:`str`
:parameter destination: the optional destination address of the message
:type destination: a :class:`str`
625941b3656771135c3eb622
def to_int(facts):
    result = functools.reduce(lambda x, y: x * (y[0] ** y[1]), facts, 1)
    return result
Assuming the input is a list of tuples of the form (p_i, a_i), this method returns the product (p_i ** a_i) * (p_j ** a_j) * ...
625941b36aa9bd52df036b52
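A usage sketch for to_int above (assumes functools is imported and the function is in scope):

to_int([(2, 3), (3, 2), (5, 1)])  # -> 360, since 2**3 * 3**2 * 5**1 == 360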
def set_disable_keyboard_on_lock(enable):
    state = __utils__['mac_utils.validate_enabled'](enable)
    cmd = 'systemsetup -setdisablekeyboardwhenenclosurelockisengaged ' '{0}'.format(state)
    __utils__['mac_utils.execute_return_success'](cmd)
    return __utils__['mac_utils.confirm_updated'](
        state,
        get_disable_keyboard_on_lock,
        normalize_ret=True,
    )
Set whether or not the keyboard should be disabled when the X Serve enclosure lock is engaged.

:param bool enable: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively

:return: True if successful, False if not
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' system.set_disable_keyboard_on_lock False
625941b3507cdc57c6306a81
def fetch_access_token(self, request_token=None, **kwargs):
    with self._get_oauth_client() as client:
        if request_token is None:
            raise MissingRequestTokenError()
        token = {}
        token.update(request_token)
        token.update(kwargs)
        client.token = token
        params = self.access_token_params or {}
        token = client.fetch_access_token(self.access_token_url, **params)
    return token
Fetch access token in one step.

:param request_token: A previous request token for OAuth 1.
:param kwargs: Extra parameters to fetch access token.
:return: A token dict.
625941b397e22403b379cd48
def set_captain(self) -> None:
    self.print_players()
    print("-" * 50)
    captain_nr = int(input("Write the shirt number of your upcoming captain: "))
    if self.team.set_captain(captain_nr) == False:
        print("There isn't a player in the squad with nr: " + str(captain_nr))
    else:
        print("Nr:" + str(captain_nr) + " is now your new captain")
Makes a player the team captain
625941b394891a1f4081b858
def get_optimizer_param_groups_lr(
    model: OptimizerModelsType,
    base_lr: float,
    bias_lr_factor: float = 1.0,
    lr_multipliers_overwrite: Optional[Dict[str, float]] = None,
):
    params: List[Dict[str, Any]] = []
    for (
        module_name,
        _module,
        module_param_name,
        value,
    ) in iterate_module_named_parameters(model):
        cur_lr = base_lr
        if module_param_name == "bias":
            cur_lr = base_lr * bias_lr_factor
        if lr_multipliers_overwrite is not None:
            for kname, mult in lr_multipliers_overwrite.items():
                if kname in module_name:
                    cur_lr = cur_lr * mult
        params += [
            {
                "params": [value],
                "lr": cur_lr,
            }
        ]
    return params
Allow setting up lr for modules.

base_lr: lr for all modules
bias_lr_factor: scale factor for lr for bias term
lr_multipliers_overwrite (dict: str -> float): Applying different lr multiplier to a set of parameters whose names contain certain keys. For example, if lr_multipliers_overwrite={'backbone': 0.1}, the LR for the parameters whose names contain 'backbone' will be scaled to 0.1x. Set lr_multipliers_overwrite=None if no multipliers required.
625941b3187af65679ca4ed5
@receiver(post_save, sender=User)
def create_notification_setting(sender, **kwargs):
    if kwargs['created']:
        NotificationSetting.objects.get_or_create(user=kwargs['instance'])
Also create an empty notification_setting when a user is created.
625941b3baa26c4b54cb0ed4
def readData(self):
    self.filepath = askopenfilename(parent=root, title="Choose data set!").__str__()
    file = open(self.filepath, 'r')
    root.destroy()
    strm1 = file.read().split('\n')
    inData = np.zeros([4, len(strm1) - 1])
    output = np.zeros([1, len(strm1) - 1])
    for ln in range(len(strm1) - 1):
        smp = np.array([float(i) for i in strm1[ln].split(',')])
        inData[:, ln] = smp[0:4]
        output[0][ln] = smp[-1]
    file.close()
    return [inData, output]
Reads a comma-separated data set chosen via a file dialog and returns a 4xN input array and a 1xN output array.
625941b355399d3f05588463
def stripFormatting(s):
    s = stripColor(s)
    s = stripBold(s)
    s = stripReverse(s)
    s = stripUnderline(s)
    s = stripItalic(s)
    return s.replace('\x0f', '').replace('\x0F', '')
Returns the string s, with all formatting removed.
625941b36fece00bbac2d4eb
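A behavioral sketch for stripFormatting above; the strip* helpers are not shown, so this assumes they remove the usual mIRC control codes (\x03 color, \x02 bold, \x16 reverse, \x1f underline, \x1d italic):

s = '\x02bold\x02 and \x1funderlined\x1f text\x0f'
stripFormatting(s)  # -> 'bold and underlined text' (assuming the helpers behave as named)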
def _backpropagate(self, path, reward):
    for node in reversed(path):
        self.N[node] += 1
        self.Q[node] += reward
Send the reward back up to the ancestors of the leaf
625941b345492302aab5e070
def test_dict():
    pass
Test C{dict} interface

>>> import gbp.git
>>> modifier = gbp.git.GitModifier('foo', 'bar', 1)
>>> sorted(modifier.keys())
['date', 'email', 'name']
>>> sorted(modifier.items())
[('date', '1 +0000'), ('email', 'bar'), ('name', 'foo')]
625941b38c3a87329515816e
@array
@score_10
def pscore(detections, annotations, tolerance=PSCORE_TOLERANCE):
    if len(annotations) < 2:
        raise BeatIntervalError("At least 2 annotations are needed for "
                                "P-Score.")
    if float(tolerance) <= 0:
        raise ValueError("`tolerance` must be greater than 0.")
    window = tolerance * np.median(np.diff(annotations))
    errors = calc_absolute_errors(detections, annotations)
    p = len(detections[errors <= window])
    p /= float(max(len(detections), len(annotations)))
    return p
Calculate the P-score accuracy for the given detections and annotations.

The P-score is determined by taking the sum of the cross-correlation between two impulse trains, representing the detections and annotations allowing for a tolerance of 20% of the median annotated interval [1]_.

Parameters
----------
detections : list or numpy array
    Detected beats.
annotations : list or numpy array
    Annotated beats.
tolerance : float, optional
    Evaluation tolerance (fraction of the median beat interval).

Returns
-------
pscore : float
    P-Score.

Notes
-----
Contrary to the original implementation which samples the two impulse trains with 100Hz, we do not quantise the annotations and detections but rather count all detections falling within the defined tolerance window.

References
----------
.. [1] M. McKinney, D. Moelants, M. Davies and A. Klapuri,
       "Evaluation of audio beat tracking and music tempo extraction algorithms",
       Journal of New Music Research, vol. 36, no. 1, 2007.
625941b3b830903b967e96c9
def calculate_euclidean_distance(out_dir, ref_set, params, fit_type, datasets, subset=None):
    euclidean_base_csv = 'euclidean_distances_from_{}_datasets_{}'.format(len(datasets), fit_type)
    if subset is not None:
        euclidean_base_csv = euclidean_base_csv.rsplit('.')[0] + subset + '.csv'
    for residue, data in ref_set.iterrows():
        euclidean_csv = '{}'.format(residue) + euclidean_base_csv
        if not os.path.exists(os.path.join(out_dir, residue, euclidean_csv)):
            start = time.time()
            parameters_csv_filename = '{}_from_{}_datasets_{}.csv'.format(
                residue, len(datasets), fit_type)
            fit_parameters = pandas.read_csv(os.path.join(out_dir, residue, parameters_csv_filename),
                                             index_col=0, header=0)
            euclidean_distance = pandas.DataFrame(index=fit_parameters.index,
                                                  columns=fit_parameters.index)
            if subset == 'Amplitudes':
                fit_parameters = fit_parameters[['a1', 'a2', 'a3']]
            elif subset == 'Amplitudes_Means':
                fit_parameters = fit_parameters[['a1', 'a2', 'a3', 'mean_1', 'mean_2', 'mean_3']]
            else:
                logger.warning('Incorrect identifier for subset of parameters on which to perform clustering')
            for dataset_col in fit_parameters.index:
                for dataset_row in fit_parameters.index:
                    euclidean_distance.loc[dataset_col][dataset_row] = euclidean(
                        fit_parameters.loc[dataset_col].values, fit_parameters.loc[dataset_row].values)
            euclidean_distance.to_csv(os.path.join(out_dir, residue, euclidean_csv))
            end = time.time()
            duration = end - start
            logger.info('{}: Euclidean distance '
                        'for {} datasets in '
                        '{} seconds'.format(residue, len(datasets), duration))
        else:
            logger.info('{}: Euclidean distance already calculated for these {} datasets'.format(residue, len(datasets)))
    return euclidean_base_csv
Calculates and stores pairwise euclidean distances
625941b37c178a314d6ef208
@log_func
def ProposeClassView(request):
    if request.method == 'POST':
        form = ClassProposalForm(request.POST)
        user_message = UserMessage.objects.get_or_create(
            view='ProposeClassView',
            code="SUBMIT_SUCCESS",
            defaults={
                'summary': "Class Proposal Success",
                'description': default_propose_submit_msg})
        if form.is_valid():
            form.save()
            messages.success(request, user_message[0].description)
            return HttpResponseRedirect(reverse('home', urlconf='gbe.urls'))
        else:
            template = loader.get_template('gbe/class_proposal.tmpl')
            context = RequestContext(request, {'form': form})
            return HttpResponse(template.render(context))
    else:
        form = ClassProposalForm()
        template = loader.get_template('gbe/class_proposal.tmpl')
        context = RequestContext(request, {'form': form})
        return HttpResponse(template.render(context))
Handle suggestions for classes from the great unwashed
625941b3507cdc57c6306a82
def fileSyntaxCheck(fileInfo, timeoutInterval=20):
    return hdlConvertor.parse(fileInfo.fileName, fileInfo.lang)
Perform syntax check on whole file
625941b3046cf37aa974cafb
def __init__(self, word_to_idx, mode='train', features_L=196, features_D=512,
             dim_embed=512, dim_hidden=1024, n_time_step=16, alpha_c=0.0,
             lstm_dropout_keep_prob=0.5):
    assert mode in ["train", "eval"]
    self.word_to_idx = word_to_idx
    self.vocab_size = len(word_to_idx)
    self.mode = mode
    self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
    self.alpha_c = alpha_c
    self.lstm_dropout_keep_prob = lstm_dropout_keep_prob
    self.V = len(word_to_idx)
    self.L = features_L
    self.D = features_D
    self.M = dim_embed
    self.H = dim_hidden
    self.T = n_time_step
    self._start = word_to_idx['<START>']
    self._null = word_to_idx['<NULL>']
    self.global_step = 0
    self.weight_initializer = tf.random_uniform_initializer(minval=-0.08, maxval=0.08)
    self.const_initializer = tf.constant_initializer(0.0)
    self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
    self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
    self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
Args:
    word_to_idx: word-to-index mapping dictionary.
    mode: train or evaluation
    features_L, features_D: (optional) Dimension of vggnet19 conv5_3 feature vectors.
    dim_embed: (optional) Dimension of word embedding.
    dim_hidden: (optional) Dimension of all hidden state.
    n_time_step: (optional) Time step size of LSTM.
    alpha_c: (optional) Doubly stochastic regularization coefficient. (see Section (4.2.1) for explanation)
    lstm_dropout_keep_prob: (optional) The probability that a hidden unit is kept.
625941b37d43ff24873a2a54
def __init__(self, top, bot):
    self.top = top
    self.bot = bot
Initializer for a RationalFraction instance. top and bot are Polynomial objects representing the numerator and denominator.
625941b391af0d3eaac9b7c3
def write_image_fluxes(self):
    log.info("Writing image fluxes ...")
    self.images_fluxes.saveto(self.images_fluxes_filepath)
Writes the image fluxes to file.
:return: None
625941b3d164cc6175782afe
def InsertListCtrlItem(self, index, value, item):
    i = self.__id(item)
    some_long = self.InsertStringItem(index, value)
    gItem = self.GetItem(index)
    gItem.SetData(i)
    self.SetItem(gItem)
    return some_long
Insert an item to the list control giving it an internal id.
625941b399cbb53fe6792998
def hash_password(plain_password):
    return pwd_context.hash(plain_password)
Hash the password, using the pre-configured CryptContext.
625941b32c8b7c6e89b3557c
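pwd_context is not shown in the source; a typical passlib setup (an assumption) would look like:

from passlib.context import CryptContext

# hypothetical configuration; the real scheme list is not shown
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

hashed = hash_password("s3cret")
pwd_context.verify("s3cret", hashed)  # -> True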
def get(self, key):
    if key not in self.key_map:
        return -1
    else:
        self.touch(key)
        return self.val_map[key]
:type key: int
:rtype: int
625941b330dc7b766590171c
def unshift(self):
    if self.begin is None:
        firstvalue = None
    elif self.begin == self.end:
        firstvalue = self.end.value
        self.begin = None
        self.end = None
    else:
        firstvalue = self.begin.value
        self.begin = self.begin.next
        self.begin.prev = None
    return firstvalue
Removes the first item (from begin) and returns it.
625941b3d58c6744b4257a11
def datetime_to_seconds(dt):
    return dt.timestamp()
Named wrapper for ``dt.timestamp()``, just because the bare call is confusing.
625941b3a4f1c619b28afdfc
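A usage sketch for datetime_to_seconds above:

from datetime import datetime, timezone

dt = datetime(2021, 1, 1, tzinfo=timezone.utc)
datetime_to_seconds(dt)  # -> 1609459200.0 (Unix epoch seconds)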
def is_bool(value: Union[str, bool]) -> bool:
    return (value in true_list + false_list) or (isinstance(value, bool))
validate whether the value is boolean
625941b3cb5e8a47e48b7869
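A behavioral sketch for is_bool above; true_list and false_list are not shown in the source, so these definitions are assumptions:

true_list = ['true', 'yes', '1']   # hypothetical
false_list = ['false', 'no', '0']  # hypothetical

is_bool('yes')    # -> True
is_bool(False)    # -> True (via the isinstance check)
is_bool('maybe')  # -> False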
def create_carrier_proposed_assignment(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    return self.create_carrier_proposed_assignment_with_http_info(**kwargs)
Create an assignment  # noqa: E501

Creates a new assignment that a driver can later use. Each driver can only have one future assignment. **Submit Feedback**: Likes, dislikes, and API feature requests should be filed as feedback in our <a href="https://forms.gle/zkD4NCH7HjKb7mm69" target="_blank">API feedback form</a>. If you encountered an issue or noticed inaccuracies in the API documentation, please <a href="https://www.samsara.com/help" target="_blank">submit a case</a> to our support team.  # noqa: E501

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.create_carrier_proposed_assignment(async_req=True)
>>> result = thread.get()

:param async_req bool: execute request asynchronously
:param CreateCarrierProposedAssignmentRequest carrier_proposed_assignment: The assignment to create.
:param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True.
:param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
:return: CarrierProposedAssignmentResponse
         If the method is called asynchronously, returns the request thread.
625941b3287bf620b61d3822
@celery.task(bind=True, ignore_results=True, base=AttributeManager)
def update_attributes(self: AttributeManager, app_name: str, user_id: str) -> None:
    warnings.warn("This function will be removed. Use update_attributes_keep_result instead.", DeprecationWarning)
    _update_attributes(self, app_name, user_id)
Task executing on the Celery worker service as an RPC called from the different eduID applications.

:param self: base class
:param app_name: calling application name, like 'eduid_signup'
:param user_id: id for the user that has been updated by the calling application
625941b301c39578d7e74bf5
def get_loaded_modules():
    return " ".join(os.environ.get("LOADEDMODULES", "").split(":"))
Returns a space separated list of loaded modules.

These are modules loaded by the environment-modules system. This function just looks in the LOADEDMODULES environment variable for the list.
625941b3fff4ab517eb2f1e9
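A usage sketch for get_loaded_modules above:

import os

os.environ["LOADEDMODULES"] = "gcc/9.3.0:openmpi/4.0.3"
get_loaded_modules()  # -> 'gcc/9.3.0 openmpi/4.0.3'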
def filter_old_data(filename):
    dataframe = pd.read_csv(filename)
    to_remove = set([dt for dt in dataframe.date_and_time if flip_dates(dt) < deadline_time(6)])
    for dt in to_remove:
        dataframe = dataframe[dataframe.date_and_time != dt]
    dataframe.to_csv(path_or_buf="services/frontend_files/recent_severe_crimes.csv", sep=",", index=False)
Removes entries from the csv that occur before the threshold date.

:param filename: file to remove entries from
625941b3eab8aa0e5d26d90f
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('Confusion_Matrix_' + '.png')
    plt.close()
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.

Input
- cm : the computed confusion matrix values
- classes : the class labels for each row and column of the confusion matrix
- normalize : True to display percentages, False to display counts
625941b33539df3088e2e0fc
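A usage sketch for plot_confusion_matrix above; it assumes numpy as np, matplotlib.pyplot as plt, and itertools are already imported (the body relies on them), and scikit-learn is one common way to build the cm input:

from sklearn.metrics import confusion_matrix

y_true = ['cat', 'dog', 'cat', 'dog']
y_pred = ['cat', 'dog', 'dog', 'dog']
cm = confusion_matrix(y_true, y_pred, labels=['cat', 'dog'])
plot_confusion_matrix(cm, classes=['cat', 'dog'], normalize=False)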
def create_test_data(user=None):
    SiteFactory()
    UserFactory.create_batch(size=10)
Create test data.
625941b394891a1f4081b859
def set_trigger(self):
    ret = self.uEyeDll.is_SetExternalTrigger(self.cam, c_uint(IS_SET_TRIGGER_SOFTWARE))
    if ret == IS_SUCCESS:
        self.logger.info("Successfully set software trigger")
    else:
        self.logger.error("Failed to set software trigger; error code: " + str(ret))
        return
Wrapped call to set trigger type to software trigger.
625941b3462c4b4f79d1d481
def split(l, idx):
    if isinstance(idx, numbers.Integral):
        idx = [idx]
    if idx:
        idx = [0, *sorted(idx), len(l)]
        for i, j in mit.pairwise(idx):
            yield l[i:j]
    else:
        yield l
Split a list into sub-lists at the given indices
625941b3adb09d7d5db6c545
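A usage sketch for split above (assumes import numbers and import more_itertools as mit, which the body relies on):

list(split([1, 2, 3, 4, 5], 2))       # -> [[1, 2], [3, 4, 5]]
list(split([1, 2, 3, 4, 5], [1, 3]))  # -> [[1], [2, 3], [4, 5]]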
def test_generate_orphans_by_type_with_unit_keys(self):
    unit_1 = gen_content_unit(PHONY_TYPE_1.id, self.content_root)
    gen_content_unit(PHONY_TYPE_2.id, self.content_root)
    results = list(self.orphan_manager.generate_orphans_by_type_with_unit_keys(PHONY_TYPE_1.id))
    self.assertEqual(1, len(results))
    self.assertEqual(unit_1['_content_type_id'], results[0]['_content_type_id'])
Assert that orphans are retrieved by type with unit keys correctly
625941b3ad47b63b2c509d3e
def __init__(self, item):
    self.__item = item
    self.itemID = item.ID if item is not None else None
    self.amount = 0
    self.__itemModifiedAttributes = ModifiedAttributeDict()
    self.__itemModifiedAttributes.original = item.attributes
    self.__itemModifiedAttributes.overrides = item.overrides
Initialize cargo from the program
625941b3b57a9660fec33630
def test_transcribe_dna_to_rna():
    assert 'GAUGGAACUUGACUACGUAAAUU' == transcribe_dna_to_rna('GATGGAACTTGACTACGTAAATT')
Test for transcribe_dna_to_rna.
625941b38c0ade5d55d3e770
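The function under test is not shown; a minimal sketch that would satisfy the assertion above (an assumption, not the project's actual implementation):

def transcribe_dna_to_rna(dna):
    # DNA -> RNA transcription replaces thymine (T) with uracil (U)
    return dna.replace('T', 'U')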
def endomorphism_ring(self, category=None):
    try:
        return self.__endomorphism_ring
    except AttributeError:
        pass
    self.__endomorphism_ring = homspace.EndomorphismSubring(self, category=category)
    return self.__endomorphism_ring
Return the endomorphism ring of self.

OUTPUT: the endomorphism ring of ``self``

EXAMPLES:

We compute a few endomorphism rings::

    sage: from sage_modabvar import J0
    sage: J0(11).endomorphism_ring()
    Endomorphism ring of Abelian variety J0(11) of dimension 1
    sage: J0(37).endomorphism_ring()
    Endomorphism ring of Abelian variety J0(37) of dimension 2
    sage: J0(33)[2].endomorphism_ring()
    Endomorphism ring of Simple abelian subvariety 33a(1,33) of dimension 1 of J0(33)

No real computation is done::

    sage: from sage_modabvar import J1
    sage: J1(123456).endomorphism_ring()
    Endomorphism ring of Abelian variety J1(123456) of dimension 423185857
625941b3097d151d1a222c15
def num_processes(tree, fn):
    c = Counter()
    for proc in tree.iter('process'):
        c['num_processes'] += 1
    return c
gets the number of processes in the exe
625941b3a79ad161976cbef7
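A usage sketch for num_processes above (assumes from collections import Counter; note the fn argument is unused by the body):

import xml.etree.ElementTree as ET

tree = ET.fromstring('<report><process/><process/></report>')
num_processes(tree, fn=None)  # -> Counter({'num_processes': 2})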
def is_valid_type(self, filepath):
    if len(self._validators) == 0:
        validators = [self.default_validator, self.not_repo]
    else:
        validators = self._validators + [self.not_repo]
    for validator in validators:
        if not validator(filepath):
            return False
    return True
Returns True if the given filepath is a valid watchable filetype. The filepath can be assumed to be a file (not a directory).
625941b36e29344779a623c9
def getShortDescription(self, test, room):
    room -= 1
    s = str(test)
    if len(s) > room:
        pos = s.find(" (")
        if pos >= 0:
            w = room - (pos + 5)
            if w < 1:
                s = s[:room - 3] + "..."
            else:
                pre = s[:pos + 2]
                post = s[-w:]
                s = "%s...%s" % (pre, post)
        else:
            w = room - 4
            s = '... ' + s[-w:]
    return ' ' + s[:room]
Return a description of a test that fits in ``room`` characters.
625941b34e696a04525c9207
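A hand-traced behavioral sketch for getShortDescription above, with a hypothetical test name:

# given str(test) == 'test_example (pkg.tests.TestCase)' and room == 20:
#   room becomes 19; the name is longer than that, so the middle is elided
#   result: ' test_example (...e)'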
def consume_attribute_type_information(line: str) -> Tuple[Optional[str], Optional[Tuple[Union[str, List[str]]]], str]:
    attribute_type, line = consume(line, ATTRIBUTE_TYPE_PATTERN)
    if attribute_type is None:
        return None, None, line
    additional = None
    if attribute_type.startswith('{'):
        additional = parse_nominal_values(attribute_type)
        attribute_type = constants.NOMINAL_ATTRIBUTE_KEYWORD
    attribute_type = attribute_type.lower()
    if attribute_type == constants.DATE_ATTRIBUTE_KEYWORD:
        additional, line = consume(line, DATE_FORMAT_PATTERN)
    elif attribute_type == constants.REAL_ATTRIBUTE_KEYWORD or attribute_type == constants.INTEGER_ATTRIBUTE_KEYWORD:
        additional = attribute_type
        attribute_type = constants.NUMERIC_ATTRIBUTE_KEYWORD
    return attribute_type, (additional,) if additional is not None else tuple(), line
Extracts the type of an attribute from the beginning of a line.

:param line: The line to extract the attribute type from.
:return: The attribute type, any additional type information, and the remainder of the line.
625941b30383005118ecf396
def parse_ggt_format(data_dir):
    gt_fnames = glob.glob(osp.join(data_dir, '*.json'))
    id_match = re.compile('.*?([\d]+_*[\d]*).*')
    gt = {}
    for gt_fn in gt_fnames:
        im_id = id_match.match(osp.basename(gt_fn)).groups()[0]
        gt_dat = []
        try:
            with open(gt_fn, 'r') as f:
                gt_dat = json.load(f)
        except:
            gt[im_id] = (np.empty((5, 0)), [])
        bbs, txt = np.zeros((5, 0)), []
        for i in xrange(1, len(gt_dat)):
            try:
                i_gt = gt_dat[i]
                if isinstance(i_gt['boundingPoly'], list):
                    verts = [i_gt['boundingPoly'][i]['vertices'] for i in xrange(len(i_gt['boundingPoly']))]
                else:
                    verts = i_gt['boundingPoly']['vertices']
                x1y1 = np.array([[verts[0]['x']], [verts[0]['y']]])
                x2y2 = np.array([[verts[2]['x']], [verts[2]['y']]])
                wh = x2y2 - x1y1
                try:
                    score = i_gt['score']
                except KeyError:
                    score = 1
                bbs = np.c_[bbs, np.r_[x1y1, wh, [[score]]]]
                txt.append(i_gt['description'])
                gt[im_id] = (bbs, txt)
            except KeyError:
                continue
    return gt
Parses Google OCR json and returns the ground-truth in standard format:

    image-id --> [5xn, [text_1, text_2, ..., text_n]] dict.

Where 5xn is a matrix for n boxes, with the first four numbers being x,y,w,h and the 5th being the "score".
625941b3e5267d203edcda53
def __pingBack(ircMsg):
    commands = []
    nick = ircMsg.net.findNick(ircMsg.src)
    if nick.pingOut > 0:
        lagTime = time.time() - nick.pingOut
        lagTime = round(lagTime, 4)
        commands.append(
            "PRIVMSG " + nick.pingDest + " :Your current ping is " + str(lagTime) + " seconds " + ircMsg.src + ".")
        nick.pingOut = 0
        nick.pingDest = None
    return commands
Processes a return ping, relays the lag back.
625941b326238365f5f0ec1a
def get_or_compute_grads(loss_or_grads, params):
    if any(not isinstance(p, theano.compile.SharedVariable) for p in params):
        raise ValueError("params must contain shared variables only. If it "
                         "contains arbitrary parameter expressions, then "
                         "lasagne.utils.collect_shared_vars() may help you.")
    if isinstance(loss_or_grads, list):
        if not len(loss_or_grads) == len(params):
            raise ValueError("Got %d gradient expressions for %d parameters" %
                             (len(loss_or_grads), len(params)))
        return loss_or_grads
    else:
        return theano.grad(loss_or_grads, params, disconnected_inputs='warn')
Helper function returning a list of gradients.

Parameters
----------
loss_or_grads : symbolic expression or list of expressions
    A scalar loss expression, or a list of gradient expressions
params : list of shared variables
    The variables to return the gradients for

Returns
-------
list of expressions
    If `loss_or_grads` is a list, it is assumed to be a list of gradients and returned as is, unless it does not match the length of `params`, in which case a `ValueError` is raised. Otherwise, `loss_or_grads` is assumed to be a cost expression and the function returns `theano.grad(loss_or_grads, params)`.

Raises
------
ValueError
    If `loss_or_grads` is a list of a different length than `params`, or if any element of `params` is not a shared variable (while we could still compute its gradient, we can never update it and want to fail early).
625941b3dd821e528d63af63
def postproc(self):
    return True
post-processing
625941b35f7d997b8717484d
def __init__(self, data=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration.get_default_copy()
    self.local_vars_configuration = local_vars_configuration
    self._data = None
    self.discriminator = None
    if data is not None:
        self.data = data
GetLiveStreamPlaybackIDResponse - a model defined in OpenAPI
625941b34527f215b584c214
def __init__(self, primary_smtp_address, fullname=None, access_type=None, autodiscover=False,
             credentials=None, config=None, locale=None, default_timezone=None):
    if '@' not in primary_smtp_address:
        raise ValueError("primary_smtp_address '%s' is not an email address" % primary_smtp_address)
    self.primary_smtp_address = primary_smtp_address
    self.fullname = fullname
    try:
        self.locale = locale or getlocale()[0] or None
    except ValueError as e:
        log.warning('Failed to get locale (%s)' % e)
        self.locale = None
    if self.locale is not None:
        if not isinstance(self.locale, string_types):
            raise ValueError("Expected 'locale' to be a string, got %s" % self.locale)
    self.access_type = access_type or (DELEGATE if credentials else IMPERSONATION)
    if self.access_type not in ACCESS_TYPES:
        raise ValueError("'access_type' %s must be one of %s" % (self.access_type, ACCESS_TYPES))
    if autodiscover:
        if not credentials:
            raise AttributeError('autodiscover requires credentials')
        if config:
            raise AttributeError('config is ignored when autodiscover is active')
        self.primary_smtp_address, self.protocol = discover(email=self.primary_smtp_address,
                                                            credentials=credentials)
    else:
        if not config:
            raise AttributeError('non-autodiscover requires a config')
        self.protocol = config.protocol
    try:
        self.default_timezone = default_timezone or EWSTimeZone.localzone()
    except (ValueError, UnknownTimeZone) as e:
        log.warning('%s. Fallback to UTC', e.args[0])
        self.default_timezone = UTC
    if not isinstance(self.default_timezone, EWSTimeZone):
        raise ValueError("Expected 'default_timezone' to be an EWSTimeZone, got %s" % self.default_timezone)
    self.version = self.protocol.version
    try:
        self.root = Root.get_distinguished(account=self)
    except ErrorAccessDenied:
        log.warning('Access denied to root folder')
        self.root = Root(account=self)
    if not isinstance(self.protocol, Protocol):
        raise ValueError("Expected 'protocol' to be a Protocol, got %s" % self.protocol)
    log.debug('Added account: %s', self)
:param primary_smtp_address: The primary email address associated with the account on the Exchange server
:param fullname: The full name of the account. Optional.
:param access_type: The access type granted to 'credentials' for this account. Valid options are 'delegate' (default) and 'impersonation'.
:param autodiscover: Whether to look up the EWS endpoint automatically using the autodiscover protocol.
:param credentials: A Credentials object containing valid credentials for this account.
:param config: A Configuration object containing EWS endpoint information. Required if autodiscover is disabled
:param locale: The locale of the user, e.g. 'en_US'. Defaults to the locale of the host, if available.
:param default_timezone: EWS may return some datetime values without timezone information. In this case, we will assume values to be in the provided timezone. Defaults to the timezone of the host.
625941b3507cdc57c6306a83
def _getRoutingSrcPortDst(self, G):
    node_port_dst = {}
    for node in G:
        port_dst = {}
        node_port_dst[node] = port_dst
        for destination in G[node].keys():
            port = G[node][destination][0]['port']
            node_port_dst[node][port] = destination
    return node_port_dst
Return a dictionary of dictionaries with the format:

    node_port_dst[node][port] = next_node

Parameters
----------
G : graph
    Graph whose edges carry a 'port' attribute (the first parallel edge is used).

Returns
-------
node_port_dst : dict
    Mapping of node -> port -> destination node.
625941b38da39b475bd64d28
def reverse(dim, R):
    p = numpy.zeros(dim * (dim + 1) // 2)  # integer division; '/' would pass a float length
    k = 0
    for i in range(dim):
        for j in range(0, i + 1):
            p[k] = R[i, j]
            k += 1
    return p
Transforms a symmetric matrix into a vector containing the lower triangle.

:param dim: the dimension of the matrix
:param R: the symmetric matrix
:return: the vector
625941b3596a89723608987d
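A usage sketch for reverse above (assumes numpy is imported):

R = numpy.array([[1.0, 2.0],
                 [2.0, 3.0]])
reverse(2, R)  # -> array([1., 2., 3.])  (row-wise lower triangle)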
def test_multiple(labels, decisions, tests=('ztest', 'fisher', 'chi2', 'BF', 'prop'), display=False):
    decisions = boolean_array(decisions)
    crosstab = pd.crosstab(pd.Series(labels), pd.Series(decisions))
    crosstab = crosstab.values
    tb_crosstab = top_bottom_crosstab(labels, decisions)
    results = {}
    if 'ztest' in tests:
        results['z_score'] = crosstab_ztest(tb_crosstab)
    if 'fisher' in tests:
        results['fisher_p'] = fisher_exact(tb_crosstab)[:2]
    if 'chi2' in tests:
        results['chi2_p'] = chi2_contingency(crosstab)[:2]
    if 'BF' in tests:
        results['BF'] = crosstab_bayes_factor(crosstab)
    if 'prop' in tests:
        results['prop'] = min(proportion_test(labels, decisions))
    if display:
        for key in results:
            print("{}: {}".format(key, results[key]))
    return results
Function that returns p_values for z-score, fisher exact, and chi2 test of 2x2 crosstab of passing rate by labels and decisions.

See docs for z_test_ctabs, fisher_exact, chi2_contingency and bf_ctabs for details of specific tests

Parameters
----------
labels : array_like
    categorical labels for each corresponding value of `decision` ie. M/F
decisions : array_like
    binary decision values, ie. True/False or 0/1
tests : list
    a list of strings specifying the tests to run, valid options are 'ztest', 'fisher', 'chi2', 'BF' and 'prop'. Defaults to all five.
    - ztest: p-value for two-sided z-score for proportions
    - fisher: p-value for Fisher's exact test for proportions
    - chi2: p-value for chi-squared test of independence for proportions
    - BF: bayes factor for independence assuming uniform prior
    - prop: proportion of lowest to highest passing rates by group
display : bool
    print the results of each test in addition to returning them

Returns
-------
results : dict
    dictionary of values, one for each test. Valid keys are:
    'z_score', 'fisher_p', 'chi2_p', 'BF', and 'prop'

Examples
--------
>>> # no real difference between groups
>>> labels = ['group1']*100 + ['group2']*100 + ['group3']*100
>>> decisions = [1,0,0]*100
>>> all_test_ctabs(dependent_ctabs)
(0.0, 1.0, 1.0, 0.26162148804907587)

>>> # massively biased ratio of hits/misses by group
>>> ind_ctabs = np.array([[75,50],[25,50]])
>>> all_test_ctabs(ind_ctabs)
(-3.651483716701106, 0.0004203304586999487, 0.0004558800052056139, 202.95548692414306)

>>> # correcting with a biased prior
>>> biased_prior = np.array([[5,10],[70,10]])
>>> all_test_ctabs(ind_ctabs, biased_prior)
(-3.651483716701106, 0.0004203304586999487, 0.0004558800052056139, 0.00012159518854984268)
625941b385dfad0860c3ac0a
def predict_trajectory(x_init, v, y, config):
    x = np.array(x_init)
    traj = np.array(x)
    time = 0
    while time <= config.predict_time:
        x = motion(x, [v, y], config.dt)
        traj = np.vstack((traj, x))
        time += config.dt
    return traj
Predict a trajectory for a given input. The trajectory is produced by repeatedly applying the motion function to the robot's current state x with the input [v, y], generating a sequence of states over the prediction horizon (predict_time).
625941b394891a1f4081b85a
def single_view(self, request):
    if request.method == 'GET':
        form = SingleModelForm()
        return render(request, 'single_view.html', {'form': form})
    else:
        form = SingleModelForm(request.POST)
        if form.is_valid():
            sale_id = AutoSale.get_sale_id()
            if not sale_id:
                return HttpResponse('没有销售，无法进行自动分配')  # "No salesperson; cannot auto-assign"
            try:
                with transaction.atomic():
                    form.instance.consultant_id = sale_id
                    form.instance.recv_date = datetime.datetime.now().date()
                    newcustomer_obj = form.save()
                    models.CustomerDistribution.objects.create(user_id=sale_id,
                                                               customer=newcustomer_obj.id,
                                                               memo='系统分配')  # "assigned by system"
            except Exception as e:
                AutoSale.rollback(sale_id)
                return HttpResponse('录入异常')  # "error while saving"
            return HttpResponse('录入成功')  # "saved successfully"
        else:
            print(form.errors)
            return render(request, 'single_view.html', {'form': form})
Single-record entry.

:param request:
:return:
625941b360cbc95b062c62fb
def addCounts(countDict, fname):
    logging.debug("Parsing %s" % fname)
    for line in open(fname):
        line = line.strip()
        fields = line.split("\t")
        if len(fields) != 2:
            logging.error("Count line %s does not contain two fields" % repr(line))
            continue
        id, count = fields
        count = int(count)
        countDict[id] += count
    return countDict
parse line of file with format <id>tab<count>, add counts to dict, return dict
625941b3187af65679ca4ed7
def has_in(self):
    return self._idx < len(self.stdin)
Return true if input queue has data
625941b3baa26c4b54cb0ed6
def file2groupname(filename, slen=5, symtable=None):
    def randstr(n):
        return ''.join([chr(random.randint(97, 122)) for i in range(n)])
    gname = fix_varname(filename).lower() + randstr(slen)
    if '_' in gname:
        gname = gname.replace('_', '')
        gname = fix_varname(gname)
    fmt, count, maxcount = "%s{:04d}", 1, 999
    fstr = fmt % (gname[:slen])
    gname = fstr.format(count)
    if symtable is not None:
        scount = 0
        while hasattr(symtable, gname):
            count += 1
            if count > maxcount:
                scount += 1
                count = 1
                fstr = fmt % randstr(slen)
            gname = fstr.format(count)
            if scount > 1000:
                raise ValueError("exhausted unique group names")
    return gname
create a group name based on the filename

the group name will have a string component of length slen followed by a 4 digit number

Arguments
---------
filename  (str)  filename to use
slen      (int)  length of string portion (default 5)
symtable  (None or larch symbol table)  symbol table for checking that the group name is unique
625941b30a50d4780f666c41
def create_sriov_logicalport(self, ip, logicalpartition_uuid, x_api_session):
    log.log_debug("starting SRIOV LogicalPort creation")
    header_object = HmcHeaders.HmcHeaders("web")
    ns = header_object.ns["xmlns"]
    sriov_logical_port_object = UOM.SRIOVEthernetLogicalPort()
    sriov_logical_port_object.AdapterID = ADAPTER_ID
    sriov_logical_port_object.PhysicalPortID = PHYSICALPORT_ID
    sriov_logical_port_object.schemaVersion = SCHEMA_VER
    xml = sriov_logical_port_object.toxml()
    http_object = HTTPClient.HTTPClient("uom", ip, self.root, self.content_type, x_api_session)
    http_object.HTTPPut(xml, append=logicalpartition_uuid + "/SRIOVEthernetLogicalPort")
    log.log_debug("response of SRIOV logical port creation %s" % (http_object.response))
    if http_object.response_b:
        print("SRIOV Logical Port created successfully")
    else:
        root = etree.fromstring(http_object.response.content)
        error = root.findall(".//{%s}Message" % (ns))[0]
        log.log_error(error.text)
Creates an SRIOV Logical Port for a given LogicalPartition.

Args:
    ip: ip address of hmc
    logicalpartition_uuid: UUID of the partition the LogicalPort is to be created on
    x_api_session: session to be used
625941b363b5f9789fde6e97
@app.route("/register", methods=["GET", "POST"])
def register():
    if request.method == "POST":
        if request.form.get("password") != request.form.get("confirm-password"):
            return render_template("register.html", nomatch=1)
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        if len(rows):
            return render_template("register.html", usernametaken=1)
        db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash)",
                   username=request.form.get("username"),
                   hash=generate_password_hash(request.form.get("password")))
        return redirect("/login")
    else:
        return render_template("register.html")
Register user
625941b338b623060ff0aba8
def loss(self, logits, labels):
    with tf.name_scope('loss'):
        logits = tf.reshape(logits, (-1, 2))
        shape = [logits.get_shape()[0], 2]
        epsilon = tf.constant(value=1e-8, shape=shape)
        logits = logits + epsilon
        labels = tf.to_float(tf.reshape(labels, (-1, 2)))
        softmax = tf.nn.softmax(logits)
        cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), reduction_indices=[1])
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        tf.add_to_collection('losses', cross_entropy_mean)
        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
Calculates the loss from the logits and the labels.

Args:
    logits: Logits tensor, float - [batch_size, 2].
    labels: Labels tensor, int32 - [batch_size, 2].

Returns:
    loss: Loss tensor of type float.
625941b36fece00bbac2d4ed
def register_observation(self, obs: Observation):
    obs.client = self._client
    lbl_len = len(obs.labels)
    obs_space_len = len(obs.observation_space.low)
    obs_len = obs.compute().size
    if lbl_len != obs_space_len:
        raise ValueError('Labels have length {} != obs space len {}'.format(
            lbl_len, obs_space_len))
    if lbl_len != obs_len:
        raise ValueError('Labels have length {} != obs len {}'.format(
            lbl_len, obs_len))
    self._observations.append(obs)
Add an observation to be computed.

Args:
    obs (Observation): Observation to be tracked.
625941b3dc8b845886cb52e6
def _select_polarization(self, value):
    self.ms_corr_names = value.split(" ")
    ncorr = len(self.ms_corr_names)
    if ncorr < 2:
        corrlist = [self._corr_1, self._corr_1_2x2]
    elif ncorr < 4:
        corrlist = [self._corr_2, self._corr_1, self._corr_1_2x2]
    else:
        corrlist = [self._corr_2x2, self._corr_2x2_diag, self._corr_2, self._corr_1, self._corr_1_2x2]
    self.corrsel_option.set_option_list(corrlist)
Called when the polarization representation is selected
625941b345492302aab5e072
def get_item_type(self, xml_str: bytes):
    root = Etree.fromstring(xml_str)
    for x in root.findall('entry'):
        if x.get('key') == "type":
            raw_type = x.text
    if raw_type in SUPPORTED_ELEMENT_TYPES:
        if raw_type == 'ScriptModule':
            return "Action"
        return raw_type
    else:
        logger.warning("Unsupported element type for item: %s (%s)" % (self.id, raw_type))
        return "Unsupported"
Get the item type.

Args:
    xml_str (bytes): The XML content for item info.

Returns:
    str: The type name.
625941b3b7558d58953c4ccf
def get_params(self):
    return self._params
Returns the DecentParams structure which originated these results.
625941b3be7bc26dc91cd3b9
def fit(self, X, y=None):
    return self
Could add a method here to fill in pixels with surrounding median so that the model could pick up digits that are thinner
625941b3b830903b967e96cb
def get_parent(self):
    return self.__parent
Gets the scope's parent scope.

:return: The current scope's parent scope.
625941b38e7ae83300e4ad7f
def ahead(ant):
    return neighbor(ant, ant.direction)
Gets the ant's ahead neighbor.
625941b366673b3332b91e49
def list_mountables(self):
    self.output.info('Mountable overlays:')
    self.output.info('~~~~~~~~~~~~~~~~~~~')
    if self.mountables:
        for ovl in sorted(self.mountables):
            self.output.info(ovl)
    else:
        self.output.warn('N/A')
Lists all overlays that can be mounted.
625941b37047854f462a11c0
def runZlib(WRS, compressLevel=6):
    if WRS.compresslevel:
        compressLevel = WRS.compresslevel
    useTimer = perf_counter()
    writeTimer = perf_counter()
    with open(WRS.fileName, 'wb') as f:
        f.write(zlib.compress(bytes(json.dumps(data), encoding="ascii"), level=compressLevel))
    readTimer = perf_counter()
    with open(WRS.fileName, "rb") as f:
        res = json.loads(zlib.decompress(f.read()))
    endTimer = perf_counter()
    WRS.addRecord(useTimer, writeTimer, readTimer, endTimer)
    WRS.printThisStat()
Serialize the module-level data to JSON, compress it with zlib at the given level, write it to WRS.fileName, read and decompress it back, and record the timings on WRS.
625941b3d268445f265b4c27
def test_ContactHandler_read_filter_id(self):
    handler = ContactHandler
    type = 'read'
    test_data = (
        ('?id=1&id=2', {}, 'populated_list', 2),
        ('?id=1', {}, 'populated_list', 1),
        ('?id=', {}, 'unprocessable', 1),
        ('?id=&id=1&id=2', {}, 'unprocessable', 1),
        ('?id=lalalala', {}, 'unprocessable', 1),
    )
    self.execute(type, handler, test_data)
Plural read request, applying the filter ``id``.
625941b3f7d966606f6a9dbb
def _get_as_dict(self, resource_id, fields=None, os_ironic_api_version=None, global_request_id=None):
    resource = self._get(resource_id, fields=fields,
                         os_ironic_api_version=os_ironic_api_version,
                         global_request_id=global_request_id)
    if resource:
        return resource.to_dict()
    else:
        return {}
Retrieve a resource as a dictionary.

:param resource_id: Identifier of the resource.
:param fields: List of specific fields to be returned.
:param os_ironic_api_version: String version (e.g. "1.35") to use for the request. If not specified, the client's default is used.
:param global_request_id: String containing global request ID header value (in form "req-<UUID>") to use for the request.
:returns: a dictionary representing the resource; may be empty
625941b31b99ca400220a862
def search_hashtag(self, hashtag):
    search_hashtag = f'#{hashtag} -filter:retweets'
    client = self.API()
    numTweets = 100
    distinct_hashtags = collections.defaultdict(int)
    for tweet in tweepy.Cursor(client.search, q=search_hashtag, tweet_mode='extended').items(numTweets):
        data = tweet._json['entities']['hashtags']
        for i in range(len(data)):
            distinct_hashtags[data[i]['text']] += 1
    hashtag_df = pd.DataFrame(distinct_hashtags.items(), columns=['Hashtag', 'Occurences'])
    hashtag_df = hashtag_df.sort_values('Hashtag')
    pd.set_option('display.max_rows', None)
    if not os.path.exists(self.logs_folder() + f'{hashtag}_hashtag_{self.get_timestamp()}.csv'):
        with open(self.logs_folder() + f'{hashtag}_hashtag_{self.get_timestamp()}.csv', 'w+'):
            pass
    hashtag_df.to_csv(self.logs_folder() + f'{hashtag}_hashtag_{self.get_timestamp()}.csv',
                      encoding='utf-8-sig', index=False)
    print(hashtag_df.to_string(index=False))
Print hashtags and the number of their occurrences in the first 100 tweets with the specified hashtag.
625941b3956e5f7376d70c2f
def object_info(self, oname, detail_level=0):
    content = dict(oname=oname, detail_level=detail_level)
    msg = self.session.msg('object_info_request', content)
    self._queue_send(msg)
    return msg['header']['msg_id']
Get metadata information about an object.

Parameters
----------
oname : str
    A string specifying the object name.
detail_level : int, optional
    The level of detail for the introspection (0-2)

Returns
-------
The msg_id of the message sent.
625941b363b5f9789fde6e98
def _finite_field_ext_pari_(self):
    f = self.polynomial()
    return FiniteField_ext_pari(self.order(), self.variable_name(), f)
Return a :class:`FiniteField_ext_pari` isomorphic to ``self`` with the same defining polynomial.

.. NOTE::

    This method will vanish eventually because that implementation of finite fields will be deprecated.

EXAMPLES::

    sage: k.<a> = GF(2^20)
    sage: kP = k._finite_field_ext_pari_()
    sage: kP
    Finite Field in a of size 2^20
    sage: type(kP)
    <class 'sage.rings.finite_rings.finite_field_ext_pari.FiniteField_ext_pari_with_category'>
625941b3f8510a7c17cf94b8
def get_samples_from_labels(self, labels, trials=5):
    clamped = np.ones(labels.shape, dtype=np.float32)
    data = self.get_samples([(self.label_idx, labels)],
                            walkback=self.calc_walkback(trials),
                            indices=[self.input_idx],
                            clamped=[clamped],
                            symbolic=False)
    return np.array(data)[:, 0, :, :]
Clamps labels and generates samples.

Parameters
----------
labels : WRITEME
trials : WRITEME
625941b3d18da76e23532283
def delete_blocked_domain_with_http_info(self, domain, **kwargs):
    all_params = ['domain']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_blocked_domain" % key
            )
        params[key] = val
    del params['kwargs']
    if ('domain' not in params or params['domain'] is None):
        raise ValueError("Missing the required parameter `domain` when calling `delete_blocked_domain`")
    collection_formats = {}
    path_params = {}
    if 'domain' in params:
        path_params['domain'] = params['domain']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    auth_settings = ['api-key', 'partner-key']
    return self.api_client.call_api(
        '/smtp/blockedDomains/{domain}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Unblock an existing domain from the list of blocked domains  # noqa: E501

Unblocks an existing domain from the list of blocked domains  # noqa: E501

This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.delete_blocked_domain_with_http_info(domain, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str domain: The name of the domain to be deleted (required)
:return: None
         If the method is called asynchronously, returns the request thread.
625941b3046cf37aa974cafe
def fit(self, X, y):
    X, y = check_X_y(X, y, accept_sparse=None, dtype=np.float64)
    if self.normalize:
        self._x_mean = np.mean(X, 0)
        self._x_std = np.std(X, 0)
        X = (X - self._x_mean) / self._x_std
    if self.fit_intercept:
        X = np.concatenate((np.ones([X.shape[0], 1]), X), 1)
    check_classification_targets(y)
    self.classes_ = np.unique(y)
    n_classes = len(self.classes_)
    if n_classes < 2:
        raise ValueError("Need samples of at least 2 classes"
                         " in the data, but the data contains only one"
                         " class: %r" % self.classes_[0])
    if n_classes > 2:
        self.coef_, self.sigma_ = [0] * n_classes, [0] * n_classes
        self.intercept_, self.active_ = [0] * n_classes, [0] * n_classes
        self.lambda_ = [0] * n_classes
    else:
        self.coef_, self.sigma_, self.intercept_, self.active_ = [0], [0], [0], [0]
        self.lambda_ = [0]
    for i in range(len(self.classes_)):
        if n_classes == 2:
            pos_class = self.classes_[1]
        else:
            pos_class = self.classes_[i]
        mask = (y == pos_class)
        y_bin = np.zeros(y.shape, dtype=np.float64)
        y_bin[mask] = 1
        coef, bias, active, sigma, lambda_ = self._fit(X, y_bin)
        self.coef_[i], self.intercept_[i], self.sigma_[i] = coef, bias, sigma
        self.active_[i], self.lambda_[i] = active, lambda_
        if n_classes == 2:
            break
    self.coef_ = np.asarray(self.coef_)
    self.intercept_ = np.asarray(self.intercept_)
    return self
Fits Logistic Regression with ARD Parameters ---------- X: array-like of size [n_samples, n_features] Training data, matrix of explanatory variables y: array-like of size [n_samples] Target values Returns ------- self : object Returns self.
625941b32c8b7c6e89b3557e
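A hedged usage sketch for the fit method above; the enclosing class name (ClassificationARD) and its constructor flags are assumptions, not taken from the source.

import numpy as np

# Hypothetical: ClassificationARD is assumed to be the class defining fit().
X = np.random.randn(100, 5)                  # 100 samples, 5 features
y = (X[:, 0] + X[:, 1] > 0).astype(int)      # binary target
clf = ClassificationARD(normalize=True, fit_intercept=True)
clf.fit(X, y)
print(clf.coef_.shape)                       # one weight vector in the binary case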
def save(self): <NEW_LINE> <INDENT> self.send('save')
Save current parameters in non-volatile memory.
625941b34f6381625f1147f9
def connect( self, resource_group_name, resource_provider_namespace, parent_resource_type, parent_resource, serial_port, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2018-05-01" <NEW_LINE> accept = "application/json" <NEW_LINE> url = self.connect.metadata['url'] <NEW_LINE> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'), 'parentResourceType': self._serialize.url("parent_resource_type", parent_resource_type, 'str', skip_quote=True), 'parentResource': self._serialize.url("parent_resource", parent_resource, 'str'), 'serialPort': self._serialize.url("serial_port", serial_port, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> header_parameters = {} <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> request = self._client.post(url, query_parameters, header_parameters) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> deserialized = self._deserialize('SerialPortConnectResult', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized
Connect to serial port of the target resource. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param resource_provider_namespace: The namespace of the resource provider. :type resource_provider_namespace: str :param parent_resource_type: The resource type of the parent resource. For example: 'virtualMachines' or 'virtualMachineScaleSets'. :type parent_resource_type: str :param parent_resource: The resource name, or subordinate path, for the parent of the serial port. For example: the name of the virtual machine. :type parent_resource: str :param serial_port: The name of the serial port to connect to. :type serial_port: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SerialPortConnectResult, or the result of cls(response) :rtype: ~azure.mgmt.serialconsole.models.SerialPortConnectResult :raises: ~azure.core.exceptions.HttpResponseError
625941b37b180e01f3dc45bb
def get_mem_usage(pid): <NEW_LINE> <INDENT> mem = {} <NEW_LINE> pid_status_path='/proc/%s/status'%pid <NEW_LINE> with open(pid_status_path) as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> mem[line.split(':')[0]] = line.split(':')[1].strip() <NEW_LINE> <DEDENT> <DEDENT> return mem
Given a process PID, read the file /proc/pid/status, which has the following format: VmPeak: 23836 kB VmSize: 23796 kB VmLck: 0 kB VmHWM: 2012 kB VmRSS: 2012 kB VmData: 680 kB VmStk: 88 kB VmExe: 116 kB VmLib: 2356 kB Each line is split on ':' into a dict, with the first column as the key and the second column as the value; the function returns the dict mem.
625941b3c432627299f049fa
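A minimal, Linux-only usage sketch for get_mem_usage; it inspects the current process's own /proc entry.

import os

mem = get_mem_usage(os.getpid())
print(mem.get('VmRSS'))   # e.g. '2012 kB' -- resident set size
print(mem.get('VmPeak'))  # peak virtual memory size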
def cal_cost(self, X, groups): <NEW_LINE> <INDENT> k = self.k <NEW_LINE> total_cost = 0. <NEW_LINE> for i in range(k): <NEW_LINE> <INDENT> group_X = X[groups == i] <NEW_LINE> diff = group_X - self.centers[i, :] <NEW_LINE> cost = np.power(diff, 2).sum() <NEW_LINE> total_cost += cost <NEW_LINE> <DEDENT> avg_cost = total_cost / X.shape[0] <NEW_LINE> return avg_cost
Return the average clustering cost of X: the total squared Euclidean distance from each sample to its assigned cluster center, divided by the number of samples.
625941b316aa5153ce36222b
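A standalone sketch of the same cost computation, outside the enclosing class; the data and centers are illustrative only.

import numpy as np

X = np.array([[0., 0.], [1., 0.], [10., 10.]])
centers = np.array([[0.5, 0.], [10., 10.]])
groups = np.array([0, 0, 1])  # cluster index assigned to each sample

total = sum(np.power(X[groups == i] - centers[i], 2).sum()
            for i in range(len(centers)))
print(total / X.shape[0])     # (0.25 + 0.25 + 0.0) / 3 ~= 0.1667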
def flatten_SIF(data): <NEW_LINE> <INDENT> result = np.zeros(data[0].shape) + 2 <NEW_LINE> for z in range(data.shape[0]): <NEW_LINE> <INDENT> mask = result == 2 <NEW_LINE> result[mask] = data[z][mask] <NEW_LINE> <DEDENT> return result.astype(int)
The color of each pixel is taken from the first non-transparent layer. 0=black, 1=white, 2=transparent
625941b33346ee7daa2b2b1b
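A small worked example for flatten_SIF, assuming the function above is in scope: two 2x2 layers where the top layer is mostly transparent (value 2).

import numpy as np

layers = np.array([
    [[2, 0],
     [2, 2]],   # top layer: only position (0, 1) is opaque (black)
    [[1, 1],
     [0, 1]],   # bottom layer shows through everywhere else
])
print(flatten_SIF(layers))
# [[1 0]
#  [0 1]]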
def create_mlb_target(self, target): <NEW_LINE> <INDENT> target = spotinst_mlb.TargetRequest(target) <NEW_LINE> excluded_group_dict = self.exclude_missing(json.loads(target.toJSON())) <NEW_LINE> formatted_group_dict = self.convert_json( excluded_group_dict, self.underscore_to_camel) <NEW_LINE> body_json = json.dumps(formatted_group_dict) <NEW_LINE> self.print_output(body_json) <NEW_LINE> response = self.send_post( url=self.__base_lb_url + "/target", body=body_json, entity_name="mlb target" ) <NEW_LINE> formatted_response = self.convert_json( response, self.camel_to_underscore) <NEW_LINE> retVal = formatted_response["response"]["items"][0] <NEW_LINE> return retVal
Create MLB target # Arguments target (Target): Target Object # Returns (Object): Spotinst API response
625941b32ae34c7f2600cee4
def __init__(self): <NEW_LINE> <INDENT> with open('authorizer_rules.yaml') as rules_file: <NEW_LINE> <INDENT> self._rules = yaml.safe_load(rules_file)
Initialise the authorizer by loading the rules file
625941b356ac1b37e6263f94
def test_flatten_by_keys_validList(): <NEW_LINE> <INDENT> flattened = flatten_by_keys(FORECAST, keys=['city', 'coord.lat']) <NEW_LINE> assert flattened == {'city': 'jacksonville', 'coord.lat': 30.332}
GIVEN a json-serialized document converted to a python dict WHEN the user requests to flatten and specifies a list of keys THEN assert it flattens only the specified keys
625941b32ae34c7f2600cee5
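The flatten_by_keys implementation under test is not shown in the source; a minimal sketch consistent with this test, with a hypothetical FORECAST fixture, might look like:

def flatten_by_keys(doc, keys):
    out = {}
    for key in keys:
        value = doc
        for part in key.split('.'):   # walk dotted paths such as 'coord.lat'
            value = value[part]
        out[key] = value
    return out

# Hypothetical fixture; the real FORECAST document is defined elsewhere.
FORECAST = {'city': 'jacksonville', 'coord': {'lat': 30.332, 'lon': -81.656}}
assert flatten_by_keys(FORECAST, keys=['city', 'coord.lat']) == {'city': 'jacksonville', 'coord.lat': 30.332}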
def spikeGet(t,v,vT=None): <NEW_LINE> <INDENT> if vT is None: <NEW_LINE> <INDENT> vT = [(0.75*(np.max(v)-np.sqrt(np.std(v)))), 10] <NEW_LINE> vT = np.max(vT) <NEW_LINE> <DEDENT> vTF = v>vT <NEW_LINE> idx = np.nonzero((vTF[:-1]==0) & (vTF[1:]==1)) <NEW_LINE> return t[idx[0]+1]
Extract spike times using boolean logic. Threshold the trace into a boolean array, offset it by one place against itself, and compare to detect upward crossings. Args: t: numpy time array v: numpy voltage array vT: voltage threshold (optional) Returns: times at which spikes were detected
625941b3fff4ab517eb2f1eb
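A usage sketch for spikeGet with a synthetic trace; the threshold is passed explicitly so the auto-threshold branch is not exercised.

import numpy as np

t = np.arange(0, 10, 0.1)
v = np.zeros_like(t)
v[[20, 60]] = 50.0             # two brief suprathreshold events
print(spikeGet(t, v, vT=10))   # -> [2. 6.]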
def deducirajPobjednika (konacni_rezultat): <NEW_LINE> <INDENT> pobjednik = [0] <NEW_LINE> for i in range(1, len(konacni_rezultat)): <NEW_LINE> <INDENT> if konacni_rezultat[i] > konacni_rezultat[pobjednik[0]]: <NEW_LINE> <INDENT> pobjednik = [i] <NEW_LINE> <DEDENT> elif konacni_rezultat[i] == konacni_rezultat[pobjednik[0]]: <NEW_LINE> <INDENT> pobjednik.append(i) <NEW_LINE> <DEDENT> <DEDENT> if len(pobjednik) > 1: <NEW_LINE> <INDENT> return tuple(pobjednik) <NEW_LINE> <DEDENT> return pobjednik[0]
Determine who collected strictly the most points. The argument of the function must be the return value of the function Tablic.Log.konacniRezultat, from which the index of the player with the strictly highest number of collected points is extracted. If several players share first place, the return value is an ascending-sorted tuple of their indices.
625941b3627d3e7fe0d68c01
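A worked example for deducirajPobjednika: index 2 wins outright in the first call; a tie returns an ascending tuple of indices.

print(deducirajPobjednika([10, 25, 40, 25]))  # -> 2
print(deducirajPobjednika([40, 25, 40]))      # -> (0, 2)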
def _get_name_for_type(self, type_value): <NEW_LINE> <INDENT> datadict = json.loads(self.instance.xform.json) <NEW_LINE> for item in datadict['children']: <NEW_LINE> <INDENT> if type(item) == dict and item.get('type') == type_value: <NEW_LINE> <INDENT> return item['name']
We cannot assume that start and end times always use the same XPath. This is causing problems for other people's forms. This is a quick fix to determine, from the original XLSForm's JSON representation, what the 'name' was for a given type_value ('start' or 'end').
625941b3ab23a570cc24ff3a
def sign(self, request, consumer, token): <NEW_LINE> <INDENT> key, raw = self.signing_base(request, consumer, token) <NEW_LINE> hashed = hmac.new(key, raw, sha) <NEW_LINE> return binascii.b2a_base64(hashed.digest())[:-1]
Computes the HMAC-SHA1 signature over the request's signing base string and returns it base64-encoded.
625941b324f1403a92600925
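The sign method above targets Python 2 (the sha module); a Python 3 sketch of the same HMAC-SHA1 step, with an illustrative key and base string, might be:

import base64
import hashlib
import hmac

key = b'consumer_secret&token_secret'              # assumed signing-key layout
raw = b'POST&https%3A%2F%2Fexample.com%2F&a%3D1'   # illustrative base string
signature = base64.b64encode(hmac.new(key, raw, hashlib.sha1).digest())
print(signature)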
def _get_env_vars(self): <NEW_LINE> <INDENT> env_vars = {} <NEW_LINE> if self.auth_token: <NEW_LINE> <INDENT> env_vars['st2_auth_token'] = self.auth_token.token <NEW_LINE> <DEDENT> if self._env: <NEW_LINE> <INDENT> env_vars.update(self._env) <NEW_LINE> <DEDENT> return env_vars
:rtype: ``dict``
625941b321a7993f00bc7a9b
def fit(self, examples, min_word_count=10, min_char_count=100, num_chars_per_word=16): <NEW_LINE> <INDENT> tokenized_examples = [ _tokenize(example, self._tokenizer, num_chars_per_word) for example in tqdm(examples, 'tokenizing') ] <NEW_LINE> word_counter = _get_word_counter(tokenized_examples) <NEW_LINE> char_counter = _get_char_counter(tokenized_examples) <NEW_LINE> self._word2idx_dict = _counter2vocab(word_counter, min_word_count) <NEW_LINE> tf.logging.info('Word vocab size: %d' % len(self._word2idx_dict)) <NEW_LINE> self._char2idx_dict = _counter2vocab(char_counter, min_char_count) <NEW_LINE> tf.logging.info('Char vocab size: %d' % len(self._char2idx_dict)) <NEW_LINE> glove_word2idx_dict = _get_glove_vocab( self._glove_words, counter=word_counter) <NEW_LINE> tf.logging.info('Glove word vocab size: %d' % len(glove_word2idx_dict)) <NEW_LINE> def glove_word2idx(word): <NEW_LINE> <INDENT> word = word.lower() <NEW_LINE> return glove_word2idx_dict[word] if word in glove_word2idx_dict else 1 <NEW_LINE> <DEDENT> indexed_examples = [ _index(example, self.word2idx, glove_word2idx, self.char2idx) for example in tqdm(tokenized_examples, desc='indexing') ] <NEW_LINE> metadata = self._get_metadata(indexed_examples) <NEW_LINE> metadata['glove_word2idx'] = glove_word2idx_dict <NEW_LINE> metadata['num_chars_per_word'] = num_chars_per_word <NEW_LINE> return indexed_examples, metadata
Fits examples and returns indexed examples with metadata. Fitting examples means the vocab is created out of the examples. The vocab can be saved via `save` and loaded via `load` methods. Args: examples: list of dictionaries, where each dictionary is an example. min_word_count: `int` value, minimum word count to be included in vocab. min_char_count: `int` value, minimum char count to be included in vocab. num_chars_per_word: `int` value, number of chars to store per word. This is fixed, so if a word is shorter, then the rest is padded with 0. The characters are flattened, so they need to be reshaped when using them. Returns: a tuple `(indexed_examples, metadata)`, where `indexed_examples` is a list of dict (each dict being an indexed example) and `metadata` is a dict of `glove_word2idx_dict` and statistics of the examples.
625941b3d58c6744b4257a14
def download_playlist(db: DBMuziek, name: str): <NEW_LINE> <INDENT> playlist_query = db.get_playlist(name) <NEW_LINE> if not playlist_query: <NEW_LINE> <INDENT> print(f"The playlist {name} doesn't exist.") <NEW_LINE> return <NEW_LINE> <DEDENT> songs = db.get_playlist_songs(playlist_query["playlist_id"]) <NEW_LINE> if not songs: <NEW_LINE> <INDENT> print(f"The playlist {name} is empty.") <NEW_LINE> return <NEW_LINE> <DEDENT> for song in songs: <NEW_LINE> <INDENT> download_song(db, song["song_name"], song["group_id"])
Downloads the playlist requested based on the urls stored in the database. :author: Carlos :param db: The database used. :param name: Name of the playlist to download. :PRE: The database object needs to be connected. :POST: All the songs in the playlist are downloaded
625941b3fff4ab517eb2f1ec
@app.route('/') <NEW_LINE> def viz_page(): <NEW_LINE> <INDENT> return flask.render_template("nmf_demo.html")
Homepage: serve our visualization page, nmf_demo.html
625941b394891a1f4081b85b
def __init__(self, **specs): <NEW_LINE> <INDENT> BackgroundSources.__init__(self, **specs)
Constructor for class GalaxiesFaintStars
625941b3099cdd3c635f0a10
def set_params(self, params): <NEW_LINE> <INDENT> pass
Set parameters from an array, using the same ordering as the list returned by self.params().
625941b30a366e3fb873e5c9
def _hm_event_callback(self, device, caller, attribute, value): <NEW_LINE> <INDENT> _LOGGER.debug("%s received event '%s' value: %s", self._name, attribute, value) <NEW_LINE> has_changed = False <NEW_LINE> if attribute in self._data: <NEW_LINE> <INDENT> if self._data[attribute] != value: <NEW_LINE> <INDENT> self._data[attribute] = value <NEW_LINE> has_changed = True <NEW_LINE> <DEDENT> <DEDENT> if attribute == 'UNREACH': <NEW_LINE> <INDENT> self._available = bool(value) <NEW_LINE> has_changed = True <NEW_LINE> <DEDENT> if has_changed: <NEW_LINE> <INDENT> self.schedule_update_ha_state()
Handle all pyhomematic device events.
625941b31f5feb6acb0c4910
def from_labelling_and_area_sequence(L, D): <NEW_LINE> <INDENT> return ParkingFunction_class([L.index(i)+1-D[L.index(i)] for i in range(1,len(L)+1)])
Returns the parking function corresponding to the labelling and area sequence pair. INPUT: - ``L`` -- a labelling permutation - ``D`` -- an area sequence for a Dyck word OUTPUT: - returns the parking function corresponding to the labelling permutation ``L`` and the area sequence ``D`` of the corresponding Dyck path EXAMPLES:: sage: from sage.combinat.parking_functions import from_labelling_and_area_sequence sage: from_labelling_and_area_sequence([2, 6, 4, 5, 3, 7, 1], [0, 1, 1, 2, 0, 1, 1]) [6, 1, 5, 2, 2, 1, 5] :: sage: from_labelling_and_area_sequence([1, 2, 3], [0, 1, 2]) [1, 1, 1] sage: from_labelling_and_area_sequence([1, 2, 3], [0, 0, 0]) [1, 2, 3] sage: from_labelling_and_area_sequence([1, 2, 3], [0, 1, 1]) [1, 1, 2] sage: from_labelling_and_area_sequence([1, 2, 4, 3], [0, 1, 2, 1]) [1, 1, 3, 1]
625941b331939e2706e4cc25
def add_neighbor(self, vertex_obj, weight): <NEW_LINE> <INDENT> self.neighbors_dict[vertex_obj.__id] = (vertex_obj, weight) <NEW_LINE> return self.neighbors_dict
Add a neighbor along a weighted edge by storing it in the neighbors dictionary. Parameters: vertex_obj (Vertex): An instance of Vertex to be stored as a neighbor. weight (int): The edge weight from self -> neighbor.
625941b36aa9bd52df036b55
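A minimal Vertex sketch consistent with add_neighbor; the attribute names follow the source, while the constructor is an assumption.

class Vertex:
    def __init__(self, vertex_id):
        self.__id = vertex_id          # name-mangles to _Vertex__id
        self.neighbors_dict = {}       # id -> (Vertex, weight)

    def add_neighbor(self, vertex_obj, weight):
        self.neighbors_dict[vertex_obj.__id] = (vertex_obj, weight)
        return self.neighbors_dict

a, b = Vertex('A'), Vertex('B')
print(a.add_neighbor(b, 3))            # {'B': (<Vertex object>, 3)}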
def hostRoles(opt): <NEW_LINE> <INDENT> return hostFact(opt.role_fact, opt)
Return a dict matching hostnames and system roles (as selected via role_fact).
625941b3d7e4931a7ee9dccf
def __get_block(self, block_hash): <NEW_LINE> <INDENT> sql = 'SELECT * FROM block WHERE hash = "{}";'.format(block_hash) <NEW_LINE> return self.__query(sql)
Retrieves a block from the database by its hash.
625941b3a05bb46b383ec5e1
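Design note: building the SQL string with str.format invites SQL injection if the hash is attacker-controlled. A parameterized sqlite3 sketch of the same lookup (table layout assumed) would be:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE block (hash TEXT, height INTEGER)')
conn.execute('INSERT INTO block VALUES (?, ?)', ('abc123', 1))
rows = conn.execute('SELECT * FROM block WHERE hash = ?', ('abc123',)).fetchall()
print(rows)  # [('abc123', 1)]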
def get_subcategory(self, id, **data): <NEW_LINE> <INDENT> return self.get("/subcategories/{0}/".format(id), data=data)
GET /subcategories/:id/ Gets a :format:`subcategory` by ID as ``subcategory``.
625941b3097d151d1a222c17
def scripting_start(self): <NEW_LINE> <INDENT> self.wd = tk.Toplevel(self.root.master) <NEW_LINE> if self.root.octadist_icon is not None: <NEW_LINE> <INDENT> self.wd.wm_iconbitmap(self.root.octadist_icon) <NEW_LINE> <DEDENT> self.wd.title("OctaDist Scripting Interface") <NEW_LINE> self.wd.bind("<Return>", self.script_execute) <NEW_LINE> self.wd.resizable(0, 0) <NEW_LINE> lbl = tk.Label(self.wd, text="Output:") <NEW_LINE> lbl.grid(padx="5", pady="5", sticky=tk.W, row=0, column=0) <NEW_LINE> self.box_script = tk.Text(self.wd, width=70, height=20) <NEW_LINE> self.box_script.grid(padx="5", pady="5", row=1, column=0, columnspan=2) <NEW_LINE> lbl = tk.Label(self.wd, text="Input:") <NEW_LINE> lbl.grid(padx="5", pady="5", sticky=tk.W, row=2, column=0) <NEW_LINE> self.entry_script = tk.Entry(self.wd, width=62) <NEW_LINE> self.entry_script.grid(padx="5", pady="5", sticky=tk.W, row=3, column=0) <NEW_LINE> btn_script = tk.Button(self.wd, text="Run") <NEW_LINE> btn_script.bind("<Button-1>", self.script_execute) <NEW_LINE> btn_script.grid(padx="5", pady="5", row=3, column=1) <NEW_LINE> self.box_script.insert( tk.INSERT, "Welcome to OctaDist interactive scripting console\n" ) <NEW_LINE> self.box_script.insert( tk.INSERT, "If you have no idea what to do about scripting, " 'type "help" to get started.\n\n', ) <NEW_LINE> self.wd.mainloop()
Start scripting console.
625941b3b5575c28eb68ddb0
def _getting_file_path(self) -> tuple: <NEW_LINE> <INDENT> file_list = os.listdir(self.path) <NEW_LINE> file_list = [os.path.join(self.path, i) for i in file_list] <NEW_LINE> file_list = sorted(file_list, key=os.path.getmtime) <NEW_LINE> file_list.reverse() <NEW_LINE> return tuple(file_list)
Get all file paths in the target directory, sorted by modification time with the newest first.
625941b36fece00bbac2d4ee
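A standalone sketch of the same idea as _getting_file_path: collect file paths and sort them newest-first by modification time (sorting with reverse=True replaces the sort-then-reverse pair).

import os

def files_newest_first(path):
    entries = [os.path.join(path, name) for name in os.listdir(path)]
    return tuple(sorted(entries, key=os.path.getmtime, reverse=True))

print(files_newest_first('.'))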