code: string (length 4 – 4.48k)
docstring: string (length 1 – 6.45k)
_id: string (length 24)
def __call__(self, results):
    if 'flip' not in results:
        flip = True if np.random.rand() < self.flip_ratio else False
        results['flip'] = flip
    if 'flip_direction' not in results:
        results['flip_direction'] = self.direction
    if results['flip']:
        # Flip images, bounding boxes, masks and segmentation maps in turn.
        for key in results.get('img_fields', ['img']):
            results[key] = mmcv.imflip(
                results[key], direction=results['flip_direction'])
        for key in results.get('bbox_fields', []):
            results[key] = self.bbox_flip(results[key],
                                          results['img_shape'],
                                          results['flip_direction'])
        for key in results.get('mask_fields', []):
            results[key] = results[key].flip(results['flip_direction'])
        for key in results.get('seg_fields', []):
            results[key] = mmcv.imflip(
                results[key], direction=results['flip_direction'])
    return results
Call function to flip bounding boxes, masks, semantic segmentation maps.

Args:
    results (dict): Result dict from loading pipeline.

Returns:
    dict: Flipped results; 'flip' and 'flip_direction' keys are added into
        the result dict.
625941b230bbd722463cbb54
def get_cation_cn(self, radius=2.6, min_weight=10e-5, anions=None):
    if anions is None:
        anions = ['O2-', 'O', 'F-', 'F', 'Cl-', 'Cl', 'I-', 'I',
                  'Br-', 'Br', 'S2-', 'S']
    cation_sites = []
    for site in self._structure.sites:
        if site.species_string not in anions:
            cation_sites.append(site)
    cn_list = {}
    for site in cation_sites:
        site_cn = self.get_site_cn(site, radius, min_weight)
        if site.species_string not in cn_list.keys():
            cn_list[site.species_string] = [site_cn]
        else:
            cn_list[site.species_string].append(site_cn)
    return cn_list
Get all cation-centered polyhedra for a structure.

:param radius: (float) distance in Angstroms for bond cutoff
:param anions: (List of Strings) list of species which we consider anions
    in the structure
:return: (dict) A dictionary with keys corresponding to different cations
    and values corresponding to each cation's ECoN coordination numbers
625941b2d268445f265b4bff
def __contains__(self, key):
    return hasattr(self, str(self.HASH_PREFIX + key))
Return True if our object contains the given key (JSON name).
625941b2462c4b4f79d1d45a
def test_x_sw(self):
    bk = Backend(product='qulacs', device='cpu_simulator')
    qc = QCirc().x(0).sw(0, 1)
    res = bk.run(qcirc=qc)
    actual = res.info['quantumstate'].get_vector()
    expect = reverse_bit_order(np.array([0j, (1+0j), 0j, 0j]))
    ans = equal_vectors(actual, expect)
    self.assertEqual(ans, True)
test 'sw' gate (following 'x' gate, not 'h' gates)
625941b2d8ef3951e32432c8
def __init__(self, WL, T):
    self.WL = WL
    self.T = T
    self.h = [None] * T
    self.w = np.zeros(T)
    self.D = None
Parameters
----------
WL : the class of the base weak learner
T : the number of base learners to learn
625941b23346ee7daa2b2af2
def test_size_width_too_big(self):
    request_path = '/%s/full/3601,/0/default.jpg' % (self.test_jpeg_id,)
    resp = self.client.get(request_path)
    self.assertEqual(resp.status_code, 404)
Explicit width in size parameter is larger than image size.
625941b22ae34c7f2600cec4
def predict(self, src_seq):
    src_id_seq = Variable(torch.LongTensor([self.src_vocab.stoi[tok] for tok in src_seq]),
                          volatile=True).view(1, -1)
    if torch.cuda.is_available():
        src_id_seq = src_id_seq.cuda()
    decoder_kick = Variable(torch.LongTensor([self.tgt_vocab.stoi['<sos>']]),
                            volatile=True).view(1, -1)
    softmax_list, _, other = self.model(src_id_seq, [len(src_seq)], decoder_kick)
    length = other['length'][0]
    tgt_id_seq = [other['sequence'][di][0].data[0] for di in range(length)]
    tgt_seq = [self.tgt_vocab.itos[tok] for tok in tgt_id_seq]
    return tgt_seq
Make prediction given `src_seq` as input.

Args:
    src_seq (list): list of tokens in source language

Returns:
    tgt_seq (list): list of tokens in target language as predicted by the
        pre-trained model
625941b267a9b606de4a7c4e
def fit_base_models(self, data, labels, oos_data, oos_labels=None):
    dataset_blend_train = []
    dataset_blend_oos = []
    for i, model in enumerate(self.base_models):
        train_predictions, oos_predictions = self.cv_fit_model(
            model, data, labels, oos_data, oos_labels)
        dataset_blend_train.append(train_predictions)
        dataset_blend_oos.append(oos_predictions)
    return (dataset_blend_train, dataset_blend_oos)
Helper method called by fit_predict method that fits the training data to
the base models and makes predictions on train and oos datasets. The method
then blends/combines the predictions and returns the newly generated train
and oos datasets.

Parameters
----------
data: numpy array, shape: (number_of_samples, number_of_features)
    Training data. Corresponds to the traditional X.
labels: numpy array, shape: (number_of_samples, )
    True target values or labels. Corresponds to the traditional y.
oos_data: numpy array, shape: (oos_number_samples, number_of_features)
    This is the unseen/new/out of sample(oos) data for which predictions
    are to be made.
oos_labels: numpy array, optional, shape: (oos_number_samples, ), default: None
    Optional parameter. True target values for oos samples.

Returns
-------
dataset_blend_train: numpy array, shape: (number_of_samples, number_of_base_models)
    for Regression or (number_of_samples, number_of_base_models * number_of_unique_classes)
    for Classification. Newly created train dataset generated by horizontally
    stacking base models' predictions on the training data in a
    cross-validated (out-of-fold predictions) fashion.
dataset_blend_oos: numpy array, shape: (oos_number_samples, number_of_base_models)
    for Regression or (oos_number_samples, number_of_base_models * number_of_unique_classes)
    for Classification. Newly created oos dataset generated by horizontally
    stacking base models' predictions on the oos data in a cross-validated
    (oos predictions of all k models across k-fold cv are averaged) fashion.
625941b2925a0f43d2549bfd
def induce_program(self, output, timestep):
    label = torch.max(output, 1)[1].data.cpu().numpy()
    for index in range(self.batch_size):
        exp = self.unique_draws[label[index]]
        self.expressions[index] += exp
        program = self.tokenizer(self.expressions[index])
        if self.wrong_indices[index] or self.stopped_programs[index]:
            continue
        if (exp == "$"):
            self.wrong_indices[index] = (not validity(program, self.max_time, timestep))
            self.stopped_programs[index] = True
            if self.wrong_indices[index]:
                self.stack[index, :, :, :, :] = np.zeros(
                    (self.stack_size, self.canvas_shape[0],
                     self.canvas_shape[1], self.canvas_shape[2]))
            continue
        self.wrong_indices[index] = (not validity(program, self.max_time, timestep))
        if (not self.wrong_indices[index]):
            temp = [program[-1]]
            self.sim[index].generate_stack(temp, start_scratch=False,
                                           if_primitives=True)
            self.stack[index, :, :, :, :] = self.sim[index].stack_t[-1]
        elif self.wrong_indices[index]:
            self.stack[index, :, :, :, :] = np.zeros(
                (self.stack_size, self.canvas_shape[0],
                 self.canvas_shape[1], self.canvas_shape[2]))
    stack = np.expand_dims(self.stack.astype(np.float32), 0)
    return Variable(torch.from_numpy(stack)).cuda(), self.expressions
Induces the program by taking current output from the network; returns the
simulated stack. Also takes care of the validity of the programs. Currently,
as soon as the program is recognized to be wrong, it just drops it. For
invalid programs we produce an empty canvas. For programs that stop and are
valid, the state of the canvas at the stopped timestep is repeated. If we
encounter any wrong instruction, we lose hope and forget it.

:param output: Output from the network at some point of time
:param timestep: Current time step at which output is produced
:return: stack: simulated stack at that time step
625941b256b00c62f0f143e6
def levelOrder(self, root):
    if not root:
        return []
    ans = []
    q = deque([root])
    while q:
        current_level, size = [], len(q)
        for _ in range(size):
            node = q.popleft()
            current_level.append(node.val)
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        ans.append(current_level)
    return ans
:type root: TreeNode
:rtype: List[List[int]]
625941b250812a4eaa59c0b2
def make_kk_task(self, **kwargs):
    from ..utils import KKflow
    self.kktask = KKflow(
        dirname=os.path.join(self.dirname, '00-KK'),
        **kwargs)
    self.add_task(self.kktask)
    kwargs.update(
        tetrahedra_fname=self.kktask.tetrahedra_fname,
        symmetries_fname=self.kktask.symmetries_fname,
        kreciprocal_fname=self.kktask.kreciprocal_fname)
    tetrahedra_fname = self.kktask.tetrahedra_fname
    symmetries_fname = self.kktask.symmetries_fname
    kreciprocal_fname = self.kktask.kreciprocal_fname
    return tetrahedra_fname, symmetries_fname, kreciprocal_fname
Run KK flow. Initialize parameters for tetrahedron integration.
625941b25fdd1c0f98dbffc4
def compile(self, tmp_dir=None, verbose=False):
    base_prefix = self.prefix
    if tmp_dir is None:
        codedir = tempfile.mkdtemp(".pydy_compile")
    else:
        codedir = os.path.abspath(tmp_dir)
    if not os.path.exists(codedir):
        os.makedirs(codedir)
    self.prefix = '{}_{}'.format(base_prefix,
                                 CythonMatrixGenerator._module_counter)
    workingdir = os.getcwd()
    os.chdir(codedir)
    try:
        sys.path.append(codedir)
        self.write()
        cmd = [sys.executable, self.prefix + '_setup.py', 'build_ext',
               '--inplace']
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if verbose:
            print(output.decode())
        cython_module = importlib.import_module(self.prefix)
    except:
        raise Exception('Failed to compile and import Cython module.')
    finally:
        sys.path.remove(codedir)
        CythonMatrixGenerator._module_counter += 1
        os.chdir(workingdir)
        if tmp_dir is None:
            try:
                shutil.rmtree(codedir)
            except OSError:
                pass
    self.prefix = base_prefix
    return getattr(cython_module, 'eval')
Returns a function which evaluates the matrices.

Parameters
==========
tmp_dir : string
    The path to an existing or non-existing directory where all of the
    generated files will be stored.
verbose : boolean
    If true the output of the completed compilation steps will be printed.
625941b2d18da76e2353225b
def _make_bowtie_index(self, fullLengthSeqs):
    tmp = tempfile.NamedTemporaryFile(delete=False)
    for name, seq in fullLengthSeqs.items():
        tmp.write('>%s\n%s\n' % (name, seq))
    tmp.close()
    bi = amplishot.app.bowtie.Bowtie2Build(WorkingDir=self.outdir)
    logging.debug(str(bi))
    bi([tmp.name, self.outprefix])
    os.remove(tmp.name)
Write the full-length sequences to a file, then index them with bowtie;
afterwards the tmpfile can be deleted.
625941b2a17c0f6771cbdde8
def rearm_idle(self, *largs):
    if not hasattr(self, "idle_timer"):
        return
    if self.idle_timer is None:
        self.dispatch("on_wakeup")
    self.idle_timer = monotonic()
Rearm the idle timer
625941b2462c4b4f79d1d45b
def test_thumb_object():
    path = os.path.join(test_location, "armel", "i2c_api.o")
    l = cle.Loader(path, rebase_granularity=0x1000)
    for r in l.main_object.relocs:
        if r.__class__ == cle.backends.elf.relocation.arm.R_ARM_THM_JUMP24:
            if r.symbol.name == 'HAL_I2C_ER_IRQHandler':
                irsb = pyvex.lift(struct.pack('<I', r.value),
                                  r.rebased_addr + 1,
                                  l.main_object.arch,
                                  bytes_offset=1)
                assert_equal(irsb.default_exit_target, r.resolvedby.rebased_addr)
                break
    else:
        assert False, "Could not find JUMP24 relocation for HAL_I2C_ER_IRQHandler"
Test for an object file I ripped out of an ARM firmware HAL.
Uses some nasty relocs.

:return:
625941b24a966d76dd550d9a
def createDatabaseConnection(dbFile=config['SQLITE']['DATABASE']):
    conn = None
    try:
        conn = sqlite3.connect(dbFile, timeout=40, check_same_thread=False)
        conn.execute('pragma journal_mode=wal')
        return conn
    except Error as e:
        print(e)
    return conn
Create a database connection to the SQLite database specified by dbFile.

:param dbFile: database file
:return: Connection object or None
625941b21d351010ab8558b1
def __init__(self, throttling=None, local_vars_configuration=None):
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    self._throttling = None
    self.discriminator = None
    if throttling is not None:
        self.throttling = throttling
InlineResponse20011 - a model defined in OpenAPI
625941b20a366e3fb873e5a2
def array_string_csv_null(
        self, array_query=None, custom_headers={}, raw=False,
        **operation_config):
    url = '/queries/array/csv/string/null'
    query_parameters = {}
    if array_query is not None:
        query_parameters['arrayQuery'] = self._serialize.query(
            "array_query", array_query, '[str]', div=',')
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if custom_headers:
        header_parameters.update(custom_headers)
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)
    if response.status_code not in [200]:
        raise models.ErrorException(self._deserialize, response)
    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
Get a null array of string using the csv-array format.

:param array_query: a null array of string using the csv-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the deserialized
    response
:param operation_config: :ref:`Operation configuration
    overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
625941b2097d151d1a222bef
def __init__(self, docs):
    self.docs_ = docs
    self.id2word = {}
    self.word2id = {}
    self.vocab_ = set()
    self.__build_vocab()
    self.__build_id2word()
Initialize Vocabulary data structure with a collection of documents.

:param docs: Documents
625941b28e7ae83300e4ad5f
def __create_lnk(self, lnk_config, output_file):
    arguments = []
    arguments.append(self.wine)
    arguments.append(self.mklnk)
    arguments.append("-t")
    arguments.append(lnk_config["target_path"])
    arguments.append("-o")
    arguments.append(output_file)
    arguments.append("--window-style")
    arguments.append(lnk_config["window_style"])
    if lnk_config["working_dir"]:
        arguments.append("-w")
        arguments.append(lnk_config["working_dir"])
    if lnk_config["arguments"]:
        arguments.append("-a")
        arguments.append(lnk_config["arguments"])
    if lnk_config["icon_path"]:
        arguments.append("-i")
        arguments.append(lnk_config["icon_path"])
    if lnk_config["icon_index"]:
        arguments.append("--icon-index")
        arguments.append(lnk_config["icon_index"])
    if lnk_config["description"]:
        arguments.append("-d")
        arguments.append(lnk_config["description"])
    return subprocess.check_output(arguments)
Creates a .lnk file based on the given config.

Parameters
----------
lnk_config: dict
    The .lnk attributes configuration
output_file: str
    The output file to be created

Returns
----------
sub_output: str
    The subprocess output
625941b24527f215b584c1f0
def process_should_be_running(self, handle=None,
                              error_message='Process is not running.'):
    if not self.is_process_running(handle):
        raise AssertionError(error_message)
Verifies that the process is running. If `handle` is not given, uses the current `active process`. Fails if the process has stopped.
625941b26fece00bbac2d4c6
def nameGenerator(self) -> 'UniqueNameGenerator':
    return typing.cast('UniqueNameGenerator', self.idGenerators('name'))
Utility method to access the provided names generator (inside environment).

Returns the environment's unique name generator.
625941b2be7bc26dc91cd39a
def hamming_distance(pt1, pt2):
    distance = 0
    for i in range(len(pt1)):
        if pt1[i] != pt2[i]:
            distance += 1
    print(f'Hamming Distance for {pt1}, {pt2}:\n\t{distance}')
    return distance
Calculate the distance between two points using the Hamming Distance.

Scipy method: scipy.spatial.distance.hamming

Note: the Scipy method returns a float between 0 and 1, as a result of
dividing the distance by the number of dimensions of the lists.

For each dimension that does not have the same value in both points,
increment the distance by 1.
625941b2e5267d203edcda2f
def project_plans(self, project_key):
    resource = 'project/{}'.format(project_key, max_results=25)
    return self.base_list_call(resource, expand='plans', favourite=False,
                               clover_enabled=False, max_results=25,
                               elements_key='plans', element_key='plan')
Returns a generator with the plans in a given project.

:param project_key: Project key
:return: Generator with plans
625941b245492302aab5e04b
def getClassTypeId():
    return _simvoleon.SoVolumeTriangleStripSet_getClassTypeId()
getClassTypeId() -> SoType
625941b20383005118ecf371
def test_add_translation(self):
    obj = self.get()
    resp = obj.add_translation("nl_BE")
    self.assertEqual(resp["data"]["id"], 827)
    self.assertEqual(
        resp["data"]["revision"], "da6ea2777f61fbe1d2a207ff6ebdadfa15f26d1a"
    )
Perform verification that the correct endpoint is accessed.
625941b2956e5f7376d70c09
def ubrmse(actual: np.ndarray, predicted: np.ndarray):
    return np.sqrt(np.nansum((actual - (predicted - np.nanmean(actual)))**2) / len(actual))
unbiased Root Mean Squared Error
625941b2f548e778e58cd30e
def __init__(self, *args, **kwargs):
    if args and isinstance(args[0], list):
        self.polynom = args[0]
    elif args:
        self.polynom = args
    else:
        self.polynom = [kwargs.get(x, 0)
                        for x in ['x' + str(i) for i in range(len(kwargs) + 1)]]
Polynom initialization from positional args, kwargs, or a list passed as args[0].
625941b215fb5d323cde089b
def solveSudoku(board):
    usenum = ['1', '2', '3', '4', '5', '6', '7', '8', '9']

    def getnumlist(i, j, board):
        # Collect digits already used in the column, row and 3x3 box.
        used = dict()
        for k in range(9):
            if board[k][j] != '.':
                if board[k][j] not in used:
                    used[board[k][j]] = 1
        for k in range(9):
            if board[i][k] != '.':
                if board[i][k] not in used:
                    used[board[i][k]] = 1
        start = 0
        end = 0
        if i // 3 == 0:
            start = 0
        elif i // 3 == 1:
            start = 1 * 3
        else:
            start = 2 * 3
        if j // 3 == 0:
            end = 0
        elif j // 3 == 1:
            end = 1 * 3
        else:
            end = 2 * 3
        for k in range(3):
            for l in range(3):
                if board[k + start][l + end] != '.':
                    if board[k + start][l + end] not in used:
                        used[board[k + start][l + end]] = 1
        res = []
        for i in range(9):
            if usenum[i] not in used:
                res.append(usenum[i])
        return res

    flag = [False]
    memo = dict()
    stack = []

    def fillboard(board):
        if flag[0]:
            return
        for i in range(9):
            for j in range(9):
                if board[i][j] == '.':
                    nums = getnumlist(i, j, board)
                    for num in nums:
                        board[i][j] = num
                        fillboard(board)
                        if not flag[0]:
                            board[i][j] = '.'
                        else:
                            break
                    return
        flag[0] = True

    fillboard(board)
    return board
:type board: List[List[str]]
:rtype: None; do not return anything, modify board in-place instead.
625941b28c3a87329515814a
def load_data(train_path, valid_path, test_path):
    train = pd.read_csv(train_path, header=None)
    valid = pd.read_csv(valid_path, header=None)
    test = pd.read_csv(test_path, header=None)
    train_filenames = train[0]
    train_labels = train[1]
    valid_filenames = valid[0]
    valid_labels = valid[1]
    test_filenames = test[0]
    test_labels = test[1]
    train_filenames = tf.constant(train_filenames)
    train_labels = tf.constant(train_labels)
    valid_filenames = tf.constant(valid_filenames)
    valid_labels = tf.constant(valid_labels)
    test_filenames = tf.constant(test_filenames)
    test_labels = tf.constant(test_labels)
    return (train_filenames, train_labels), (valid_filenames, valid_labels), (test_filenames, test_labels)
Returns the ILSVRC dataset as (train_filenames, train_labels),
(valid_filenames, valid_labels), (test_filenames, test_labels).
625941b28a349b6b435e7f09
def diff(self, n, axis=0):
    if axis == 0:
        raise NotImplementedError
    new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8
    new_values = new_values.reshape(1, len(new_values))
    new_values = new_values.astype('timedelta64[ns]')
    return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
1st discrete difference.

Parameters
----------
n : int, number of periods to diff
axis : int, axis to diff upon. default 0

Returns
-------
A list with a new TimeDeltaBlock.

Notes
-----
The arguments here are mimicking shift so they are called correctly by apply.
625941b2796e427e537b034d
def test_another_nested_proxy_field_model_serializer_depth(self):
    self._nested_proxy_field_model_serializer_depth(
        self.proxy_author_listing_url
    )
Test NestedProxyField and ModelSerializer with more depth.
625941b27c178a314d6ef1e4
def test_get_readable_by_date_expired_key(self):
    expKey = Key.objects.get(key="exp_key")
    try:
        DataStream.objects.get_readable_by_key(expKey)
    except Exception as e:
        self.assertEqual(str(e), "None is not a valid key.")
Test that a Key which has an expired date raises an exception when using
DataStream's get_readable_by_key.
625941b299fddb7c1c9de127
def test_append_paths(self):
    if self.MODULE_GENERATOR_CLASS == ModuleGeneratorTcl:
        expected = ''.join([
            "append-path\tkey\t\t$root/path1\n",
            "append-path\tkey\t\t$root/path2\n",
            "append-path\tkey\t\t$root\n",
        ])
        paths = ['path1', 'path2', '']
        self.assertEqual(expected, self.modgen.append_paths("key", paths))
        self.assertEqual(expected, self.modgen.append_paths("key", paths))
        expected = "append-path\tbar\t\t$root/foo\n"
        self.assertEqual(expected, self.modgen.append_paths("bar", "foo"))
        res = self.modgen.append_paths("key", ["/abs/path"], allow_abs=True)
        self.assertEqual("append-path\tkey\t\t/abs/path\n", res)
        res = self.modgen.append_paths('key', ['[email protected]'], expand_relpaths=False)
        self.assertEqual("append-path\tkey\t\[email protected]\n", res)
    else:
        expected = ''.join([
            'append_path("key", pathJoin(root, "path1"))\n',
            'append_path("key", pathJoin(root, "path2"))\n',
            'append_path("key", root)\n',
        ])
        paths = ['path1', 'path2', '']
        self.assertEqual(expected, self.modgen.append_paths("key", paths))
        self.assertEqual(expected, self.modgen.append_paths("key", paths))
        expected = 'append_path("bar", pathJoin(root, "foo"))\n'
        self.assertEqual(expected, self.modgen.append_paths("bar", "foo"))
        expected = 'append_path("key", "/abs/path")\n'
        self.assertEqual(expected, self.modgen.append_paths("key", ["/abs/path"], allow_abs=True))
        res = self.modgen.append_paths('key', ['[email protected]'], expand_relpaths=False)
        self.assertEqual('append_path("key", "[email protected]")\n', res)
    self.assertErrorRegex(EasyBuildError,
                          "Absolute path %s/foo passed to update_paths "
                          "which only expects relative paths." % self.modgen.app.installdir,
                          self.modgen.append_paths, "key2",
                          ["bar", "%s/foo" % self.modgen.app.installdir])
Test generating append-paths statements.
625941b2de87d2750b85fb19
def main(argv):
    usage = 'Usage: %prog --out=merged_csv_file input_csv_files...'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--out', dest='outpath', type='string', action='store',
                      default=None, help='File to write merged results to')
    (options, args) = parser.parse_args(argv)
    if not options.outpath:
        parser.print_help()
        oper.Die('The --out option is required.')
    if not args:
        parser.print_help()
        oper.Die('At least one input_csv_file is required.')
    csv_table = LoadAndMergeTables(args)
    WriteTable(csv_table, options.outpath)
Main function.
625941b230bbd722463cbb56
def get_flavor(self, name_or_id, filters=None, get_extra=True):
    search_func = functools.partial(
        self.search_flavors, get_extra=get_extra)
    return _utils._get_entity(self, search_func, name_or_id, filters)
Get a flavor by name or ID.

:param name_or_id: Name or ID of the flavor.
:param filters:
    A dictionary of meta data to use for further filtering. Elements of
    this dictionary may, themselves, be dictionaries. Example::

        {
          'last_name': 'Smith',
          'other': {
              'gender': 'Female'
          }
        }

    OR
    A string containing a jmespath expression for further filtering.
    Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]"
:param get_extra:
    Whether or not the list_flavors call should get the extra flavor specs.
:returns: A flavor ``munch.Munch`` or None if no matching flavor is found.
625941b2b5575c28eb68dd8a
def harvesine_distance(loc1, loc2):
    lat1, long1 = loc1
    lat2, long2 = loc2
    # Convert degrees to radians.
    lat1 = lat1 * math.pi / 180
    lat2 = lat2 * math.pi / 180
    long1 = long1 * math.pi / 180
    long2 = long2 * math.pi / 180
    dlat = (lat2 - lat1)
    dlong = (long2 - long1)
    R = 6371  # Earth's mean radius in km
    # Haversine formula.
    a = math.sin(dlat / 2)**2 + (math.cos(lat1) * math.cos(lat2) * math.sin(dlong / 2)**2)
    b = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    c = R * b
    return c
input: locations as (lat, long) pairs
output: haversine distance in km
625941b2507cdc57c6306a5f
def p_LValue_lvalue_ID(p):
    p[0] = PT_LValue_Period_Id(p[1], p[3])
lvalue : lvalue PERIOD IDENTIFIER
625941b28a43f66fc4b53dff
def _add_fields(self):
    for name, kind in ((str(self.elevation_attribute), ogr.OFTReal),
                       (str(self.feature_id_attribute), ogr.OFTInteger)):
        definition = ogr.FieldDefn(name, kind)
        self.layer.CreateField(definition)
Create extra fields.
625941b23346ee7daa2b2af5
def create_app():
    app = jobmonitor.create_app()
    app.config.from_object('monitoring_app.config')
    if not app.debug:
        add_logging(app)
    example = Blueprint('example', __name__,
                        template_folder='templates',
                        static_folder='static',
                        static_url_path='/{0}'.format(__name__))
    app.register_blueprint(example)
    app.add_job_resolver(job_resolvers.tasks_resolver)
    return app
Create a Flask application deriving from jobmonitor.
625941b232920d7e50b27f61
def SetExtractionRegion(self, *args):
    return _itkExtractImageFilterPython.itkExtractImageFilterIVF33IVF33_SetExtractionRegion(self, *args)
SetExtractionRegion(self, itkImageRegion3 extractRegion)
625941b21f5feb6acb0c48ea
def getMenuNamed(self, menuName):
    if self.a11yAppName is None:
        self.a11yAppName = self.internCommand
    app = root
    apps = root.applications()
    for i in apps:
        if i.name.lower() == self.a11yAppName:
            app = i
            break
    try:
        appMenu = app.child(roleName='menu bar')
        return appMenu.child(name=menuName)
    except:
        return None
Return submenu with name specified with 'menuName'
625941b2bde94217f3682b8c
def test_ack(self):
    namespace = self.socketIO.define(Namespace)
    self.socketIO.emit(
        'trigger_server_expects_callback', deepcopy(PAYLOAD))
    self.socketIO.wait(self.wait_time_in_seconds)
    self.assertEqual(namespace.args_by_event, {
        'server_expects_callback': (PAYLOAD,),
        'server_received_callback': (PAYLOAD,),
    })
Respond to a server callback request
625941b23539df3088e2e0d9
def cached_url(url):
    folder = 'cached'
    filename = url.rsplit('/')[-2] + '.html'
    path = os.path.join(folder, filename)
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return f.read()
    else:
        if not os.path.exists(folder):
            os.makedirs(folder)
        driver.get(url)
        with open(path, 'wb') as f:
            f.write(driver.page_source.encode())
        content = driver.page_source
        return content
Cache pages to avoid repeated downloads.

:param url:
:return:
625941b2e64d504609d745d3
def p_BoolExpr_EQ(p):
    p[0] = BinaryBoolNode(p[1], p[2], p[3])
    p[0].pos_info = getPosition(p, 0)
BoolExpr : Expr EQ Expr
625941b2097d151d1a222bf1
def display_series(self):
    data_mgr = DatabaseManager(Config().database_name, None)
    if self.list_series.currentItem():
        series_rowid = self.list_series.currentItem().data(Qt.UserRole)
        cur = data_mgr.query("SELECT rowid, * FROM Series WHERE rowid = %d"
                             % series_rowid)
        series = entry_to_series(cur.fetchone())
        if series:
            self.list_series.currentItem().setText(series.compact_string())
            self.table_setup(series)
            self.edit_series_button.setEnabled(True)
            self.remove_series_button.setEnabled(True)
            self.add_next_volume_button.setEnabled(True)
            self.mark_as_completed_button.setEnabled(True)
Retrieves and displays info for selected series.

This function retrieves the unique rowid for the selected series and
retrieves the series from the database. It then updates all main window
elements which show series info to show up-to-date properties.

Once all series information is properly displayed, buttons which can change
the selected series's properties are enabled.
625941b2e8904600ed9f1cb7
def demo():
    if BibleOrgSysGlobals.verbosityLevel > 1:
        print(ProgNameVersion)
    if BibleOrgSysGlobals.commandLineOptions.export:
        bbosc = BibleBookOrdersConverter().loadSystems()
        bbosc.pickle()
        bbosc.exportDataToPython()
        bbosc.exportDataToJSON()
        bbosc.exportDataToC()
    else:
        bbosc = BibleBookOrdersConverter().loadSystems()
        print(bbosc)
Main program to handle command line parameters and then run what they want.
625941b224f1403a92600900
def hasCycle(self, head):
    if not head:
        return False
    walker = head
    runner = head
    while runner.next and runner.next.next:
        walker = walker.next
        runner = runner.next.next
        if walker == runner:
            return True
    return False
:type head: ListNode
:rtype: bool
625941b28c0ade5d55d3e74d
def _AddToNewSizer(self, sizer, props):
    for child in self.GetChildren():
        csp = props.get(child.GetId(), None)
        if csp is not None:
            self.GetSizer().Add(child)
            child.SetSizerProps(csp)
Add children to new sizer.

:param `sizer`: param is not used, remove it ???
:param `props`: sizer properties
625941b285dfad0860c3abe6
def learn(
    targets,
    numTrees=10,
    path="",
    regression=False,
    advice=False,
    softm=False,
    alpha=0.0,
    beta=0.0,
    saveJson=True,
):
    models = {}
    for target in targets:
        trainData = Utils.readTrainingData(
            target,
            path=path,
            regression=regression,
            advice=advice,
            softm=softm,
            alpha=alpha,
            beta=beta,
        )
        trees = []
        for i in range(numTrees):
            node.setMaxDepth(2)
            node.learnTree(trainData)
            trees.append(node.learnedDecisionTree)
            updateGradients(trainData, trees)
            if saveJson:
                params = {
                    "target": target,
                    "trees": i + 1,
                    "regression": regression,
                    "advice": advice,
                    "softm": softm,
                    "alpha": alpha,
                    "beta": beta,
                }
                model = [params, trees]
                Utils.save(".rfgb/models/" + target + ".json", model)
        models[target] = trees
    return models
.. versionadded:: 0.3.0

Learn a relational dependency network from facts and positive/negative
examples via relational regression trees.

.. note:: This currently requires that training data is stored as files
    on disk.

:param targets: List of target predicates to learn models for.
:type targets: list of str.
:param numTrees: Number of trees to learn.
:type numTrees: int.
:param path: Path to the location training data is stored.
:type path: str.
:param regression: Learn a regression model instead of classification.
:type regression: bool.
:param advice: Read an advice file from the same directory as trainPath.
:type advice: bool.

:default regression: False
:default advice: False

:returns: Dictionary where the key is the target and the value is the set
    of trees returned for that target.
:rtype: dict.
625941b2596a897236089859
def draw(self, ventana):
    ventana.blit(self.image, self.rect)
Displays the character on screen.
625941b245492302aab5e04d
def call(self, context, x, losses=None):
    memory_antecedent = self._get_memory_antecedent(context)
    memory_input_dim = memory_antecedent.shape[-1]
    if memory_input_dim != context.model.model_dim:
        raise NotImplementedError(
            "TODO(noam): support different model_dim in encoder and decoder.")
    q = self.compute_q(context, x)
    if context.mode == "incremental":
        m, memory_length = context.get_constant_state()
    else:
        m = memory_antecedent
        memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
        if context.mode == "first_part":
            context.record_constant_state((m, memory_length))
    bias = enc_dec_attention_bias(self, context, self.heads_dims)
    return self.attention_internal(context, q, m, memory_length, bias)
Call the layer.
625941b2a79ad161976cbed4
def _get_authorized_password():
    return [config.get("secure_uninstall"), config.get("admin_passwd")]
You can define your own authorized keys
625941b291af0d3eaac9b7a1
def index(request):
    u = current_user(request)
    body = template('weibo_index.html')
    return http_response(body)
Handler for the home page; returns the home page response.
625941b27c178a314d6ef1e5
def jerr(r):
    rc = r['return']
    re = r['error']
    out('Error: ' + re)
    raise KeyboardInterrupt
Print error message for CK functions in the Jupyter Notebook and raise
KeyboardInterrupt.

Target audience: end users

Used in Jupyter Notebook

Example:
    import ck.kernel as ck
    r=ck.access({'action':'load', 'module_uoa':'tmp', 'data_uoa':'some tmp entry'})
    if r['return']>0: ck.jerr(r)

Args:
    r (dict): output dictionary of any standard CK function:
        - return (int): return code
        - (error) (str): error string if return>0

Returns:
    None - exits script with KeyboardInterrupt!
625941b2d486a94d0b98dede
def prep_http_method(self, method):
    method.view_class = self.viewClass
    method.init_kwargs = self.kwargs
    return method
To emulate View.as_view() we could do this on EACH http method. Normally as_view is only made with one.
625941b250485f2cf553cb26
def metadata(self, request):
    metadata = super(AmCATMetadataMixin, self).metadata(request)
    metadata['label'] = self.get_label()
    grfm = api.rest.resources.get_resource_for_model
    metadata['models'] = {name: grfm(field.queryset.model).get_url()
                          for (name, field) in self.get_serializer().get_fields().iteritems()
                          if hasattr(field, 'queryset')}
    metadata['fields'] = {name: _get_field_name(field)
                          for (name, field) in self.get_serializer().get_fields().iteritems()}
    metadata['filter_fields'] = list(self.get_filter_fields())
    return metadata
This is used by the OPTIONS request; add models, fields, and label for datatables
625941b276d4e153a657e8be
@click.command()
@click.argument('term')
def run(term):
    word = TurkishWord(term)
    word.query()
    output = word.meaning
    click.echo(output)
A command line tool to query meaning of Turkish word from official dictionary.
625941b20fa83653e4656d53
def purgeBrackets(string):
    if not string:
        return False
    else:
        string = string.replace("<", "").replace(">", "")
        return string
Get rid of <> around a string
625941b2dc8b845886cb52c2
def test_llist_find_from_positive_non_existent_key(self):
    elements_list = TestLList.llist_integer.find_from(
        21, 2, {'timeout': 1000})
    assert elements_list == [56, 122]
Invoke find_from() to access elements from a non-existent key
625941b2796e427e537b0350
def _ascii_to_hex(symbol):
    return symbol.encode("hex")
Encode an ASCII symbol to a hex char.
625941b2f8510a7c17cf9494
@Pipe
def _add_move_issue_subparser(subparsers: _SubParsersAction):
    move_issue_parser = subparsers.add_parser(
        "move", aliases=["mv"],
        help="Moves an issue from one status to another")
    move_issue_parser.add_argument(
        "transition", metavar="TRANSITION",
        help="The transition to perform on the issue")
    move_issue_parser.set_defaults(move_issue=True)
    return subparsers
Creates a subparser for moving an issue.
625941b2de87d2750b85fb1b
def draw_geometry(self, painter):
    painter.ellipse(self.frame.x1, self.frame.y1, self.frame.x2, self.frame.y2)
Draw the ellipse.

:param painter: the painting tool to draw with
625941b273bcbd0ca4b2be0b
def _record(self, value, rank, delta, successor):
    return _Sample(value, rank, delta, successor)
Catalogs a sample.
625941b27047854f462a11a4
def setUp(self):
    self.register_endpoint = reverse('register')
    self.login_endpoint = reverse('login')
    self.comment_reactions_endpoint = reverse('comment_reactions')
    self.user = {
        "user": {
            "username": "username_tu",
            "email": "[email protected]",
            "password": "#Strong2-password"
        }
    }
    self.another_user = {
        "user": {
            "username": "dmithamo",
            "email": "[email protected]",
            "password": "#Strong2-password"
        }
    }
    self.third_user = {
        "user": {
            "username": "lkhalegi",
            "email": "[email protected]",
            "password": "#Strong2-password"
        }
    }
    self.article = {
        "title": "Django Unchained",
        "description": "Django without chains",
        "body": "The chains were removed from the Django",
        "tagList": "tag, list"
    }
    self.article_two = {
        "title": "War is not It",
        "description": "Civil War and Stuff",
        "body": "The civil war happened and yes",
        "tagList": "civil, war"
    }
    self.comment_one = {
        "body": "Mike will made it"
    }
    self.comment_two = {
        "body": "Mithamo made a comment. Or did he?"
    }
Set up
625941b2046cf37aa974cada
def _winrm_connect(self):
    display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
                (self._winrm_user, self._winrm_port, self._winrm_host),
                host=self._winrm_host)
    netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
    endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
    errors = []
    for transport in self._winrm_transport:
        if transport == 'kerberos':
            if not HAVE_KERBEROS:
                errors.append('kerberos: the python kerberos library is not installed')
                continue
            if self._kerb_managed:
                self._kerb_auth(self._winrm_user, self._winrm_pass)
        display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
                      host=self._winrm_host)
        try:
            winrm_kwargs = self._winrm_kwargs.copy()
            if self._winrm_connection_timeout:
                winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
                winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
            protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
            if not self.shell_id:
                self.shell_id = protocol.open_shell(codepage=65001)
                display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id,
                              host=self._winrm_host)
            return protocol
        except Exception as e:
            err_msg = to_text(e).strip()
            if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
                raise AnsibleError('the connection attempt timed out')
            m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
            if m:
                code = int(m.groups()[0])
                if code == 401:
                    err_msg = 'the specified credentials were rejected by the server'
                elif code == 411:
                    return protocol
            errors.append(u'%s: %s' % (transport, err_msg))
            display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' %
                          (err_msg, to_text(traceback.format_exc())),
                          host=self._winrm_host)
    if errors:
        raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
    else:
        raise AnsibleError('No transport found for WinRM connection')
Establish a WinRM connection over HTTP/HTTPS.
625941b2e1aae11d1e749a49
def add_discovery(self, device, address):
    self.post('discovery', params={'device': device, 'address': address})
Add an entry to the discovery cache.

Args:
    device (str): Device ID.
    address (str): destination address, a valid hostname or IP address
        that's serving a Syncthing instance.

Returns:
    None
625941b23346ee7daa2b2af7
def prepare_data_directory():
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)
create the target directory
625941b215baa723493c3d06
def reverse_list_in_place(items):
    # Swap each element in the first half with its mirror from the end.
    swap_number = custom_len(items) // 2
    for i in range(swap_number):
        current_n = items[i]
        current_neg_n = items[(i + 1) * -1]
        items[i] = current_neg_n
        items[(i + 1) * -1] = current_n
Reverse the input list `in place`.

Reverse the input list given, but do it "in place" --- that is, do not
create a new list and return it, but modify the original list.

**Do not use** the python function `reversed()` or the method
`list.reverse()`.

For example::

    >>> orig = [1, 2, 3]
    >>> reverse_list_in_place(orig)
    >>> orig
    [3, 2, 1]

    >>> orig = ["cookies", "love", "I"]
    >>> reverse_list_in_place(orig)
    >>> orig
    ['I', 'love', 'cookies']
625941b2b5575c28eb68dd8c
def dice(predictions, labels, num_classes):
    dice_scores = np.zeros((num_classes))
    for i in range(num_classes):
        tmp_den = (np.sum(predictions == i) + np.sum(labels == i))
        tmp_dice = 2. * np.sum((predictions == i) * (labels == i)) / tmp_den if tmp_den > 0 else 1.
        dice_scores[i] = tmp_dice
    return dice_scores.astype(np.float32)
Calculates the categorical Dice similarity coefficients for each class
between labels and predictions.

Args:
    predictions (np.ndarray): predictions
    labels (np.ndarray): labels
    num_classes (int): number of classes to calculate the dice coefficient for

Returns:
    np.ndarray: dice coefficient per class
625941b226068e7796caea6f
def setUp(self):
    super(AtStyleSchedulerTests, self).setUp()
    response = self.autoscale_behaviors.create_scaling_group_given(
        lc_name='at_style_scheduled', gc_cooldown=0)
    self.group = response.entity
    self.resources.add(self.group, self.empty_scaling_group)
Create a scaling group with minentities=0 and cooldown=0
625941b2fff4ab517eb2f1cf
def test04_itersorted8(self):
    table = self.table
    sortedtable = numpy.sort(table[:], order='icol')[55:33:-5]
    sortedtable2 = numpy.array(
        [row.fetch_all_fields() for row in table.itersorted(
            'icol', start=55, stop=33, step=-5)],
        dtype=table._v_dtype)
    if verbose:
        print("Original sorted table:", sortedtable)
        print("The values from the iterator:", sortedtable2)
    self.assertTrue(allequal(sortedtable, sortedtable2))
Testing the Table.itersorted() method with a start, stop and negative step.
625941b2a17c0f6771cbddec
def rgb_to_cmyk(rgb):
    r = rgb.red / 255.0
    g = rgb.green / 255.0
    b = rgb.blue / 255.0
    c = 1 - r
    m = 1 - g
    y = 1 - b
    if c == 1 and m == 1 and y == 1:
        return colormodel.CMYK(0.0, 0.0, 0.0, 1.0*100.0)
    else:
        k = min(c, m, y)
        cy = ((c-k)/(1-k))*100.0
        mag = ((m-k)/(1-k))*100.0
        yell = ((y-k)/(1-k))*100.0
        return colormodel.CMYK(cy, mag, yell, k*100.0)
Returns: color rgb in space CMYK, with the most black possible.

Formulae from en.wikipedia.org/wiki/CMYK_color_model.

Parameter rgb: the color to convert to a CMYK object
Precondition: rgb is an RGB object
625941b23539df3088e2e0da
def getNetworkSwitchRoutingMulticastRendezvousPoint(self, networkId: str, rendezvousPointId: str):
    metadata = {
        'tags': ['switch', 'configure', 'routing', 'multicast', 'rendezvousPoints'],
        'operation': 'getNetworkSwitchRoutingMulticastRendezvousPoint'
    }
    resource = f'/networks/{networkId}/switch/routing/multicast/rendezvousPoints/{rendezvousPointId}'
    return self._session.get(metadata, resource)
**Return a multicast rendezvous point**
https://developer.cisco.com/meraki/api-v1/#!get-network-switch-routing-multicast-rendezvous-point

- networkId (string): (required)
- rendezvousPointId (string): (required)
625941b21d351010ab8558b5
def getPremiumInfo(self, authenticationToken):
    self.send_getPremiumInfo(authenticationToken)
    return self.recv_getPremiumInfo()
Returns information regarding a user's Premium account corresponding to the
provided authentication token, or throws an exception if this token is not
valid.

Parameters:
 - authenticationToken
625941b2ec188e330fd5a540
def reset(self):
    self.clear_cache()
    self.clear_data()
    self.clear_settings()
Delete workflow settings, cache and data. File :attr:`settings <settings_path>` and directories :attr:`cache <cachedir>` and :attr:`data <datadir>` are deleted.
625941b28e05c05ec3eea107
def __repr__(self):
    return u'{}(return_code={!r}, stderr={!r}, msg={!r})'.format(
        type(self).__name__, self.return_code, self.stderr, self.msg
    )
Include class name, return_code, stderr and msg to improve logging.
625941b2e5267d203edcda31
@pytest.fixture(params=get_test_data_list())
def get_my_test_data(request):
    return request.param
My custom fixture. By using this fixture pytest executes the test method
for each element in the list params.

:param request: A request for a fixture from a test or fixture function.
    A request object gives access to the requesting test context and has an
    optional param attribute in case the fixture is parametrized indirectly.
:type request: fixture
:return: request.param
:rtype: dict
625941b26aa9bd52df036b31
def slidingPuzzle(self, board):
    target = '123450'
    start = ''.join(str(i) for tiles in board for i in tiles)
    # Legal moves of the blank tile for each position in the 2x3 board.
    moves = [[1, 3], [0, 2, 4], [1, 5], [0, 4], [1, 3, 5], [2, 4]]
    current_level, next_level = [start], []
    result = 0
    visited = set()
    while current_level:
        for state in current_level:
            if state == target:
                return result
            i = state.index('0')
            for move in moves[i]:
                new_state = list(state)
                new_state[i], new_state[move] = new_state[move], new_state[i]
                new_state = ''.join(new_state)
                if new_state not in visited:
                    next_level.append(new_state)
                    visited.add(new_state)
        result += 1
        current_level, next_level = next_level, []
    return -1
:type board: List[List[int]]
:rtype: int
625941b20a50d4780f666c1e
def flip_query_coords(self, n):
    qs = self.qs
    self.qs = n - self.qe
    self.qe = n - qs
Flip the coordinates with respect to the query with n fragments
625941b24527f215b584c1f4
def onOK(self):
    fld = self.getField(True)
    if fld.name == "":
        QMessageBox.critical(self, self.tr("DB Manager"),
                             self.tr("field name must not be empty"))
        return
    if fld.dataType == "":
        QMessageBox.critical(self, self.tr("DB Manager"),
                             self.tr("field type must not be empty"))
        return
    self.accept()
first check whether everything's fine
625941b2046cf37aa974cadb
def verify_profile_pin(guid):
    if not g.LOCAL_DB.get_profile_config('isPinLocked', False, guid=guid):
        return True
    pin = ask_for_pin(common.get_local_string(30006))
    return None if not pin else verify_profile_lock(guid, pin)
Verify if the profile is locked by a PIN and ask the PIN
625941b2460517430c393f22
def _parse_area_source(element):
    ID, name, tect_reg = _get_id_name_tect_reg(element)
    polygon = _get_polygon(element)
    mfd = _get_mfd(element)
    return AreaSourceNRML04(polygon, mfd)
Parse NRML 0.4 area source element.
625941b2956e5f7376d70c0d
def rand_points_with_push(n, box, sep):
    x_coord = np.random.randint(box[0], box[1], (1, n)).astype(float)
    y_coord = np.random.randint(box[2], box[3], (1, n)).astype(float)
    return _push_points(x_coord, y_coord, box, sep)
Generate a set of n random points within a box.

Box should be a tuple containing the minimum and maximum co-ordinates
desired in the form (xmin, xmax, ymin, ymax).
625941b25f7d997b8717482b
def menu(runtime):
    page_refresh()
    sleep(1)
    print('1) Ping a single host for the default time.')
    print('2) Ping a number of hosts for the default time.')
    print('3) Ping each host in a path for the default time.')
    print('4) Change the default time (currently: {} minutes).'.format(runtime))
    print('5) Create graph of an existing file.')
    print('0) Exit')
    selection = input('\nPlease select one of the options above: ')
    if selection == '1':
        hostname = input('\nEnter the destination address/hostname: ')
        if host_check(hostname) is not None:
            one(runtime, hostname)
        elif host_check(hostname) is None:
            print('{} is not responsive.'.format(hostname))
            reset_session(runtime)
    elif selection == '2':
        hosts, hosts_raw = [], []
        if not path.exists('hosts.txt'):
            num_hosts = int(input('Enter the number of hosts to test: '))
            for n in range(num_hosts):
                hostname = input('Enter the address/hostname of host #{}: '.format(n + 1))
                with open('hosts.txt', 'a+') as file:
                    file.write(hostname)
                    file.write('\n')
        with open('hosts.txt', 'r') as file:
            for line in file.readlines():
                hosts_raw.append(line.strip())
                with Pool(len(hosts_raw)) as checker:
                    checked = checker.map(host_check, hosts_raw)
                    for i in list(checked):
                        if i is not None:
                            hosts.append(str(i))
        two(runtime, hosts)
    elif selection == '3':
        hostname = input('\nPlease enter the destination address/hostname: ')
        print('Checking endpoint host...')
        if host_check(hostname) is not None:
            three(runtime, hostname)
        elif host_check(hostname) is None:
            print('{} is not responsive.'.format(hostname))
            reset_session(runtime)
    elif selection == '4':
        mod_runtime = int(input('Enter the time (in minutes) to run each test: '))
        menu(mod_runtime)
    elif selection == '5':
        filename = input('Please enter the filename (ends in ".log": ').strip()
        if path.exists(filename):
            hostname = filename[:-4]
            graph(hostname)
        elif not path.exists(filename):
            print('That file is not present in the current directory.\nResetting session.')
            reset_session(runtime)
    elif selection == '0':
        page_refresh()
        exit()
    else:
        print('Your selection did not match one of the options provided!')
        reset_session(runtime)
Menu system for user interaction.
625941b250485f2cf553cb28
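The menu leans on a host_check helper that is not shown here. A minimal sketch, assuming it returns the hostname when a single ping succeeds and None otherwise (the menu only ever compares its result against None); the ping flags below are the Linux ones.

import subprocess

def host_check(hostname):
    # Hypothetical reconstruction: one ping with a one-second timeout.
    result = subprocess.run(['ping', '-c', '1', '-W', '1', hostname],
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
    return hostname if result.returncode == 0 else None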
def handle_add_permissions_to_key(request, service): <NEW_LINE> <INDENT> service_name = request.get('name') <NEW_LINE> group_name = request.get('group') <NEW_LINE> group_namespace = request.get('group-namespace') <NEW_LINE> if group_namespace: <NEW_LINE> <INDENT> group_name = "{}-{}".format(group_namespace, group_name) <NEW_LINE> <DEDENT> group = get_group(group_name=group_name) <NEW_LINE> service_obj = get_service_groups(service=service_name, namespace=group_namespace) <NEW_LINE> permission = request.get('group-permission') or "rwx" <NEW_LINE> if service_name not in group['services']: <NEW_LINE> <INDENT> group['services'].append(service_name) <NEW_LINE> <DEDENT> save_group(group=group, group_name=group_name) <NEW_LINE> if permission not in service_obj['group_names']: <NEW_LINE> <INDENT> service_obj['group_names'][permission] = [] <NEW_LINE> <DEDENT> if group_name not in service_obj['group_names'][permission]: <NEW_LINE> <INDENT> service_obj['group_names'][permission].append(group_name) <NEW_LINE> <DEDENT> save_service(service=service_obj, service_name=service_name) <NEW_LINE> service_obj['groups'] = _build_service_groups(service_obj, group_namespace) <NEW_LINE> update_service_permissions(service_name, service_obj, group_namespace)
Groups are defined by the key cephx.groups.(namespace-)?-(name). This key will contain a dict serialized to JSON with data about the group, including pools and members. A group can optionally have a namespace defined that will be used to further restrict pool access.
625941b2796e427e537b0352
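For reference, a hypothetical request payload inferred from the keys the handler reads; get_group, save_group and the other persistence helpers belong to the surrounding broker and are not shown, so the call itself is left commented out.

request = {
    'name': 'glance',           # service whose key gains access (assumed example)
    'group': 'images',          # group of pools being granted (assumed example)
    'group-namespace': 'prod',  # optional; prefixes the group name
    'group-permission': 'rwx',  # falls back to "rwx" when omitted
}
# handle_add_permissions_to_key(request, service='admin')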
def nodes_status(self, client: str = "", node: int = None): <NEW_LINE> <INDENT> client_plugin = self._select_client(instance_id=client).plugin <NEW_LINE> nodes = client_plugin.nodes() <NEW_LINE> if node is None: <NEW_LINE> <INDENT> return cli_output(list(n.status.to_dict() for n in nodes)) <NEW_LINE> <DEDENT> return cli_output(nodes[node].status.to_dict())
Get the status of the nodes known to a client plugin: all nodes when no index is given, otherwise just the selected node.
625941b27c178a314d6ef1e8
def check_best_score(stats, sb): <NEW_LINE> <INDENT> if stats.best_score > stats.step: <NEW_LINE> <INDENT> stats.best_score = stats.step <NEW_LINE> sb.prep_best_score()
Check whether a new best score (fewest steps) has been achieved.
625941b23317a56b869399fc
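A toy harness for the check, with hypothetical stand-ins for the stats object and scoreboard (in the original, sb.prep_best_score() presumably redraws the score display). Since fewer steps is better, best_score starts at infinity.

class GameStats:
    def __init__(self):
        self.step = 0
        self.best_score = float('inf')   # lower is better, so start high

class Scoreboard:
    def __init__(self, stats):
        self.stats = stats
    def prep_best_score(self):
        print('new best: {} steps'.format(self.stats.best_score))

stats = GameStats()
stats.step = 42
check_best_score(stats, Scoreboard(stats))   # prints "new best: 42 steps"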
def _add_defaults_optional(self): <NEW_LINE> <INDENT> pass
We don't want any of the default optional files: setup.cfg, pyproject.toml, and test/test*.py, all from the toplevel.
625941b282261d6c526ab234
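This override only does anything inside a setuptools sdist subclass, where the base _add_defaults_optional is the hook that pulls in setup.cfg, pyproject.toml and test/test*.py. A sketch of the likely surrounding class:

from setuptools.command.sdist import sdist

class MinimalSdist(sdist):
    def _add_defaults_optional(self):
        # Deliberately skip the optional defaults so the sdist only
        # carries files listed explicitly (e.g. in MANIFEST.in).
        pass

It would be wired up via cmdclass={'sdist': MinimalSdist} in setup(), assuming a setuptools-based build.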
def test_contains(self): <NEW_LINE> <INDENT> t = StringTrie() <NEW_LINE> test_keys = [u'', u'f', u'foo', u'foobar', u'baz'] <NEW_LINE> for key in test_keys: <NEW_LINE> <INDENT> t[key] = key <NEW_LINE> <DEDENT> for key in test_keys: <NEW_LINE> <INDENT> self.assertTrue(key in t) <NEW_LINE> <DEDENT> for key in [u'x', u'fb', u'foob', u'fooba', u'bazz']: <NEW_LINE> <INDENT> self.assertFalse(key in t)
Test the contains operator.
625941b260cbc95b062c62d9
def new(self, notebody='', category=''): <NEW_LINE> <INDENT> cid = self.find_category(name=category) <NEW_LINE> if category and not cid: <NEW_LINE> <INDENT> cid = str(uuid.uuid4()) <NEW_LINE> self.categories[cid] = {'name': category} <NEW_LINE> <DEDENT> note = Note(gui_class=self.gui_class, noteset=self, category=cid) <NEW_LINE> note.body = notebody <NEW_LINE> note.set_locked_state(bool(notebody)) <NEW_LINE> self.notes.append(note) <NEW_LINE> if self.gui_class: <NEW_LINE> <INDENT> note.show() <NEW_LINE> <DEDENT> return note
Creates a new note and adds it to the note set
625941b29b70327d1c4e0b66
def _extract_raw_predictions(self, predictions: Optional[pd.DataFrame] = None) -> pd.DataFrame: <NEW_LINE> <INDENT> if predictions is None: <NEW_LINE> <INDENT> predictions = self.predict <NEW_LINE> <DEDENT> df = predictions.merge(self.avgint, on=['avgint_id']) <NEW_LINE> df = df.merge(self.integrand, on=['integrand_id']) <NEW_LINE> df['rate'] = df['integrand_name'].map( PRIMARY_INTEGRANDS_TO_RATES ) <NEW_LINE> if not [c for c in df.columns if 'location_id' in c]: <NEW_LINE> <INDENT> df = df.merge(self.node, on=['node_id']) <NEW_LINE> <DEDENT> if not [c for c in df.columns if 'sex_id' in c]: <NEW_LINE> <INDENT> sex_cov = self.covariate.loc[self.covariate.c_covariate_name.isin(['sex', 's_sex']), 'covariate_name'].squeeze() <NEW_LINE> sex_id_map = {v:SEX_NAME_TO_ID[k] for k,v in StudyCovConstants.SEX_COV_VALUE_MAP.items()} <NEW_LINE> df['sex_id'] = df[sex_cov].replace(sex_id_map) <NEW_LINE> <DEDENT> return df
Grab raw predictions from the predict table, or optionally merge a supplied predictions data frame with the avgint and integrand tables. This is a work-around for when we want to use a different prediction data frame (e.g. one built with multithreading), because dismod_at does not allow you to set the predict table.
625941b28c3a87329515814e
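The core of the method is two merges plus a rate lookup. A toy sketch with made-up tables; in the real pipeline avgint and integrand come from a dismod_at database, and PRIMARY_INTEGRANDS_TO_RATES is assumed to map integrand names such as 'Sincidence' to rate names such as 'iota'.

import pandas as pd

PRIMARY_INTEGRANDS_TO_RATES = {'Sincidence': 'iota'}   # assumed mapping

predict = pd.DataFrame({'avgint_id': [0, 1], 'avg_integrand': [0.01, 0.02]})
avgint = pd.DataFrame({'avgint_id': [0, 1], 'integrand_id': [0, 0], 'node_id': [5, 5]})
integrand = pd.DataFrame({'integrand_id': [0], 'integrand_name': ['Sincidence']})

df = predict.merge(avgint, on=['avgint_id']).merge(integrand, on=['integrand_id'])
df['rate'] = df['integrand_name'].map(PRIMARY_INTEGRANDS_TO_RATES)
print(df[['avgint_id', 'integrand_name', 'rate', 'avg_integrand']])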
def get_info(): <NEW_LINE> <INDENT> temp_info = {} <NEW_LINE> with open('info.txt','r',encoding='utf-8') as f: <NEW_LINE> <INDENT> line = f.readline() <NEW_LINE> while line: <NEW_LINE> <INDENT> info = line.rstrip().split(' ') <NEW_LINE> temp_info[info[0]] = int(info[1]) <NEW_LINE> line = f.readline() <NEW_LINE> <DEDENT> <DEDENT> return temp_info
Get the employee data from info.txt. :return: dict of employee data
625941b2091ae35668666cfa
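The expected info.txt layout is one space-separated "name count" pair per line. A quick round trip under that assumption:

with open('info.txt', 'w', encoding='utf-8') as f:
    f.write('alice 3\nbob 7\n')

print(get_info())   # {'alice': 3, 'bob': 7}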
def target_update(self): <NEW_LINE> <INDENT> self.target_net.load_state_dict(self.policy_net.state_dict())
Update the target network. Run this method regularly to keep learning stable.
625941b22ae34c7f2600cec8
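A minimal sketch of the surrounding DQN pattern, assuming policy_net and target_net are identically shaped torch modules; the hard update copies every parameter at once, typically every N optimisation steps.

import torch.nn as nn

class Agent:
    def __init__(self):
        self.policy_net = nn.Linear(4, 2)   # toy networks; real ones are deeper
        self.target_net = nn.Linear(4, 2)
        self.target_update()                # start the two nets in sync

    def target_update(self):
        self.target_net.load_state_dict(self.policy_net.state_dict())

agent = Agent()
for step in range(1, 1001):
    # ... one gradient step on agent.policy_net would go here ...
    if step % 100 == 0:                     # hard update; 100 is a common choice
        agent.target_update()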
def output_sdss_dir(catl_kind='data', catl_type='mr', sample_s='19', Program_Msg=fd.Program_Msg(__file__)): <NEW_LINE> <INDENT> outdir = gp.get_plot_path()+'SDSS/'+catl_kind+'/'+catl_type+'/' <NEW_LINE> outdir += 'Mr'+sample_s <NEW_LINE> fd.Path_Folder(outdir) <NEW_LINE> print('{0} `outdir`: {1}'.format(Program_Msg, outdir)) <NEW_LINE> return outdir
Output directory for the SDSS plots, either for `data` or `mocks` Parameters ---------- catl_kind: string, optional (default = 'data') type of catalogue to use Options: - 'data': catalogue comes from SDSS 'real' catalog - 'mocks': catalogue(s) come from SDSS 'mock' catalogues catl_type: string, optional (default = 'mr') type of catalogue to use. It shows which abundance matching method was used for the CLF when assigning halo masses. Options: - 'mr' : Uses r-band abs. luminosities - 'mstar': Uses stellar masses sample_s: string, optional (default = '19') volume-limited sample to use. Options: - '19': Uses the -19 volume-limited 'Consuelo' sample - '20': Uses the -20 volume-limited 'Esmeralda' sample - '21': Uses the -21 volume-limited 'Carmen' sample Returns ---------- outdir: string path to the output directory
625941b23346ee7daa2b2af8
def generateBoogieVamp(blRealization=None, numRepeats=5): <NEW_LINE> <INDENT> from music21 import converter <NEW_LINE> from music21 import stream <NEW_LINE> from music21 import interval <NEW_LINE> if blRealization is None: <NEW_LINE> <INDENT> bluesLine = twelveBarBlues() <NEW_LINE> fbRules = rules.Rules() <NEW_LINE> fbRules.partMovementLimits = [(1, 4), (2, 12), (3, 12)] <NEW_LINE> fbRules.forbidVoiceOverlap = False <NEW_LINE> blRealization = bluesLine.realize(fbRules) <NEW_LINE> <DEDENT> sampleScore = blRealization.generateRandomRealizations(numRepeats) <NEW_LINE> boogieBassLine = converter.parse("tinynotation: BB-8. D16 F8. G16 A-8. G16 F8. D16", makeNotation=False) <NEW_LINE> newBassLine = stream.Part() <NEW_LINE> newBassLine.append(sampleScore[1][0]) <NEW_LINE> newBassLine.append(sampleScore[1][1]) <NEW_LINE> for n in sampleScore[1].notes: <NEW_LINE> <INDENT> i = interval.notesToInterval(boogieBassLine[0], n) <NEW_LINE> tp = boogieBassLine.transpose(i) <NEW_LINE> for lyr in n.lyrics: <NEW_LINE> <INDENT> tp.notes.first().addLyric(lyr.text) <NEW_LINE> <DEDENT> for m in tp.notes: <NEW_LINE> <INDENT> newBassLine.append(m) <NEW_LINE> <DEDENT> <DEDENT> newScore = stream.Score() <NEW_LINE> newScore.insert(0, sampleScore[0]) <NEW_LINE> newScore.insert(newBassLine) <NEW_LINE> return newScore
Turns whole notes in twelve bar blues bass line to blues boogie woogie bass line. Takes in numRepeats, which is the number of times to repeat the bass line. Also, takes in a realization of :meth:`~music21.figuredBass.examples.twelveBarBlues`. If none is provided, a default realization with :attr:`~music21.figuredBass.rules.Rules.forbidVoiceOverlap` set to False and :attr:`~music21.figuredBass.rules.Rules.partMovementLimits` set to [(1, 4), (2, 12), (3, 12)] is used. >>> from music21.figuredBass import examples >>> #_DOCS_SHOW examples.generateBoogieVamp(numRepeats=1).show() .. image:: images/figuredBass/fbExamples_boogieVamp.* :width: 700
625941b2de87d2750b85fb1e
def add_timeseries(self, in_epoch_name, timeseries): <NEW_LINE> <INDENT> epoch_ts = {} <NEW_LINE> if isinstance(timeseries, nwbts.TimeSeries): <NEW_LINE> <INDENT> timeseries_path = timeseries.full_path() <NEW_LINE> <DEDENT> elif isinstance(timeseries, str): <NEW_LINE> <INDENT> timeseries_path = timeseries <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.nwb.fatal_error("Don't recognize timeseries parameter as time series or path") <NEW_LINE> <DEDENT> if not timeseries_path.startswith('/'): <NEW_LINE> <INDENT> timeseries_path = '/' + timeseries_path <NEW_LINE> <DEDENT> epoch_ts["timeseries"] = timeseries_path <NEW_LINE> if timeseries_path not in self.nwb.file_pointer: <NEW_LINE> <INDENT> self.nwb.fatal_error("Time series '%s' not found" % timeseries_path) <NEW_LINE> <DEDENT> ts = self.nwb.file_pointer[timeseries_path] <NEW_LINE> if "timestamps" in ts: <NEW_LINE> <INDENT> t = ts["timestamps"].value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> n = ts["num_samples"].value <NEW_LINE> t0 = ts["starting_time"].value <NEW_LINE> rate = ts["starting_time"].attrs["rate"] <NEW_LINE> t = t0 + np.arange(n) / rate <NEW_LINE> <DEDENT> i0, i1 = self.find_ts_overlap(t) <NEW_LINE> if i0 is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> epoch_ts["start_idx"] = i0 <NEW_LINE> epoch_ts["count"] = i1 - i0 + 1 <NEW_LINE> self.timeseries_dict[in_epoch_name] = epoch_ts <NEW_LINE> label = "'" + in_epoch_name + "' is '" + timeseries_path + "'" <NEW_LINE> self.spec["_attributes"]["links"]["_value"].append(label) <NEW_LINE> self.spec["_attributes"]["links"]["_value"].sort()
Associates time series with epoch. This will create a link to the specified time series within the epoch and will calculate its overlaps. Arguments: *in_epoch_name* (text) Name that time series will use in the epoch (this can be different than the actual time series name) *timeseries* (text or TimeSeries object) Full hdf5 path to time series that's being added, or the TimeSeries object itself Returns: *nothing*
625941b2925a0f43d2549c03
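find_ts_overlap is referenced but not shown. A plausible reconstruction with numpy.searchsorted, assuming the epoch carries start/stop times and the timestamps are sorted — an assumption, not the library's actual implementation.

import numpy as np

def find_ts_overlap(start_time, stop_time, t):
    # Hypothetical: first/last indices of timestamps inside [start, stop].
    i0 = np.searchsorted(t, start_time, side='left')
    i1 = np.searchsorted(t, stop_time, side='right') - 1
    if i0 > i1 or i0 >= len(t):
        return None, None
    return i0, i1

t = np.arange(0.0, 10.0, 0.5)           # timestamps at 2 Hz
print(find_ts_overlap(2.0, 4.0, t))     # (4, 8)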
def simon(message, **kwargs): <NEW_LINE> <INDENT> warnings.warn("SIMON says: {0}".format(message), **kwargs)
The Statistical Interpretation MONitor. A warning system designed to always remind the user that Simon is watching him/her. Parameters ---------- message : string The message that is thrown kwargs : dict The rest of the arguments that are passed to ``warnings.warn``
625941b21d351010ab8558b7
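Because the kwargs flow straight into warnings.warn, category and stacklevel behave as usual:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    simon('maybe check the exposure time', category=UserWarning)

print(caught[0].message)   # SIMON says: maybe check the exposure time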
def p_paren(p): <NEW_LINE> <INDENT> p[0] = p[2]
condition : LPAREN condition RPAREN
625941b2462c4b4f79d1d461
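In PLY, the docstring is the grammar production itself: yacc.yacc() scans the module for p_* functions and reads each rule from its docstring, so this one-liner is the entire action for a parenthesised condition. A self-contained toy grammar to show the mechanism — the real grammar around p_paren is not part of this entry, so the tokens and extra rule below are illustrative only.

import ply.lex as lex
import ply.yacc as yacc

tokens = ('LPAREN', 'RPAREN', 'NAME')
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[A-Za-z_]\w*'
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

def p_condition_name(p):
    """condition : NAME"""
    p[0] = p[1]

def p_paren(p):
    """condition : LPAREN condition RPAREN"""
    p[0] = p[2]   # discard the parentheses, keep the inner condition

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('((flag))', lexer=lexer))   # -> 'flag'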
def run_test(self): <NEW_LINE> <INDENT> self.log.info("Compare responses from getwalletinfo RPC and `educacoin-cli getwalletinfo`") <NEW_LINE> cli_response = self.nodes[0].cli.getwalletinfo() <NEW_LINE> rpc_response = self.nodes[0].getwalletinfo() <NEW_LINE> assert_equal(cli_response, rpc_response) <NEW_LINE> self.log.info("Compare responses from getblockchaininfo RPC and `educacoin-cli getblockchaininfo`") <NEW_LINE> cli_response = self.nodes[0].cli.getblockchaininfo() <NEW_LINE> rpc_response = self.nodes[0].getblockchaininfo() <NEW_LINE> assert_equal(cli_response, rpc_response) <NEW_LINE> user, password = get_auth_cookie(self.nodes[0].datadir) <NEW_LINE> self.log.info("Test -stdinrpcpass option") <NEW_LINE> assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount()) <NEW_LINE> assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo) <NEW_LINE> self.log.info("Test -stdin and -stdinrpcpass") <NEW_LINE> assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo()) <NEW_LINE> assert_raises_process_error(1, "incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo) <NEW_LINE> self.log.info("Make sure that -getinfo with arguments fails") <NEW_LINE> assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help) <NEW_LINE> self.log.info("Compare responses from `educacoin-cli -getinfo` and the RPCs data is retrieved from.") <NEW_LINE> cli_get_info = self.nodes[0].cli('-getinfo').send_cli() <NEW_LINE> wallet_info = self.nodes[0].getwalletinfo() <NEW_LINE> network_info = self.nodes[0].getnetworkinfo() <NEW_LINE> blockchain_info = self.nodes[0].getblockchaininfo() <NEW_LINE> assert_equal(cli_get_info['version'], network_info['version']) <NEW_LINE> assert_equal(cli_get_info['protocolversion'], network_info['protocolversion']) <NEW_LINE> assert_equal(cli_get_info['walletversion'], wallet_info['walletversion']) <NEW_LINE> assert_equal(cli_get_info['balance'], wallet_info['balance']) <NEW_LINE> assert_equal(cli_get_info['blocks'], blockchain_info['blocks']) <NEW_LINE> assert_equal(cli_get_info['timeoffset'], network_info['timeoffset']) <NEW_LINE> assert_equal(cli_get_info['connections'], network_info['connections']) <NEW_LINE> assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy']) <NEW_LINE> assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty']) <NEW_LINE> assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test") <NEW_LINE> assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest']) <NEW_LINE> assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize']) <NEW_LINE> assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee']) <NEW_LINE> assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
Main test logic
625941b207d97122c417861b