Dataset columns: code (string, length 4 to 4.48k), docstring (string, length 1 to 6.45k), _id (string, length 24).
def __init__(self, data): <NEW_LINE> <INDENT> self.data = data <NEW_LINE> self.named = [x for x in self.data][0]
Initialize a new instance of the class
625941b48c3a87329515818e
def save(self): <NEW_LINE> <INDENT> fields = [ 'level' , 'xp', 'likes' ] <NEW_LINE> values = [ str(self.level), str(self.xp), str(self.likes) ] <NEW_LINE> Parser.updateSave(CONST.SAVE_FILE, 'save', fields, values)
Saving the game - writing the state to file savegame.cfg
625941b4b545ff76a8913bf1
def train(self, input_file): <NEW_LINE> <INDENT> f = open(input_file, "r") <NEW_LINE> for document in f: <NEW_LINE> <INDENT> topic, words = self.vectorize(document) <NEW_LINE> self.topic_counter[topic] += 1 <NEW_LINE> for word in words: <NEW_LINE> <INDENT> self.corpus[topic][word] += 1 <NEW_LINE> self.vocab.append(word) <NEW_LINE> <DEDENT> <DEDENT> self.vocab = list(set(self.vocab)) <NEW_LINE> f.close() <NEW_LINE> for word in self.vocab: <NEW_LINE> <INDENT> for topic in self.corpus: <NEW_LINE> <INDENT> smoothed = (self.feat_delta + self.corpus[topic][word]) / (2 * self.feat_delta + self.topic_counter[topic]) <NEW_LINE> self.corpus[topic][word] = smoothed <NEW_LINE> self.class_qsum[topic] += log(1 - smoothed) <NEW_LINE> <DEDENT> <DEDENT> for topic, count in self.topic_counter.items(): <NEW_LINE> <INDENT> self.class_probs[topic] = (self.class_delta + count) / (2 * self.class_delta + sum(self.topic_counter.values())) <NEW_LINE> <DEDENT> for idx, topic in enumerate(self.topic_counter): <NEW_LINE> <INDENT> self.topic_dict[topic] = idx <NEW_LINE> <DEDENT> return self.corpus
Train the model by calculating prior probabilities using a binary feature method :param input_file: :return:
625941b463f4b57ef0000ef4
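The smoothing in train() above is an add-delta (Laplace-style) estimate. A hedged worked instance in Python, with made-up counts (feat_delta, the word count, and the topic's document count are all hypothetical):

from math import log

# Hypothetical numbers: feat_delta = 0.5, a word seen 3 times
# in a topic that covers 10 documents.
feat_delta, word_count, topic_count = 0.5, 3, 10
smoothed = (feat_delta + word_count) / (2 * feat_delta + topic_count)
print(smoothed)            # -> 0.3181818...
print(log(1 - smoothed))   # the term accumulated into class_qsum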
def get_applicable_value_count(self, row, column): <NEW_LINE> <INDENT> return self._candidates[row][column].get_applicable_value_count()
Returns the number of candidate values applicable to the cell with the given coordinates. Args: row (int): The row coordinate of the cell for which the number of applicable candidate values is to be returned. Zero corresponds to the first row, eight corresponds to the last row. column (int): The column coordinate of the cell for which the number of candidate values is to be returned. Zero corresponds to the first column, eight corresponds to the last column.
625941b4d10714528d5ffaae
def _gap_init_(self): <NEW_LINE> <INDENT> return 'CyclotomicField(%s)'%self.__n
Return a string that provides a representation of ``self`` in GAP. TESTS:: sage: K = CyclotomicField(8) sage: gap(K) # indirect doctest CF(8) sage: gap(K.0) E(8) sage: K(gap(K.0^5)); K(gap(K.0^5))==K.0^5 -zeta8 True The following was the motivating example to introduce a genuine representation of cyclotomic fields in the GAP interface -- see :trac:`5618`. :: sage: H = AlternatingGroup(4) sage: g = H.list()[1] sage: K = H.subgroup([g]) sage: z = CyclotomicField(3).an_element(); z zeta3 sage: c = K.character([1,z,z**2]); c Character of Subgroup of (Alternating group of order 4!/2 as a permutation group) generated by [(1,2,3)] sage: c(g^2); z^2 -zeta3 - 1 -zeta3 - 1
625941b4dc8b845886cb5304
def test_linearization(self): <NEW_LINE> <INDENT> x = numpy.asarray([0,1,2,3]) <NEW_LINE> y = numpy.asarray([1,1,1,1]) <NEW_LINE> g = invariant.Guinier() <NEW_LINE> data_in = Data1D(x=x, y=y) <NEW_LINE> data_out = g.linearize_data(data_in) <NEW_LINE> x_out, y_out, dy_out = data_out.x, data_out.y, data_out.dy <NEW_LINE> self.assertEqual(len(x_out), 3) <NEW_LINE> self.assertEqual(len(y_out), 3) <NEW_LINE> self.assertEqual(len(dy_out), 3)
Check that the linearization process filters out points that can't be transformed
625941b40fa83653e4656d8f
def configure(self, sysroot): <NEW_LINE> <INDENT> sysroot.verify_source(self.source)
Complete the configuration of the component.
625941b438b623060ff0abc5
@pytest.mark.parametrize( "years, interests", [([0, 1], [2]), ([1, 4, 2, 3], [1, 9, 8, 19])] ) <NEW_LINE> def test_get_interests_received_for_trustline_positive_balance( ethindex_db_for_currency_network_with_trustlines_and_interests, currency_network_with_trustlines_and_interests_session, web3, chain, accounts, years, interests, wait_for_ethindex_to_sync, ): <NEW_LINE> <INDENT> currency_network = currency_network_with_trustlines_and_interests_session <NEW_LINE> path = [accounts[0], accounts[1], accounts[2], accounts[3]] <NEW_LINE> accrue_interests(currency_network, web3, chain, path, years) <NEW_LINE> wait_for_ethindex_to_sync() <NEW_LINE> accrued_interests = EventsInformationFetcher( ethindex_db_for_currency_network_with_trustlines_and_interests ).get_list_of_paid_interests_for_trustline( currency_network.address, accounts[2], accounts[1] ) <NEW_LINE> list_of_interests = [ accrued_interest.value for accrued_interest in accrued_interests ] <NEW_LINE> assert list_of_interests == interests <NEW_LINE> for accrued_interest in accrued_interests: <NEW_LINE> <INDENT> assert accrued_interest.interest_rate == 1000
Test the interests viewed from B for a transfer A -> B
625941b491af0d3eaac9b7e3
def sketch(self, datasymbol= 'o', derivs= 0, GPcolor= 'blue', nostds= 2): <NEW_LINE> <INDENT> x, y, xnew= self.x, self.y, self.xnew <NEW_LINE> if derivs == 0: <NEW_LINE> <INDENT> f= self.f <NEW_LINE> sd= np.sqrt(self.fvar) <NEW_LINE> if datasymbol: plt.plot(x, y, 'r' + datasymbol) <NEW_LINE> <DEDENT> elif derivs == 1: <NEW_LINE> <INDENT> f= self.df <NEW_LINE> sd= np.sqrt(self.dfvar) <NEW_LINE> <DEDENT> elif derivs == 2: <NEW_LINE> <INDENT> f= self.ddf <NEW_LINE> sd= np.sqrt(self.ddfvar) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('sketch: error in derivs') <NEW_LINE> return <NEW_LINE> <DEDENT> plt.plot(xnew, f, color= GPcolor) <NEW_LINE> plt.fill_between(xnew, f-nostds*sd, f+nostds*sd, facecolor= GPcolor, alpha=0.2)
Plots data with mean prediction plus band of twice the standard deviation. Arguments -- datasymbol: the symbol used to mark the data points (if False do not plot data points) derivs: if 0, plot data and mean prediction; if 1, plot first derivative with respect to x; if 2, plot second derivative GPcolor: color to draw mean and standard deviation of Gaussian process nostds: number of standard deviations to use as errorbars
625941b432920d7e50b27f9b
def Exercise_ba2b_FindMedianString(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser(description=Exercise_ba2b_FindMedianString.__doc__) <NEW_LINE> parser.add_argument('k', type=int, help="The length of patterns to look for") <NEW_LINE> parser.add_argument('dna', type=str, nargs='+', help="A list of DNA sequences to search in") <NEW_LINE> args = parser.parse_args() <NEW_LINE> k = args.k <NEW_LINE> dna = args.dna <NEW_LINE> result = utils.FindMedianString(k, dna) <NEW_LINE> print ('The median string is:', result)
Find a string that minimizes the hamming distance between itself and any pattern in a list of DNA sequences.
625941b48e05c05ec3eea141
def getPssn(self, iterNum): <NEW_LINE> <INDENT> reMatchStr = r"\w*PSSN.txt" <NEW_LINE> matchFilePath = self._getMatchFilePathInIter(reMatchStr, iterNum) <NEW_LINE> data = np.loadtxt(matchFilePath) <NEW_LINE> pssn = data[0, 0:31] <NEW_LINE> return pssn
Get the PSSN in specific iteration number. PSSN: Normalized point source sensitivity. Parameters ---------- iterNum : int Iteration number. Returns ------- ndarray PSSN data.
625941b476e4537e8c351447
def get(self, request, *args, **kwargs): <NEW_LINE> <INDENT> return get_object_or_404(models.Brother, slug=self.request.user.username)
Load the member
625941b45510c4643540f1ca
def test__api_stringsearchchoices_bad_search(self): <NEW_LINE> <INDENT> url = '/__api/stringsearchchoices/volumeid.json?fredethel=2&reqno=123' <NEW_LINE> self._run_status_equal(url, 404, HTTP404_SEARCH_PARAMS_INVALID( '/__api/stringsearchchoices/volumeid.json'))
[test_search_api.py] /api/stringsearchchoices: bad search
625941b4d6c5a10208143e17
def test_coins(self): <NEW_LINE> <INDENT> for value, denoms, coins in self.known_coins: <NEW_LINE> <INDENT> result = dynamic_practice.min_coins(value, denoms) <NEW_LINE> self.assertEqual(result, coins)
Test min-change finder
625941b455399d3f05588484
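dynamic_practice.min_coins itself is not included in this dump; the following is a minimal sketch of the classic bottom-up DP the test presumably exercises (only the name and signature come from the test, the body is an assumption):

def min_coins(value, denoms):
    # best[v] = fewest coins summing to v; INF marks unreachable totals
    INF = float('inf')
    best = [0] + [INF] * value
    for v in range(1, value + 1):
        for d in denoms:
            if d <= v and best[v - d] + 1 < best[v]:
                best[v] = best[v - d] + 1
    return best[value]

assert min_coins(11, [1, 5, 10]) == 2   # 10 + 1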
def __call__(self, query, key, value=None, mask=None): <NEW_LINE> <INDENT> return super(AttentionCell, self).__call__(query, key, value, mask)
Compute the attention. Parameters ---------- query : Symbol or NDArray Query vector. Shape (batch_size, query_length, query_dim) key : Symbol or NDArray Key of the memory. Shape (batch_size, memory_length, key_dim) value : Symbol or NDArray or None, default None Value of the memory. If set to None, the value will be set as the key. Shape (batch_size, memory_length, value_dim) mask : Symbol or NDArray or None, default None Mask of the memory slots. Shape (batch_size, query_length, memory_length) Only contains 0 or 1 where 0 means that the memory slot will not be used. If set to None. No mask will be used. Returns ------- context_vec : Symbol or NDArray Shape (batch_size, query_length, context_vec_dim) att_weights : Symbol or NDArray Attention weights. Shape (batch_size, query_length, memory_length)
625941b482261d6c526ab273
def validate(self): <NEW_LINE> <INDENT> from .files import File <NEW_LINE> assert ( self.source_id is not None ), "Assumption Failed: Node must have a source_id" <NEW_LINE> assert isinstance( self.title, str ), "Assumption Failed: Node title is not a string" <NEW_LINE> assert ( len(self.title.strip()) > 0 ), "Assumption Failed: Node title cannot be empty" <NEW_LINE> assert ( isinstance(self.description, str) or self.description is None ), "Assumption Failed: Node description is not a string" <NEW_LINE> assert isinstance( self.children, list ), "Assumption Failed: Node children is not a list" <NEW_LINE> for f in self.files: <NEW_LINE> <INDENT> assert isinstance(f, File), "Assumption Failed: files must be file class" <NEW_LINE> f.validate() <NEW_LINE> <DEDENT> source_ids = [c.source_id for c in self.children] <NEW_LINE> duplicates = set([x for x in source_ids if source_ids.count(x) > 1]) <NEW_LINE> assert ( len(duplicates) == 0 ), "Assumption Failed: Node must have unique source id among siblings ({} appears multiple times)".format( duplicates ) <NEW_LINE> return True
validate: Makes sure node is valid Args: None Returns: boolean indicating if node is valid
625941b415baa723493c3d42
def get_settings( parent ): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> assert(isinstance(parent, h5py._hl.group.Group)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> raise TypeError('Something wrong with the input.') <NEW_LINE> <DEDENT> if not parent.attrs['type'] == np.string_(cur_set_vers): <NEW_LINE> <INDENT> print('Don\'t know the format of these settings.') <NEW_LINE> return None <NEW_LINE> <DEDENT> settings = {} <NEW_LINE> settings['lmax_r'] = parent.attrs['lmax_r'] <NEW_LINE> settings['lmax_thresh'] = parent.attrs['lmax_thresh'] <NEW_LINE> settings['lmax_cinit'] = parent.attrs['lmax_cinit'] <NEW_LINE> settings['lmax_range'] = parent.attrs['lmax_range'] <NEW_LINE> settings['ns'] = parent.attrs['ns'] <NEW_LINE> settings['fit_rrange'] = parent.attrs['fit_rrange'] <NEW_LINE> settings['back_xs'] = parent.attrs['back_xs'] <NEW_LINE> settings['back_xswidth'] = parent.attrs['back_xswidth'] <NEW_LINE> settings['back_init'] = parent.attrs['back_init'] <NEW_LINE> settings['fit_init'] = parent.attrs['fit_init'] <NEW_LINE> if isinstance(parent.attrs['fit_funcs'], np.ndarray): <NEW_LINE> <INDENT> in_funcs = parent.attrs['fit_funcs'][0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> in_funcs = parent.attrs['fit_funcs'] <NEW_LINE> <DEDENT> in_funcs = in_funcs.split(b',') <NEW_LINE> out_funcs = [] <NEW_LINE> for i in range(len(in_funcs)): <NEW_LINE> <INDENT> out_funcs.append(in_funcs[i].decode('utf-8').strip()) <NEW_LINE> <DEDENT> settings['fit_funcs'] = tuple(out_funcs) <NEW_LINE> if 'plt_imgminmax' in parent.attrs: <NEW_LINE> <INDENT> settings['plt_imgminmax'] = parent.attrs['plt_imgminmax'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['plt_imgminmax'] = None <NEW_LINE> <DEDENT> if 'rad_rmax' in parent.attrs: <NEW_LINE> <INDENT> settings['rad_rmax'] = parent.attrs['rad_rmax'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['rad_rmax'] = None <NEW_LINE> <DEDENT> if 'rad_dr' in parent.attrs: <NEW_LINE> <INDENT> settings['rad_dr'] = parent.attrs['rad_dr'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['rad_dr'] = None <NEW_LINE> <DEDENT> if 'rad_sigma' in parent.attrs: <NEW_LINE> <INDENT> settings['rad_sigma'] = parent.attrs['rad_sigma'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['rad_sigma'] = None <NEW_LINE> <DEDENT> if 'mask' in parent: <NEW_LINE> <INDENT> settings['mask'] = np.copy(parent['mask']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['mask'] = None <NEW_LINE> <DEDENT> if 'fit_maxfev' in parent.attrs: <NEW_LINE> <INDENT> settings['fit_maxfev'] = parent.attrs['fit_maxfev'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings['fit_maxfev'] = None <NEW_LINE> <DEDENT> return settings
Get settings for radial profile evaluation. Parameters ---------- parent : h5py.Group Input group. Returns ------- : dict Settings read from parent.
625941b4be7bc26dc91cd3d7
def gen_doc_embedding(model, text): <NEW_LINE> <INDENT> words = [x for x in jieba.lcut(text) if x not in stopwords] <NEW_LINE> vect_list = [] <NEW_LINE> for w in words: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> vect_list.append(model.wv[w]) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> if len(vect_list) <= 0: <NEW_LINE> <INDENT> vect_list = [_zero_vec] <NEW_LINE> <DEDENT> vect_list = numpy.array(vect_list) <NEW_LINE> vect = vect_list.sum(axis=0) <NEW_LINE> vec = vect / numpy.sqrt((vect ** 2).sum()) <NEW_LINE> vec = vec / linalg.norm(vec) <NEW_LINE> return vec
:param model: :param text: :return: vector [float]. Note that it is not guaranteed to be float32, since float32 does not exist as a native Python type; the actual current value is numpy.array(dtype=float32)
625941b42c8b7c6e89b3559b
def test_incident_assignment_policy_no_default(self): <NEW_LINE> <INDENT> context = {'assigned_policy_id': 'policy_id_to_assign'} <NEW_LINE> assigned_key, assigned_value = self._dispatcher._incident_assignment(context) <NEW_LINE> assert_equal(assigned_key, 'escalation_policy') <NEW_LINE> assert_equal(assigned_value['id'], 'policy_id_to_assign') <NEW_LINE> assert_equal(assigned_value['type'], 'escalation_policy_reference')
PagerDutyIncidentOutput - Incident Assignment Policy (No Default)
625941b4ad47b63b2c509d5c
def Input(self, number, controled=None): <NEW_LINE> <INDENT> if self.selected == None: return <NEW_LINE> if controled == -1: <NEW_LINE> <INDENT> if self.elementList[self.selected].number.preNumber == None or self.elementList[self.selected].number.preNumber == 0: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = self.elementList[self.selected].number.preNumber // 10 <NEW_LINE> <DEDENT> <DEDENT> elif controled == 1: <NEW_LINE> <INDENT> if self.elementList[self.selected].number.preNumber == None or self.elementList[self.selected].number.preNumber == 0: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.elementList[self.selected].value = self.elementList[self.selected].number.preNumber <NEW_LINE> self.elementList[self.selected].number.number = self.elementList[self.selected].number.preNumber <NEW_LINE> self.elementList[self.selected].number.preNumber = None <NEW_LINE> self.elementList[self.selected].number.UpdateNumberRect(self.selected, self.length) <NEW_LINE> <DEDENT> self.Deselect() <NEW_LINE> <DEDENT> elif controled == -2: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = None <NEW_LINE> self.Deselect() <NEW_LINE> <DEDENT> elif controled == 2: <NEW_LINE> <INDENT> if self.elementList[self.selected].number.preNumber == None or self.elementList[self.selected].number.preNumber == 0: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.elementList[self.selected].value = self.elementList[self.selected].number.preNumber <NEW_LINE> self.elementList[self.selected].number.number = self.elementList[self.selected].number.preNumber <NEW_LINE> self.elementList[self.selected].number.preNumber = None <NEW_LINE> self.elementList[self.selected].number.UpdateNumberRect(self.selected, self.length) <NEW_LINE> <DEDENT> if self.selected < self.length - 1: <NEW_LINE> <INDENT> self.Select(self.selected + 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.Deselect() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if self.elementList[self.selected].number.preNumber == None: <NEW_LINE> <INDENT> self.elementList[self.selected].number.preNumber = 0 <NEW_LINE> <DEDENT> self.elementList[self.selected].number.preNumber = self.elementList[self.selected].number.preNumber * 10 + number
Control input options
625941b45fdd1c0f98dc0002
def get_dataset_names(base_url='data'): <NEW_LINE> <INDENT> return [x for x in os.listdir(base_url) if os.path.exists(os.path.join(base_url, x, CSV_FILE_NAME))]
gets list of datasets in base_url
625941b4ec188e330fd5a578
def save(self, path, fileformat=None): <NEW_LINE> <INDENT> if not self.is_loaded(): <NEW_LINE> <INDENT> raise AlbumArtError('AlbumArt not yet initialized.') <NEW_LINE> <DEDENT> if fileformat is None: <NEW_LINE> <INDENT> fileformat = self.get_fileformat() <NEW_LINE> <DEDENT> if os.path.isdir(path): <NEW_LINE> <INDENT> path = os.path.join(path, DEFAULT_ARTWORK_FILENAME) <NEW_LINE> <DEDENT> if os.path.isfile(path): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> os.unlink(path) <NEW_LINE> <DEDENT> except IOError as e: <NEW_LINE> <INDENT> raise AlbumArtError('Error removing existing file {}: {}'.format( path, e, )) <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> self.__image.save(path, fileformat) <NEW_LINE> <DEDENT> except IOError as e: <NEW_LINE> <INDENT> raise AlbumArtError('Error saving {}: {}'.format( path, e, ))
Saves the image data to the given target file. If the target filename exists, it is removed before saving.
625941b4d7e4931a7ee9dcec
def flight_message_handler(context): <NEW_LINE> <INDENT> flight_string_list = list() <NEW_LINE> flights = context["flights"] <NEW_LINE> text_to_send = "Выберите понравившийся рейс и введите его цифру:" <NEW_LINE> for num, flight in enumerate(flights): <NEW_LINE> <INDENT> date_departure = date_convert(flight['departure']) <NEW_LINE> date_arrival = date_convert(flight['arrival']) <NEW_LINE> string_flight = f"{context['from_city']} ({flight['airport_from']}) {date_departure} ----> " f"{context['to_city']} ({flight['airport_to']}) {date_arrival}" <NEW_LINE> string_flight_iter = f"\n{num + 1}. " + string_flight <NEW_LINE> text_to_send += string_flight_iter <NEW_LINE> flight_string_list.append(string_flight) <NEW_LINE> <DEDENT> context["flights_string"] = flight_string_list <NEW_LINE> return text_to_send
Build the message listing the available flights
625941b421bff66bcd684726
def test_magn_grad_flns_2(self): <NEW_LINE> <INDENT> fx = np.array([ [1, 2, 3], [1, 2, 3], [1, 2, 3] ]) <NEW_LINE> fy = np.array([ [0, 0, 0], [0, 0, 0], [0, 0, 0] ]) <NEW_LINE> oriens_d = orients_d(fx, fy) <NEW_LINE> assert np.allclose( oriens_d, 90.0) <NEW_LINE> mag = magnitude(fx, fy) <NEW_LINE> assert np.allclose( mag, np.sqrt(fx**2 + fy**2)) <NEW_LINE> mgflwlns = magn_grad_along_flowlines( fld_x=fx, fld_y=fy, cell_size_x=10, cell_size_y=10) <NEW_LINE> assert np.allclose( mgflwlns, 0.1)
Test the gradient calculations. :return:
625941b4b830903b967e96e9
def canonicalize(number): <NEW_LINE> <INDENT> if 'number_country' in CONF: <NEW_LINE> <INDENT> number_country = CONF['number_country'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> number_country = 'US' <NEW_LINE> <DEDENT> if number_country == "PH" and number.startswith("63"): <NEW_LINE> <INDENT> number = number[2:] <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> canon = convert_to_e164(number, country=number_country) <NEW_LINE> <DEDENT> except phonenumbers.phonenumberutil.NumberParseException: <NEW_LINE> <INDENT> canon = number <NEW_LINE> <DEDENT> if canon[0] == '+': <NEW_LINE> <INDENT> canon = canon[1:] <NEW_LINE> <DEDENT> return str(canon)
Force a dialed number to have a country code. If the number arg has a leading + sign, the number_country's prefix will not be added to the front.
625941b473bcbd0ca4b2be4d
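convert_to_e164 in canonicalize() above is a project helper rather than part of the phonenumbers package; a hedged stand-in built on the real phonenumbers API could look like this:

import phonenumbers

def convert_to_e164(number, country='US'):
    # Parse with a default region, then render in E.164 ('+' prefix).
    parsed = phonenumbers.parse(number, country)
    return phonenumbers.format_number(parsed, phonenumbers.PhoneNumberFormat.E164)

print(convert_to_e164('(415) 555-0100'))  # -> +14155550100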
def decode_oct_str(encoded): <NEW_LINE> <INDENT> decoded = [] <NEW_LINE> l = len(encoded) <NEW_LINE> i = 0 <NEW_LINE> while i < l: <NEW_LINE> <INDENT> c = encoded[i] <NEW_LINE> i += 1 <NEW_LINE> if c != '\\': <NEW_LINE> <INDENT> decoded.append(c) <NEW_LINE> continue <NEW_LINE> <DEDENT> if i + 3 > l: <NEW_LINE> <INDENT> reason = "Expected an octal value in a longer string: %s" % (encoded[i:],) <NEW_LINE> raise ValueError(reason) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> c = chr(int(encoded[i : i + 3], 8)) <NEW_LINE> decoded.append(c) <NEW_LINE> i += 3 <NEW_LINE> <DEDENT> <DEDENT> return ''.join(decoded)
Returns a decoded string from the kernel's encoding under /proc/mounts. Raises ValueError on invalid input.
625941b4fff4ab517eb2f209
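A quick usage sketch for decode_oct_str above: the kernel writes a space inside a mount path under /proc/mounts as the three-digit octal escape \040.

# The raw string keeps the backslash literal, as read from /proc/mounts.
encoded = r'/mnt/usb\040drive'
print(decode_oct_str(encoded))  # -> /mnt/usb drive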
def __init__(self, end_time=None, log=None, numof_summarize=None, numof_tweets=None, process_time=None, query=None, start_time=None, tweets=None): <NEW_LINE> <INDENT> self._end_time = None <NEW_LINE> self._log = None <NEW_LINE> self._numof_summarize = None <NEW_LINE> self._numof_tweets = None <NEW_LINE> self._process_time = None <NEW_LINE> self._query = None <NEW_LINE> self._start_time = None <NEW_LINE> self._tweets = None <NEW_LINE> self.discriminator = None <NEW_LINE> self.end_time = end_time <NEW_LINE> self.log = log <NEW_LINE> self.numof_summarize = numof_summarize <NEW_LINE> self.numof_tweets = numof_tweets <NEW_LINE> self.process_time = process_time <NEW_LINE> self.query = query <NEW_LINE> self.start_time = start_time <NEW_LINE> self.tweets = tweets
TwitterSummarizeResponseEntity - a model defined in Swagger
625941b4de87d2750b85fb5e
def __init__(self, app, wallet, allowed_methods=None): <NEW_LINE> <INDENT> if allowed_methods is None: <NEW_LINE> <INDENT> self.allowed_methods = [ PaymentChannel(*flask_channel_adapter(app, PaymentServer(wallet))), OnChain(wallet), BitTransfer(wallet)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.allowed_methods = allowed_methods
Configure bitserv settings. Args: app (flask.Flask): A flask app to wrap payment handling around. wallet (two1.lib.wallet.Wallet): The merchant's wallet instance.
625941b416aa5153ce362249
def version() -> str: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with contextlib.closing(Database(':memory:')) as in_memory_db: <NEW_LINE> <INDENT> return in_memory_db.query("select sqlite_version()").run().value() <NEW_LINE> <DEDENT> <DEDENT> except KnownError as e: <NEW_LINE> <INDENT> return f'UNAVAILABLE ({e})'
Return the sqlite version string.
625941b4099cdd3c635f0a2d
@follows(mkdir("meme.dir")) <NEW_LINE> @transform(exportMotifDiscoverySequences, regex("(.*).discovery.fasta"), r"meme.dir/\1.meme") <NEW_LINE> def runMeme(infile, outfile): <NEW_LINE> <INDENT> track = P.snip(infile, ".discovery.fasta") <NEW_LINE> PipelineMotifs.runMEMEOnSequences(infile, outfile)
run MEME to find motifs. In order to increase the signal/noise ratio, MEME is not run on all intervals but only the top 10% of intervals (peakval) are used. Also, only the segment of 200 bp around the peak is used and not the complete interval. * Softmasked sequence is converted to hardmasked sequence to avoid the detection of spurious motifs. * Sequence is run through dustmasker
625941b4eab8aa0e5d26d92f
def classification_signature_def(examples, classes, scores): <NEW_LINE> <INDENT> if examples is None: <NEW_LINE> <INDENT> raise ValueError('Classification examples cannot be None.') <NEW_LINE> <DEDENT> if not isinstance(examples, ops.Tensor): <NEW_LINE> <INDENT> raise ValueError('Classification examples must be a string Tensor.') <NEW_LINE> <DEDENT> if classes is None and scores is None: <NEW_LINE> <INDENT> raise ValueError('Classification classes and scores cannot both be None.') <NEW_LINE> <DEDENT> input_tensor_info = utils.build_tensor_info(examples) <NEW_LINE> if input_tensor_info.dtype != types_pb2.DT_STRING: <NEW_LINE> <INDENT> raise ValueError('Classification examples must be a string Tensor.') <NEW_LINE> <DEDENT> signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info} <NEW_LINE> signature_outputs = {} <NEW_LINE> if classes is not None: <NEW_LINE> <INDENT> classes_tensor_info = utils.build_tensor_info(classes) <NEW_LINE> if classes_tensor_info.dtype != types_pb2.DT_STRING: <NEW_LINE> <INDENT> raise ValueError('Classification classes must be a string Tensor.') <NEW_LINE> <DEDENT> signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = ( classes_tensor_info) <NEW_LINE> <DEDENT> if scores is not None: <NEW_LINE> <INDENT> scores_tensor_info = utils.build_tensor_info(scores) <NEW_LINE> if scores_tensor_info.dtype != types_pb2.DT_FLOAT: <NEW_LINE> <INDENT> raise ValueError('Classification scores must be a float Tensor.') <NEW_LINE> <DEDENT> signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = ( scores_tensor_info) <NEW_LINE> <DEDENT> signature_def = build_signature_def( signature_inputs, signature_outputs, signature_constants.CLASSIFY_METHOD_NAME) <NEW_LINE> return signature_def
Creates classification signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Classify API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. classes: A string `Tensor`. Note that the ClassificationResponse message requires that class labels are strings, not integers or anything else. scores: a float `Tensor`. Returns: A classification-flavored signature_def. Raises: ValueError: If examples is `None`.
625941b431939e2706e4cc42
def start(self): <NEW_LINE> <INDENT> if self.lircthread is not None: <NEW_LINE> <INDENT> self.lircthread.start()
fire up IR read loop
625941b43346ee7daa2b2b39
def meter_step(self, sim_step): <NEW_LINE> <INDENT> if self.get_state() == 1: <NEW_LINE> <INDENT> self.red_increment(sim_step) <NEW_LINE> if self.get_detector() == 1 and self.RedTimer >= self.redTime: <NEW_LINE> <INDENT> self.change2green() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.green_increment(sim_step) <NEW_LINE> if self.GreenTimer >= self.greenTime: <NEW_LINE> <INDENT> self.change2red()
update the meter states for one simulation step
625941b4460517430c393f61
def get_word_score(input_word, word_length): <NEW_LINE> <INDENT> letter_values = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10} <NEW_LINE> word_score = 0 <NEW_LINE> for char in input_word: <NEW_LINE> <INDENT> word_score = word_score + letter_values[char] <NEW_LINE> <DEDENT> if len(input_word) == word_length: <NEW_LINE> <INDENT> return ((word_score)*len(input_word))+50 <NEW_LINE> <DEDENT> return (word_score)*len(input_word)
Returns the score for a word. Assumes the word is a valid word. The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, PLUS 50 points if all n letters are used on the first turn. Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES) word: string (lowercase letters) n: integer (HAND_SIZE; i.e., hand size required for additional points) returns: int >= 0
625941b4956e5f7376d70c4d
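A worked example for get_word_score above, using the Scrabble values from its letter_values table and a hypothetical hand size of 7:

# q(10) + u(1) + i(1) + z(10) + z(10) + e(1) + d(2) = 35 points,
# times 7 letters = 245, plus the 50-point bonus for using the whole hand.
print(get_word_score('quizzed', 7))  # -> 295
print(get_word_score('cat', 7))      # -> (3 + 1 + 1) * 3 = 15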
def validate(self): <NEW_LINE> <INDENT> super(cls, self).validate() <NEW_LINE> r = self.makeControl() <NEW_LINE> r.validate()
Validate the config object by constructing a control object and using a C++ validate() implementation.
625941b496565a6dacc8f4a6
def createdPimo(self): <NEW_LINE> <INDENT> return QUrl()
static QUrl Nepomuk.Vocabulary.PIMO.createdPimo()
625941b40c0af96317bb7fba
def setExtension(self, QWidget): <NEW_LINE> <INDENT> pass
QDialog.setExtension(QWidget)
625941b48a349b6b435e7f46
def create_level(metadata, name=None, dimension=None): <NEW_LINE> <INDENT> metadata = dict(expand_level_metadata(metadata)) <NEW_LINE> try: <NEW_LINE> <INDENT> name = name or metadata.pop("name") <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise ModelError("No name specified in level metadata") <NEW_LINE> <DEDENT> attributes = attribute_list(metadata.pop("attributes")) <NEW_LINE> for attribute in attributes: <NEW_LINE> <INDENT> attribute.dimension = dimension <NEW_LINE> <DEDENT> return Level(name=name, attributes=attributes, **metadata)
Create a level object from metadata. `name` can override level name in the metadata.
625941b4925a0f43d2549c44
def put_whitespace(in_str): <NEW_LINE> <INDENT> return in_str.replace(".", " ").replace("-", " ").replace("_", " ")
Replace periods (.), underscores (_) and hyphens (-) in `in_str` with spaces
625941b45166f23b2e1a4f2a
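A one-line check of put_whitespace above:

print(put_whitespace('my-file_name.v2.txt'))  # -> 'my file name v2 txt'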
def invoke_compucell_mac(io_manager): <NEW_LINE> <INDENT> def batch_completed(): <NEW_LINE> <INDENT> required_dir = os.path.join(io_manager.screenshot_output_path, str(io_manager.number_of_runs() - 1)) <NEW_LINE> return os.path.isdir(required_dir) <NEW_LINE> <DEDENT> while not batch_completed(): <NEW_LINE> <INDENT> cmd = 'osascript {} & {}'.format(io_manager.apple_script_dir.replace(' ','\ '), io_manager.cc3d_command_dir) <NEW_LINE> shell_output = subprocess.check_output(cmd, shell=True) <NEW_LINE> print(shell_output)
BETA: this function will restart CompuCell if it crashes before the completion of the batch runs. It handles crashes in a way that only works on macOS.
625941b4dc8b845886cb5305
def test_hash_unicode(self): <NEW_LINE> <INDENT> text = "Příšerně žluťoučký kůň úpěl ďábelské ódy" <NEW_LINE> text_hash = calculate_hash(text) <NEW_LINE> self.assertEqual(text_hash, -4296353750398394478) <NEW_LINE> self.assertEqual(text_hash, calculate_hash(text))
Ensure hash works for unicode.
625941b4cb5e8a47e48b7882
def _flushHeaderMessage(self, buf, wout, wsz): <NEW_LINE> <INDENT> transform_data = StringIO() <NEW_LINE> num_transforms = len(self.__write_transforms) <NEW_LINE> for trans_id in self.__write_transforms: <NEW_LINE> <INDENT> transform_data.write(getVarint(trans_id)) <NEW_LINE> <DEDENT> if self.__identity: <NEW_LINE> <INDENT> self.__write_headers[self.ID_VERSION_HEADER] = self.ID_VERSION <NEW_LINE> self.__write_headers[self.IDENTITY_HEADER] = self.__identity <NEW_LINE> <DEDENT> info_data = StringIO() <NEW_LINE> _flush_info_headers(info_data, self.get_write_persistent_headers(), INFO.PERSISTENT) <NEW_LINE> _flush_info_headers(info_data, self.__write_headers, INFO.NORMAL) <NEW_LINE> header_data = StringIO() <NEW_LINE> header_data.write(getVarint(self.__proto_id)) <NEW_LINE> header_data.write(getVarint(num_transforms)) <NEW_LINE> header_size = transform_data.tell() + header_data.tell() + info_data.tell() <NEW_LINE> padding_size = 4 - (header_size % 4) <NEW_LINE> header_size = header_size + padding_size <NEW_LINE> wsz += header_size + 10 <NEW_LINE> if wsz > MAX_FRAME_SIZE: <NEW_LINE> <INDENT> buf.write(pack("!I", BIG_FRAME_MAGIC)) <NEW_LINE> buf.write(pack("!Q", wsz)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> buf.write(pack("!I", wsz)) <NEW_LINE> <DEDENT> buf.write(pack("!HH", HEADER_MAGIC >> 16, self.__flags)) <NEW_LINE> buf.write(pack("!I", self.seq_id)) <NEW_LINE> buf.write(pack("!H", header_size // 4)) <NEW_LINE> buf.write(header_data.getvalue()) <NEW_LINE> buf.write(transform_data.getvalue()) <NEW_LINE> buf.write(info_data.getvalue()) <NEW_LINE> for x in range(0, padding_size, 1): <NEW_LINE> <INDENT> buf.write(pack("!c", b'\0')) <NEW_LINE> <DEDENT> buf.write(wout)
Write a message for CLIENT_TYPE.HEADER @param buf(StringIO): Buffer to write message to @param wout(str): Payload @param wsz(int): Payload length
625941b415baa723493c3d43
def on_BTNAñadirComanda_clicked(self, witget, data=None): <NEW_LINE> <INDENT> idmesa = self.CMBMesasServicios.get_active() <NEW_LINE> nombreServicio = self.CMBComidas.get_active_text() <NEW_LINE> cantidad = self.etCantidadComida.get_text() <NEW_LINE> nombreMesa = self.CMBMesasServicios.get_active_text() <NEW_LINE> if nombreMesa == "Mesa 1": <NEW_LINE> <INDENT> idMesa = 1 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 2": <NEW_LINE> <INDENT> idMesa = 2 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 3": <NEW_LINE> <INDENT> idMesa = 3 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 4": <NEW_LINE> <INDENT> idMesa = 4 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 5": <NEW_LINE> <INDENT> idMesa = 5 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 6": <NEW_LINE> <INDENT> idMesa = 6 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 7": <NEW_LINE> <INDENT> idMesa = 7 <NEW_LINE> <DEDENT> if nombreMesa == "Mesa 8": <NEW_LINE> <INDENT> idMesa = 8 <NEW_LINE> <DEDENT> if idmesa == 0: <NEW_LINE> <INDENT> self.winErrores.show() <NEW_LINE> self.lblError.set_text("Selecciona una mesa.") <NEW_LINE> <DEDENT> if nombreServicio == None: <NEW_LINE> <INDENT> self.winErrores.show() <NEW_LINE> self.lblError.set_text("Selecciona un servicio.") <NEW_LINE> <DEDENT> if cantidad == "": <NEW_LINE> <INDENT> self.winErrores.show() <NEW_LINE> self.lblError.set_text("Selecciona una cantidad.") <NEW_LINE> <DEDENT> if idmesa != 0 and nombreServicio != None and cantidad != "": <NEW_LINE> <INDENT> BBDD.AñadirServicioFacturaLista(idmesa, nombreServicio, cantidad) <NEW_LINE> self.etCantidadComida.set_text("") <NEW_LINE> BBDD.CargaServiciosMesaNormal(self.ListaComandas, self.treeServicios, idMesa)
# Action to add an order to the bill of a selected table
625941b46aa9bd52df036b73
def unexplainable_Trends(pval, slope, test_region, change, sign): <NEW_LINE> <INDENT> if pval > 0.1: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> haz_slope = DATA_ATTR.loc[DATA_ATTR['Region'] == test_region, change].sum() <NEW_LINE> if (haz_slope < 0) and (slope > 0): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if (haz_slope > 0) and (slope < 0): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True
Check for the presence of an unexplainable trend after adjustment for teleconnections Parameters ---------- pval : float p-value of residual trend slope : float slope of residual trend test_region : string region change : slope slope of trend in damages sign : float significance of slope in damages Returns ------- bool
625941b4bde94217f3682bcf
def test_lyrics(self): <NEW_LINE> <INDENT> result = requests.get(f"{URL}/lyrics+{Song_name}").json()["output"] <NEW_LINE> self.assertEqual(result, lyrics)
Checks whether the `Lyrics API` returns the same lyrics for a song when run
625941b426238365f5f0ec3a
def _simulate_light_curves_worker(args): <NEW_LINE> <INDENT> new_light_curve, indices, num = args <NEW_LINE> for ii in range(num): <NEW_LINE> <INDENT> mag = np.random.normal(np.median(new_light_curve.mag), new_light_curve.error) <NEW_LINE> new_light_curve.mag = mag <NEW_LINE> new_light_curve.error += np.random.normal(0., new_light_curve.error/20.) <NEW_LINE> one_computed_var_indices = compute_variability_indices(new_light_curve, indices, return_tuple=True) <NEW_LINE> try: <NEW_LINE> <INDENT> computed_var_indices = np.vstack((computed_var_indices, one_computed_var_indices)) <NEW_LINE> <DEDENT> except NameError: <NEW_LINE> <INDENT> computed_var_indices = one_computed_var_indices <NEW_LINE> <DEDENT> <DEDENT> return computed_var_indices
The function that takes the input light curve and simulates new light curves based on the MJD and error arrays. This function is meant to be the 'worker' function in a multiprocessing pool.
625941b44d74a7450ccd3f95
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True): <NEW_LINE> <INDENT> if not gfile.Exists(vocabulary_path): <NEW_LINE> <INDENT> print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) <NEW_LINE> vocab = {} <NEW_LINE> with gfile.GFile(data_path, mode="rb") as f: <NEW_LINE> <INDENT> counter = 0 <NEW_LINE> for line in f: <NEW_LINE> <INDENT> counter += 1 <NEW_LINE> line = line.strip().split('\t')[0] <NEW_LINE> if counter % 100000 == 0: <NEW_LINE> <INDENT> print(" processing line %d" % counter) <NEW_LINE> <DEDENT> tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) <NEW_LINE> for w in tokens: <NEW_LINE> <INDENT> word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w <NEW_LINE> if word in vocab: <NEW_LINE> <INDENT> vocab[word] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> vocab[word] = 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> sorted_vocab = sorted(vocab, key=vocab.get, reverse=True) <NEW_LINE> vocab_list = _START_VOCAB + sorted_vocab <NEW_LINE> if len(vocab_list) > max_vocabulary_size: <NEW_LINE> <INDENT> vocab_list = vocab_list[:max_vocabulary_size] <NEW_LINE> print("Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d." % ( data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d." % ( data_path, sum(vocab.values()), len(vocab), len(vocab), 0)) <NEW_LINE> <DEDENT> with gfile.GFile(vocabulary_path, mode="wb") as vocab_file: <NEW_LINE> <INDENT> for w in vocab_list: <NEW_LINE> <INDENT> vocab_file.write(w + b"\n")
Create vocabulary file (if it does not exist yet) from data file. Data file is assumed to contain one sentence per line. Each sentence is tokenized and digits are normalized (if normalize_digits is set). Vocabulary contains the most-frequent tokens up to max_vocabulary_size. We write it to vocabulary_path in a one-token-per-line format, so that later token in the first line gets id=0, second line gets id=1, and so on. Args: vocabulary_path: path where the vocabulary will be created. data_path: data file that will be used to create vocabulary. max_vocabulary_size: limit on the size of the created vocabulary. tokenizer: a function to use to tokenize each data sentence; if None, basic_tokenizer will be used. normalize_digits: Boolean; if true, all digits are replaced by 0s.
625941b46e29344779a623e8
def test_solar_flux(self): <NEW_LINE> <INDENT> sflux = self.solar_irr.inband_solarflux(self.rsr, scale=1.0) <NEW_LINE> self.assertAlmostEqual(sflux, 2.002927627)
Calculate the solar-flux.
625941b48e05c05ec3eea142
def save(self, id_user, username, nume, nr_telefon, tip, password): <NEW_LINE> <INDENT> user = User(id_user, username, nume, nr_telefon, tip, password) <NEW_LINE> self.__validator.validate(user) <NEW_LINE> self.__user_model.insertUser(user)
Receives the fields that define a user, validates them, and inserts them into the DB through the Model
625941b421a7993f00bc7aba
@beeline.traced('instructor.views._section_send_email') <NEW_LINE> def _section_send_email(course, access): <NEW_LINE> <INDENT> course_key = course.id <NEW_LINE> with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types): <NEW_LINE> <INDENT> html_module = HtmlBlock( course.system, DictFieldData({'data': ''}), ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake')) ) <NEW_LINE> fragment = course.system.render(html_module, 'studio_view') <NEW_LINE> <DEDENT> fragment = wrap_xblock( 'LmsRuntime', html_module, 'studio_view', fragment, None, extra_data={"course-id": six.text_type(course_key)}, usage_id_serializer=lambda usage_id: quote_slashes(six.text_type(usage_id)), request_token=uuid.uuid1().hex ) <NEW_LINE> cohorts = [] <NEW_LINE> if is_course_cohorted(course_key): <NEW_LINE> <INDENT> cohorts = get_course_cohorts(course) <NEW_LINE> <DEDENT> course_modes = [] <NEW_LINE> if not VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key): <NEW_LINE> <INDENT> course_modes = CourseMode.modes_for_course(course_key, include_expired=True, only_selectable=False) <NEW_LINE> <DEDENT> email_editor = fragment.content <NEW_LINE> section_data = { 'section_key': 'send_email', 'section_display_name': _('Email'), 'access': access, 'send_email': reverse('send_email', kwargs={'course_id': six.text_type(course_key)}), 'editor': email_editor, 'cohorts': cohorts, 'course_modes': course_modes, 'default_cohort_name': DEFAULT_COHORT_NAME, 'list_instructor_tasks_url': reverse( 'list_instructor_tasks', kwargs={'course_id': six.text_type(course_key)} ), 'email_background_tasks_url': reverse( 'list_background_email_tasks', kwargs={'course_id': six.text_type(course_key)} ), 'email_content_history_url': reverse( 'list_email_content', kwargs={'course_id': six.text_type(course_key)} ), } <NEW_LINE> return section_data
Provide data for the corresponding bulk email section
625941b4293b9510aa2c306b
def should_knock(self, signal_type, created=False): <NEW_LINE> <INDENT> new = self.app_config.send_knock_create and self.is_published and self.date_published == self.date_modified <NEW_LINE> updated = self.app_config.send_knock_update and self.is_published <NEW_LINE> return (new or updated) and signal_type in ("post_save", "post_delete")
Returns whether to emit knocks according to the post state
625941b494891a1f4081b879
def depthFirstSearch(problem): <NEW_LINE> <INDENT> startState = problem.getStartState() <NEW_LINE> directionList = [] <NEW_LINE> visitedList = [] <NEW_LINE> solution = dfsRecurr(problem, startState, None, directionList, visitedList) <NEW_LINE> return solution
Search the deepest nodes in the search tree first [p 85]. Your search algorithm needs to return a list of actions that reaches the goal. Make sure to implement a graph search algorithm [Fig. 3.7]. To get started, you might want to try some of these simple commands to understand the search problem that is being passed in: print "Start:", problem.getStartState() print "Is the start a goal?", problem.isGoalState(problem.getStartState()) print "Start's successors:", problem.getSuccessors(problem.getStartState())
625941b4adb09d7d5db6c565
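dfsRecurr is not included in this dump; a hedged iterative sketch of graph DFS against the SearchProblem interface the docstring names (getStartState / isGoalState / getSuccessors) could read:

def depth_first_search(problem):
    # LIFO frontier of (state, actions-so-far); the visited set makes it graph search.
    frontier = [(problem.getStartState(), [])]
    visited = set()
    while frontier:
        state, actions = frontier.pop()
        if problem.isGoalState(state):
            return actions
        if state in visited:
            continue
        visited.add(state)
        for successor, action, _cost in problem.getSuccessors(state):
            frontier.append((successor, actions + [action]))
    return []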
def getCCut(self): <NEW_LINE> <INDENT> median_angles = [] <NEW_LINE> for bid in self.BM.getNonChimericBinIds(): <NEW_LINE> <INDENT> if len(self.BM.getBin(bid).rowIndices) > 1: <NEW_LINE> <INDENT> cdistance = self.cDist(self.BM.getBin(bid).rowIndices) <NEW_LINE> median_angles.append(cdistance) <NEW_LINE> <DEDENT> <DEDENT> return np_median(median_angles), np_std(median_angles)
Work out the easy cutoff for coverage angle difference
625941b423849d37ff7b2e64
@main.command() <NEW_LINE> @click.option('--database', '-d', default='default') <NEW_LINE> @click.pass_context <NEW_LINE> def schemaspy(ctx, database): <NEW_LINE> <INDENT> click.echo(schemaspy_command(database))
SchemaSpy
625941b43c8af77a43ae3577
@to_opdef <NEW_LINE> @core(universal=True) <NEW_LINE> def cell_get(h, U): <NEW_LINE> <INDENT> return U, universe_getitem(U, h)
Get the current value of the cell.
625941b4dd821e528d63af7e
def detect_sounds(self, str): <NEW_LINE> <INDENT> str = str.replace(' ','') <NEW_LINE> i = len(str) <NEW_LINE> if i == 0: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> while i > 0: <NEW_LINE> <INDENT> if str[:i] in self.all_sounds: <NEW_LINE> <INDENT> return [str[:i]] + self.detect_sounds(str[i:]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> i -= 1 <NEW_LINE> <DEDENT> <DEDENT> return self.detect_sounds(str[1:])
Parses a string to detect known sound symbols. Necessary because of zero-width chars in sounds (diacritics etc.) :param str: :return:
625941b4507cdc57c6306aa4
def loss(self, logits, labels): <NEW_LINE> <INDENT> raise NotImplementedError <NEW_LINE> return loss
Calculates the multiclass cross-entropy loss from the logits predictions and the ground truth labels. The function will also add the regularization loss from network weights to the total loss that is return. In order to implement this function you should have a look at tf.nn.softmax_cross_entropy_with_logits. You can use tf.scalar_summary to save scalar summaries of cross-entropy loss, regularization loss, and full loss (both summed) for use with TensorBoard. This will be useful for compiling your report. Args: logits: 2D float Tensor of size [batch_size, self.n_classes]. The predictions returned through self.inference. labels: 2D int Tensor of size [batch_size, self.n_classes] with one-hot encoding. Ground truth labels for each observation in batch. Returns: loss: scalar float Tensor, full loss = cross_entropy + reg_loss
625941b42eb69b55b151c67c
def __init__(self, monitor_widget, text_font=DEFAULT_FONT, text_size=18, offset_x=0, offset_y=0, text_color=ui_theme.get_color("osd_tooltip_text"), border_color=ui_theme.get_color("osd_tooltip_border"), border_radious=1): <NEW_LINE> <INDENT> gtk.Window.__init__(self, gtk.WINDOW_POPUP) <NEW_LINE> self.monitor_widget = monitor_widget <NEW_LINE> self.text = "" <NEW_LINE> self.text_size = text_size <NEW_LINE> self.text_font = text_font <NEW_LINE> self.offset_x = offset_x <NEW_LINE> self.offset_y = offset_y <NEW_LINE> self.text_color = text_color <NEW_LINE> self.border_color = border_color <NEW_LINE> self.border_radious = border_radious <NEW_LINE> self.monitor_window = None <NEW_LINE> self.monitor_window_x = None <NEW_LINE> self.monitor_window_y = None <NEW_LINE> self.monitor_window_width = None <NEW_LINE> self.monitor_window_height = None <NEW_LINE> self.start_hide_delay = 5000 <NEW_LINE> self.hide_time = 500 <NEW_LINE> self.configure_event_callback_id = None <NEW_LINE> self.destroy_callback_id = None <NEW_LINE> self.start_hide_callback_id = None <NEW_LINE> self.focus_out_callback_id = None <NEW_LINE> self.set_decorated(False) <NEW_LINE> self.set_skip_taskbar_hint(True) <NEW_LINE> self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG) <NEW_LINE> self.set_colormap(gtk.gdk.Screen().get_rgba_colormap()) <NEW_LINE> self.add_events(gtk.gdk.ALL_EVENTS_MASK) <NEW_LINE> self.set_accept_focus(False) <NEW_LINE> self.connect("expose-event", self.expose_osd_tooltip) <NEW_LINE> self.connect("realize", self.realize_osd_tooltip) <NEW_LINE> self.connect("show", self.show_osd_tooltip)
Initialize OSDTooltip class. @param monitor_widget: Widget to monitor event. @param text_font: Text font, default is DEFAULT_FONT. @param text_size: Text size, default is 18. @param offset_x: Offset X coordinate relative to monitor widget. @param offset_y: Offset Y coordinate relative to monitor widget. @param text_color: Text color. @param border_color: Border color. @param border_radious: Border radious.
625941b49b70327d1c4e0ba6
def parse(path): <NEW_LINE> <INDENT> lines = codecs.open(path, "r", "utf-8").read().strip().split('\n') <NEW_LINE> if lines[0] != "#AUDIOSCROBBLER/1.1": <NEW_LINE> <INDENT> raise UnsupportedProtocolError() <NEW_LINE> <DEDENT> if lines[1] == "#TZ/UTC": <NEW_LINE> <INDENT> tz_offset = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tz_offset = get_tz_offset() <NEW_LINE> <DEDENT> tracks = [] <NEW_LINE> for line in lines[3:]: <NEW_LINE> <INDENT> v = line.split('\t') <NEW_LINE> if len(v) < 8: <NEW_LINE> <INDENT> v = v + [""]*(8-len(v)) <NEW_LINE> <DEDENT> v[6] = str(int(v[6]) - tz_offset) <NEW_LINE> track = Track(*v) <NEW_LINE> if track.rating == "L": <NEW_LINE> <INDENT> tracks.append(track) <NEW_LINE> <DEDENT> <DEDENT> return tracks
Parses the log and returns a sequence of Track
625941b4cb5e8a47e48b7883
def createVolume(self, name, cpgName, sizeMiB, optional=None): <NEW_LINE> <INDENT> info = {'name': name, 'cpg': cpgName, 'sizeMiB': sizeMiB} <NEW_LINE> if optional: <NEW_LINE> <INDENT> info = self._mergeDict(info, optional) <NEW_LINE> <DEDENT> response, body = self.http.post('/volumes', body=info) <NEW_LINE> return body
Create a new volume :param name: the name of the volume :type name: str :param cpgName: the name of the destination CPG :type cpgName: str :param sizeMiB: size in MiB for the volume :type sizeMiB: int :param optional: dict of other optional items :type optional: dict .. code-block:: python optional = { 'id': 12, 'comment': 'some comment', 'snapCPG' :'CPG name', 'ssSpcAllocWarningPct' : 12, 'ssSpcAllocLimitPct': 22, 'tpvv' : True, 'usrSpcAllocWarningPct': 22, 'usrSpcAllocLimitPct': 22, 'expirationHours': 256, 'retentionHours': 256 } :returns: List of Volumes :raises: :class:`~hp3parclient.exceptions.HTTPBadRequest` - INV_INPUT - Invalid Parameter :raises: :class:`~hp3parclient.exceptions.HTTPBadRequest` - TOO_LARGE - Volume size above limit :raises: :class:`~hp3parclient.exceptions.HTTPBadRequest` - NO_SPACE - Not Enough space is available :raises: :class:`~hp3parclient.exceptions.HTTPForbidden` - PERM_DENIED - Permission denied :raises: :class:`~hp3parclient.exceptions.HTTPConflict` - EXISTENT_SV - Volume Exists already
625941b4b545ff76a8913bf3
def test_dict(self) -> None: <NEW_LINE> <INDENT> builtins.__file__ = 'builtins' <NEW_LINE> stack: List[object] = [builtins] <NEW_LINE> stash: List[object] = [] <NEW_LINE> concat.stdlib.pyinterop.module.dict(stack, stash) <NEW_LINE> message = 'module.dict has incorrect stack effect' <NEW_LINE> self.assertEqual(stack, [builtins.__dict__], msg=message)
Test that module.dict works.
625941b4a4f1c619b28afe15
def concat(a, b): <NEW_LINE> <INDENT> return (a + b)
Concats two strings
625941b426068e7796caeaaa
def _qs(items): <NEW_LINE> <INDENT> return u','.join((u'?' for _ in items))
returns a list of '?' for each item in $items, for use in queries.
625941b438b623060ff0abc6
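Typical use of a placeholder helper like _qs above is building a parameterised IN clause; the table name and cursor below are hypothetical:

ids = (3, 7, 42)
query = u'SELECT * FROM items WHERE id IN (%s)' % _qs(ids)
# query == u'SELECT * FROM items WHERE id IN (?,?,?)'
# cursor.execute(query, ids)  # hypothetical DB-API cursor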
def _pbt_one_moment_df2(rank_C, rank_U, rank_X, total_N): <NEW_LINE> <INDENT> min_rank_C_U = min(rank_C, rank_U) <NEW_LINE> df2 = min_rank_C_U * (total_N - rank_X + min_rank_C_U - rank_U) <NEW_LINE> return df2
Calculate df2 for a pbt which is using an approximator which matches one moment
625941b47047854f462a11e0
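A worked instance of _pbt_one_moment_df2 above with made-up ranks:

# min(rank_C, rank_U) = min(2, 3) = 2, so
# df2 = 2 * (40 - 4 + 2 - 3) = 70
print(_pbt_one_moment_df2(2, 3, 4, 40))  # -> 70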
def process_removed (field_list): <NEW_LINE> <INDENT> field_list[REO_IX_DICT["removed"]] = ("True" if field_list[REO_IX_DICT["removed"]] != "" else "False") <NEW_LINE> return (field_list[REO_IX_DICT["removed"]] == "True")
Additionally process 'removed'. Args: field_list of values retrieved so far. Returns: bool removed (side effect updates a value in field_list).
625941b4d6c5a10208143e18
def swapPairs(self, head): <NEW_LINE> <INDENT> dummy=ListNode(0) <NEW_LINE> dummy.next=head <NEW_LINE> current=dummy <NEW_LINE> while current.next and current.next.next: <NEW_LINE> <INDENT> next_one=current.next <NEW_LINE> next_two=next_one.next <NEW_LINE> next_three=next_two.next <NEW_LINE> current.next=next_two <NEW_LINE> next_two.next=next_one <NEW_LINE> next_one.next=next_three <NEW_LINE> current=next_one <NEW_LINE> <DEDENT> return dummy.next
:type head: ListNode :rtype: ListNode
625941b491f36d47f21ac2c7
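A minimal harness for swapPairs above; the ListNode class and the Solution wrapper are the usual LeetCode conventions, assumed here rather than taken from this dump:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    # Build a singly linked list from a Python list.
    dummy = cur = ListNode(0)
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

node = Solution().swapPairs(build([1, 2, 3, 4]))  # assumes a Solution class
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # -> [2, 1, 4, 3]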
def is_overrides_management_page_loaded_properly(self): <NEW_LINE> <INDENT> return self.is_element_present(self.overrides_management_page_header_locator)
Check whether the overrides management page is loaded properly :return: True/False
625941b450485f2cf553cb6b
def hist_plot_resid(resids, test): <NEW_LINE> <INDENT> plt.hist(resids) <NEW_LINE> plt.title("Histogram of Residuals\n" + test) <NEW_LINE> plt.xlabel("Residuals") <NEW_LINE> plt.show()
Plots a histogram of the residuals from a model Inputs: resids: residuals test: string for whether these are test or training data
625941b4c432627299f04a16
def __init__(self, env, feature_transformer, initial_alpha=.1, gamma=.9, alpha_decay=0, seq_len=3, translate=True): <NEW_LINE> <INDENT> self.env = env <NEW_LINE> self.feature_transformer = feature_transformer <NEW_LINE> self.initial_alpha = initial_alpha <NEW_LINE> self.gamma = gamma <NEW_LINE> self._alpha_decay = alpha_decay <NEW_LINE> self.seq_len = seq_len <NEW_LINE> self.translate = translate <NEW_LINE> num_obs = env.observation_space.n <NEW_LINE> exp = seq_len <NEW_LINE> num_states = 1 <NEW_LINE> while exp > 0: <NEW_LINE> <INDENT> num_states += num_obs**exp <NEW_LINE> exp = exp - 1 <NEW_LINE> <DEDENT> num_actions = env.action_space.n <NEW_LINE> self.Q = np.random.uniform(low=0, high=0, size=(num_states, num_actions)) <NEW_LINE> self._n_updates = 0 <NEW_LINE> self.last_n_obs = [] <NEW_LINE> self.train_obs_seq_counts = {} <NEW_LINE> self.train_obs_seq_action_counts = {}
Started 05/04/2019 Q Learner with all combinations of seq_len observations as state space. Parameters ---------- env : gym.Env OpenAI Gym environment. feature_transformer : object Object that transforms raw state/observations into the Q function's state representation. Must have a `transform()` instance method. initial_alpha : float Learning rate. gamma : float Discount factor. alpha_decay : float, default 0 Learning rate alpha will decay at 1/_n_updates**_alpha_decay. seq_len : int Number of sequential observations to use as the state. translate : bool, default True If true, keeps track of translated observations. Attributes ---------- feature_transformer : object Transforms raw state into representation, usually a reduced one. Q : 2D numpy array Q <state,value> matrix where - axis0 index is transformed observation - axis1 index is action - value is Q value. E.g. Q[1][2] represents the Q value for taking action 2 in (transformed) state 1. _n_updates : int Number of updates made to Q matrix. last_n_obs : list A sequence of the most recent <seq_len> raw observations. TODO 05/09/2019 - Could improve this by only storing *transformed* observations. train_obs_seq_counts : dict<str, int> Dict with stringified observation sequences as keys and values as the number of times this sequence was predicted on. The observations and actions are referring to the sets that are sampled from experience replay and used to update the DQN. Currently this is only used for debuggging. train_obs_seq_action_counts : dict<str, int> Dict with stringified observation sequence + action as keys and values as the count of taking that action on that observation sequence. The observations and actions are referring to the sets that are sampled from experience replay and used to update the DQN. Currently this is only used for debugging.
625941b4091ae35668666d38
@pytest.fixture <NEW_LINE> def caplog(request): <NEW_LINE> <INDENT> result = LogCaptureFixture(request.node) <NEW_LINE> yield result <NEW_LINE> result._finalize()
Access and control log capturing. Captured logs are available through the following methods:: * caplog.text() -> string containing formatted log output * caplog.records() -> list of logging.LogRecord instances * caplog.record_tuples() -> list of (logger_name, level, message) tuples * caplog.clear() -> clear captured records and formatted log output string
625941b47b180e01f3dc45da
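A minimal usage sketch for the caplog fixture above; note that in current pytest, text and record_tuples are properties rather than the methods the docstring lists:

import logging

def test_warning_is_captured(caplog):
    logging.getLogger('demo').warning('disk almost full')
    assert 'disk almost full' in caplog.text
    assert caplog.record_tuples == [('demo', logging.WARNING, 'disk almost full')]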
def addStrings(self, num1, num2): <NEW_LINE> <INDENT> d = { '0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9 } <NEW_LINE> ans = '' <NEW_LINE> if len(num1) > len(num2): <NEW_LINE> <INDENT> num2 = (len(num1) - len(num2)) * "0" + num2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> num1 = (len(num2) - len(num1)) * "0" + num1 <NEW_LINE> <DEDENT> carry = 0 <NEW_LINE> for i in range(len(num1)-1, -1, -1): <NEW_LINE> <INDENT> a = d[num1[i]] + d[num2[i]] + carry <NEW_LINE> carry = a > 9 <NEW_LINE> ans += str(a % 10) <NEW_LINE> <DEDENT> if carry: <NEW_LINE> <INDENT> ans += str(int(carry)) <NEW_LINE> <DEDENT> return ans[::-1]
:type num1: str :type num2: str :rtype: str
625941b4091ae35668666d39
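Quick checks for addStrings above, under the same LeetCode Solution-class assumption; the method adds digit by digit with an explicit carry:

print(Solution().addStrings('456', '77'))  # -> '533'
print(Solution().addStrings('99', '1'))    # -> '100'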
def get_format_plugins(): <NEW_LINE> <INDENT> return OrderedDict(sorted(output_plugins.list_name_plugin()))
Return an ordered mapping of format name --> plugin callable for all the output plugins. The mapping is ordered by sorted key. This is the main API for other code to access format plugins.
625941b482261d6c526ab275
def getDataWindow(stream, start=None, end=None, relative='starttime'): <NEW_LINE> <INDENT> stream = stream.slice2(start, end, relative=relative) <NEW_LINE> N_stream = len(stream) <NEW_LINE> if N_stream == 0: <NEW_LINE> <INDENT> raise ValueError('Stream has length 0') <NEW_LINE> <DEDENT> samp = stream.getHI('sampling_rate') <NEW_LINE> if min(samp) != max(samp): <NEW_LINE> <INDENT> stream.downsample2(min(samp)) <NEW_LINE> log.warning('Downsampling stream because of differing sampling rate.') <NEW_LINE> <DEDENT> npts = stream.getHI('npts') <NEW_LINE> if min(npts) != max(npts): <NEW_LINE> <INDENT> log.warning('Traces in stream have different NPTS. ' 'Difference: %d samples' % (max(npts) - min(npts))) <NEW_LINE> <DEDENT> data = np.zeros((N_stream, max(npts))) <NEW_LINE> for i, trace in enumerate(stream): <NEW_LINE> <INDENT> data[i, :len(trace.data)] = trace.data <NEW_LINE> <DEDENT> return data
Return array with data in time window (start, end) around relative. 'time' can stand for UTCDateTime, list of UTCDateTimes, header entry out of ('ponset', 'sonset', 'startime', 'endtime') or 'middle' :param stream: Stream object with data :param start, end: time or float (seconds) relative to param=relative :param relative: time, is needed if start or end in seconds (float) :return: np.array of shape (N_stream, N_data)
625941b430dc7b766590173d
def getopt(args: List[str], shortopts: str, longopts: Iterable[str] = []) -> Tuple[List[Tuple[str, str]], List[str]]: <NEW_LINE> <INDENT> opts = [] <NEW_LINE> if isinstance(longopts, str): <NEW_LINE> <INDENT> longopts = [longopts] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> longopts = list(longopts) <NEW_LINE> <DEDENT> while args and args[0].startswith('-') and args[0] != '-': <NEW_LINE> <INDENT> if args[0] == '--': <NEW_LINE> <INDENT> args = args[1:] <NEW_LINE> break <NEW_LINE> <DEDENT> if args[0].startswith('--'): <NEW_LINE> <INDENT> opts, args = do_longs(opts, args[0][2:], longopts, args[1:]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:]) <NEW_LINE> <DEDENT> <DEDENT> return opts, args
getopt(args, options[, long_options]) -> opts, args Parses command line options and parameter list. args is the argument list to be parsed, without the leading reference to the running program. Typically, this means "sys.argv[1:]". shortopts is the string of option letters that the script wants to recognize, with options that require an argument followed by a colon (i.e., the same format that Unix getopt() uses). If specified, longopts is a list of strings with the names of the long options which should be supported. The leading '--' characters should not be included in the option name. Options which require an argument should be followed by an equal sign ('='). The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of the first argument). Each option-and-value pair returned has the option as its first element, prefixed with a hyphen (e.g., '-x'), and the option argument as its second element, or an empty string if the option has no argument. The options occur in the list in the same order in which they were found, thus allowing multiple occurrences. Long and short options may be mixed.
625941b4377c676e91271f7d
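A worked example of the contract described in the docstring, assuming the do_shorts/do_longs helpers referenced above behave as in the standard library's getopt module.

args = ["-a", "-b", "value", "--long=thing", "pos1", "pos2"]
opts, rest = getopt(args, "ab:", ["long="])
# Options keep their leading hyphens; argument-less options pair with "".
assert opts == [("-a", ""), ("-b", "value"), ("--long", "thing")]
# Everything after the options is returned untouched.
assert rest == ["pos1", "pos2"]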
def query_transit_data_siri(data_db_location, schedule_monitor, time_index): <NEW_LINE> <INDENT> MonitoredStops = query_siri() <NEW_LINE> write_transit_data_to_json(config.siri_json_dir, 'siri-', MonitoredStops) <NEW_LINE> parsed_data = parse_siri_transit_data(MonitoredStops, time_index) <NEW_LINE> parsed_data_with_delays = compare_actual_to_schedule(parsed_data, schedule_monitor) <NEW_LINE> save_transit_data(parsed_data_with_delays, 'siri', data_db_location) <NEW_LINE> sf.insert_periodic_task_monitor(data_db_location, time_index) <NEW_LINE> (maxDeperatureDelay, delayed_trains) = determine_delayed_trains( parsed_data_with_delays) <NEW_LINE> if maxDeperatureDelay >= warn_delay_threhold: <NEW_LINE> <INDENT> pn.delay_push_notify(config.push_notification_sql, delayed_trains) <NEW_LINE> <DEDENT> return None
:param data_db_location: location of the sql database to store the results :type data_db_location: string :param schedule_monitor: dataframe that contains schedule information :type schedule_monitor: pandas dataframe :param time_index: time index for when the data is collected :type time_index: integer :return: None
625941b46fece00bbac2d50d
def func(arg=None): <NEW_LINE> <INDENT> print(arg)
Print the given argument.
625941b423849d37ff7b2e65
def mlperf_submission_log(benchmark): <NEW_LINE> <INDENT> config_logger(benchmark) <NEW_LINE> log_event( key=constants.SUBMISSION_BENCHMARK, value=benchmark, ) <NEW_LINE> log_event( key=constants.SUBMISSION_ORG, value='reference_implementation') <NEW_LINE> log_event( key=constants.SUBMISSION_DIVISION, value='closed') <NEW_LINE> log_event( key=constants.SUBMISSION_STATUS, value='onprem') <NEW_LINE> log_event( key=constants.SUBMISSION_PLATFORM, value='reference_implementation') <NEW_LINE> log_event( key=constants.SUBMISSION_ENTRY, value="reference_implementation") <NEW_LINE> log_event( key=constants.SUBMISSION_POC_NAME, value='reference_implementation') <NEW_LINE> log_event( key=constants.SUBMISSION_POC_EMAIL, value='reference_implementation')
Logs information needed for MLPerf submission
625941b4f7d966606f6a9ddb
def rebin_to_wcs(self, wcs, remote=None, rebin_remote_threshold=None, in_place=False, ignore=None): <NEW_LINE> <INDENT> pixelscale = wcs.average_pixelscale <NEW_LINE> if in_place: rebin_to_pixelscale(*self.values, names=self.names, pixelscale=pixelscale, wcs=wcs, remote=remote, rebin_remote_threshold=rebin_remote_threshold, in_place=True, ignore=ignore) <NEW_LINE> else: <NEW_LINE> <INDENT> new_frames = rebin_to_pixelscale(*self.values, names=self.names, pixelscale=pixelscale, wcs=wcs, remote=remote, rebin_remote_threshold=rebin_remote_threshold, ignore=ignore) <NEW_LINE> self.remove_all() <NEW_LINE> for frame in new_frames: self.append(frame)
Rebin all frames in this list to the pixelscale of the given WCS. :param wcs: target coordinate system; its average pixelscale defines the new sampling :param remote: optional remote host on which to perform the rebinning :param rebin_remote_threshold: data size threshold above which rebinning is delegated to the remote host :param in_place: if True, rebin the frames in place; otherwise replace the list contents with the returned frames :param ignore: frames to skip :return:
625941b4925a0f43d2549c45
def __getitem__(self, index): <NEW_LINE> <INDENT> if isinstance(index, slice): <NEW_LINE> <INDENT> return [(key, self._dict[key]) for key in self._list[index]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key = self._list[index] <NEW_LINE> return (key, self._dict[key])
Return the item at position *index*.
625941b4ec188e330fd5a57a
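A minimal harness for the indexing behavior above; SortedItems is a hypothetical container that keeps its sorted keys in self._list and the mapping in self._dict, as the method expects.

class SortedItems:
    def __init__(self, mapping):
        self._dict = dict(mapping)
        self._list = sorted(self._dict)

    def __getitem__(self, index):
        # Same body as the entry above.
        if isinstance(index, slice):
            return [(key, self._dict[key]) for key in self._list[index]]
        key = self._list[index]
        return (key, self._dict[key])

items = SortedItems({"b": 2, "a": 1, "c": 3})
assert items[0] == ("a", 1)               # single index -> one (key, value) pair
assert items[1:] == [("b", 2), ("c", 3)]  # slice -> list of pairs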
def FCStoCSV(Couples, Samples, txtFile): <NEW_LINE> <INDENT> for C in Couples: <NEW_LINE> <INDENT> Samples[C]._report(txtFile)
Write a CSV report for each couple's sample to the given file.
625941b4c432627299f04a17
def attempt_assign(self, hailer, time): <NEW_LINE> <INDENT> x1 = hailer.coords_dropoff[0] <NEW_LINE> y1 = hailer.coords_dropoff[1] <NEW_LINE> x0 = hailer.coords_pickup[0] <NEW_LINE> y0 = hailer.coords_pickup[1] <NEW_LINE> trip_dist = self.duration(x1, x0, y1, y0) <NEW_LINE> if trip_dist > 100 or trip_dist < 5: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> self.hail_id = hailer.uid <NEW_LINE> self.coords_pickup = hailer.coords_pickup <NEW_LINE> self.coords_dropoff = hailer.coords_dropoff <NEW_LINE> self.start_time = time <NEW_LINE> self.trip_duration = trip_dist <NEW_LINE> return True
The minimum distance a driver is willing to take a rider is 5 blocks; hails that do not meet this distance are (politely) ignored. The maximum distance a driver is willing to take a rider is 100 blocks; hails that exceed this distance are also ignored.
625941b4ec188e330fd5a57b
def stop(self): <NEW_LINE> <INDENT> self._sp = True <NEW_LINE> try: <NEW_LINE> <INDENT> self.join() <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> pass
Stop the thread
625941b41f037a2d8b945fd1
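A sketch of the worker thread this stop() would terminate. The _sp flag name comes from the entry; the run loop and timing are assumptions added for illustration.

import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        super().__init__()
        self._sp = False              # stop flag polled by run()

    def run(self):
        while not self._sp:
            time.sleep(0.05)          # placeholder for real work

    def stop(self):
        self._sp = True
        try:
            self.join()
        except Exception:
            pass

w = Worker()
w.start()
w.stop()                              # flips the flag, then joins
assert not w.is_alive()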
def test_not_gate(self): <NEW_LINE> <INDENT> d = self.w.request_chunk(0, 0) <NEW_LINE> @d.addCallback <NEW_LINE> def cb(chunk): <NEW_LINE> <INDENT> for i, o in ((True, False), (False, True)): <NEW_LINE> <INDENT> chunk.set_block((2, 1, 1), blocks["sand"].slot) <NEW_LINE> chunk.set_block((3, 1, 1), blocks["redstone-torch"].slot) <NEW_LINE> orientation = blocks["lever"].orientation("+x") <NEW_LINE> iblock, imetadata = truth_to_block(i, blocks["lever"].slot, orientation) <NEW_LINE> chunk.set_block((1, 1, 1), iblock) <NEW_LINE> chunk.set_metadata((1, 1, 1), imetadata) <NEW_LINE> orientation = blocks["redstone-torch"].orientation("-x") <NEW_LINE> chunk.set_metadata((3, 1, 1), orientation) <NEW_LINE> circuit = list(self.hook.run_circuit(1, 1, 1))[0] <NEW_LINE> self.hook.run_circuit(*circuit) <NEW_LINE> block = chunk.get_block((3, 1, 1)) <NEW_LINE> metadata = chunk.get_metadata((3, 1, 1)) <NEW_LINE> self.assertEqual((block, metadata), truth_to_block(o, block, metadata)) <NEW_LINE> <DEDENT> <DEDENT> return d
NOT gates should work.
625941b497e22403b379cd6b
def _get_restart_suppress(self): <NEW_LINE> <INDENT> return self.__restart_suppress
Getter method for restart_suppress, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/adjacencies/adjacency/state/restart_suppress (boolean) YANG Description: When set to true, adjacency is not advertised. The SA bit is used by a starting router to request that its neighbor suppress advertisement of the adjacency to the starting router in the neighbor's LSPs.
625941b43346ee7daa2b2b3b
def resume(self, update=None): <NEW_LINE> <INDENT> if self._asyncoro: <NEW_LINE> <INDENT> return self._asyncoro._resume(self, update, AsynCoro._Suspended) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.warning('resume: coroutine %s/%s removed?', self.name, self._id) <NEW_LINE> return -1
May be used with 'yield'. Resume/wake up this coro and send 'update' to it. The resumed coro receives 'update' as the value of the 'yield' that caused it to suspend. If the coro is not currently suspended/sleeping, the resume is ignored.
625941b4627d3e7fe0d68c21
def __matmul__(self, other): <NEW_LINE> <INDENT> from . import tools as _tools <NEW_LINE> diff = len(self.n) - len(other.m) <NEW_LINE> L = self if diff >= 0 else _tools.kron(self, matrix(_tools.ones(1, abs(diff)))) <NEW_LINE> R = other if diff <= 0 else _tools.kron(other, matrix(_tools.ones(1, abs(diff)))) <NEW_LINE> c = matrix() <NEW_LINE> c.n = L.n.copy() <NEW_LINE> c.m = R.m.copy() <NEW_LINE> res = _vector.vector() <NEW_LINE> res.d = L.tt.d <NEW_LINE> res.n = c.n * c.m <NEW_LINE> if L.is_complex or R.is_complex: <NEW_LINE> <INDENT> res.r = _core_f90.core.zmat_mat( L.n, L.m, R.m, _np.array( L.tt.core, dtype=_np.complex), _np.array( R.tt.core, dtype=_np.complex), L.tt.r, R.tt.r) <NEW_LINE> res.core = _core_f90.core.zresult_core.copy() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res.r = _core_f90.core.dmat_mat( L.n, L.m, R.m, _np.real( L.tt.core), _np.real( R.tt.core), L.tt.r, R.tt.r) <NEW_LINE> res.core = _core_f90.core.result_core.copy() <NEW_LINE> <DEDENT> _core_f90.core.dealloc() <NEW_LINE> res.get_ps() <NEW_LINE> c.tt = res <NEW_LINE> return c
Multiplication of two TT-matrices
625941b491af0d3eaac9b7e6
def detect(seg_maps, timer, image_w, image_h, min_area_thresh=10, seg_map_thresh=0.9, ratio = 1): <NEW_LINE> <INDENT> if len(seg_maps.shape) == 4: <NEW_LINE> <INDENT> seg_maps = seg_maps[0, :, :, ] <NEW_LINE> <DEDENT> kernals = [] <NEW_LINE> one = np.ones_like(seg_maps[..., 0], dtype=np.uint8) <NEW_LINE> zero = np.zeros_like(seg_maps[..., 0], dtype=np.uint8) <NEW_LINE> thresh = seg_map_thresh <NEW_LINE> for i in range(seg_maps.shape[-1]-1, -1, -1): <NEW_LINE> <INDENT> kernal = np.where(seg_maps[..., i]>thresh, one, zero) <NEW_LINE> kernals.append(kernal) <NEW_LINE> thresh = seg_map_thresh*ratio <NEW_LINE> <DEDENT> start = time.time() <NEW_LINE> mask_res, label_values = pse(kernals, min_area_thresh) <NEW_LINE> timer['pse'] = time.time()-start <NEW_LINE> mask_res = np.array(mask_res) <NEW_LINE> mask_res_resized = cv2.resize(mask_res, (image_w, image_h), interpolation=cv2.INTER_NEAREST) <NEW_LINE> boxes = [] <NEW_LINE> for label_value in label_values: <NEW_LINE> <INDENT> points = np.argwhere(mask_res_resized==label_value) <NEW_LINE> points = points[:, (1,0)] <NEW_LINE> rect = cv2.minAreaRect(points) <NEW_LINE> box = cv2.boxPoints(rect) <NEW_LINE> boxes.append(box) <NEW_LINE> <DEDENT> return np.array(boxes), kernals, timer
Restore text boxes from the segmentation maps. :param seg_maps: stacked segmentation maps output by the network :param timer: dict collecting timing information :param min_area_thresh: minimum area for a connected component to be kept :param seg_map_thresh: threshold for the segmentation maps :param ratio: factor applied to compute each successive seg map threshold :return:
625941b4a4f1c619b28afe16
def __repr__(self): <NEW_LINE> <INDENT> return '<Buy id:%s user_id:%s title:%s>' % (self.id, self.user_id, self.title)
Return a readable representation of this Buy record, showing its id, user_id, and title.
625941b47cff6e4e81117759
@pytest.fixture() <NEW_LINE> def apigw_event(): <NEW_LINE> <INDENT> return { "Records": [ { "cf": { "config": { "distributionId": "EXAMPLE" }, "request": { "uri": "/baked/[email protected]:301d5176-9ace-4219-b44b-85dcf781e1e3.html", "method": "GET", "clientIp": "2001:cdba::3257:9652", "headers": { "user-agent": [ { "key": "User-Agent", "value": "Test Agent" } ], "host": [ { "key": "Host", "value": "d123.cf.net" } ], "cookie": [ { "key": "Cookie", "value": "SomeCookie=1; AnotherOne=A; X-Experiment-Name=B" } ] } } } } ] }
Generates a CloudFront (Lambda@Edge) request event
625941b45166f23b2e1a4f2b
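A hedged example of consuming the fixture in a test; extract_experiment is a hypothetical helper written only to show how the nested CloudFront record is traversed.

def extract_experiment(event):
    # Walk Records[0].cf.request.headers.cookie[0].value.
    headers = event["Records"][0]["cf"]["request"]["headers"]
    cookie = headers["cookie"][0]["value"]
    for part in cookie.split("; "):
        name, _, value = part.partition("=")
        if name == "X-Experiment-Name":
            return value
    return None

def test_experiment_cookie(apigw_event):
    assert extract_experiment(apigw_event) == "B"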
def involve(self): <NEW_LINE> <INDENT> self.free = False
Mark this field as occupied, meaning ships can no longer be placed on it.
625941b4cc40096d61595727
def eye_p(nq): <NEW_LINE> <INDENT> return Pauli('I'*nq)
Given a number of qubits, returns the identity Pauli on that many qubits. :param int nq: Number of qubits upon which the returned Pauli acts. :rtype: :class:`qecc.Pauli` :returns: A Pauli operator acting as the identity on each of ``nq`` qubits.
625941b4cad5886f8bd26db5
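Usage is a one-liner; the equality check below assumes qecc.Pauli instances built from the same operator string compare equal.

from qecc import Pauli

# eye_p(3) should be the identity on three qubits, i.e. Pauli('III').
assert eye_p(3) == Pauli("I" * 3)
assert eye_p(1) == Pauli("I")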
def limitMagnitude(self, magnitude, direction): <NEW_LINE> <INDENT> return 1.0
Return the scaling factor necessary to keep the wheel speed within its limits. :param magnitude: speed in feet per second :param direction: radians. 0 is right, positive counter-clockwise :return: 1.0 if wheel speed is within it limits, otherwise a value between 0 and 1 to scale the wheel down to its maximum speed.
625941b4cdde0d52a9e52e01
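The body above is a stub that always returns 1.0. Below is a hedged sketch of what the documented contract could look like, assuming a hypothetical maxVelocity attribute in feet per second and ignoring direction for simplicity.

def limitMagnitude(self, magnitude, direction):
    # direction is unused in this simplified sketch; a real swerve
    # module might derive a direction-dependent limit instead.
    if magnitude <= self.maxVelocity:
        return 1.0                        # within limits, no scaling
    return self.maxVelocity / magnitude   # shrink to the maximum speed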
def execute_flow(user_input): <NEW_LINE> <INDENT> if (len(user_input) < MINIMUM_COMMAND_LINE_ARGS or not AVAILABLE_FLOWS.get(user_input[FLOW_NAME])): <NEW_LINE> <INDENT> exit_program("The flow does not exist or there are not enough parameters to process") <NEW_LINE> <DEDENT> if user_input[FLOW_NAME] == 'help': <NEW_LINE> <INDENT> exit_program() <NEW_LINE> <DEDENT> flow = AVAILABLE_FLOWS.get(user_input[FLOW_NAME]) <NEW_LINE> flow_arguments = user_input[FIRST_FLOW_PARAM:] <NEW_LINE> if len(flow_arguments) != len(flow.get('arguments')): <NEW_LINE> <INDENT> exit_program("Wrong number of arguments") <NEW_LINE> <DEDENT> for index, required_argument in enumerate(flow['arguments']): <NEW_LINE> <INDENT> if not flow_arguments[index].strip(): <NEW_LINE> <INDENT> exit_program("There's an invalid argument") <NEW_LINE> <DEDENT> <DEDENT> processor = flow['processor'](flow_arguments) <NEW_LINE> processor.process()
Usage: Basic structure: python3 main.py <FLOW> <FLOW_ARGUMENTS> Create a new project: python3 main.py project <PROJECT_NAME> Create a new blog post: python3 main.py post <POST_NAME> <POST_CATEGORY> <PROJECT_NAME>
625941b44e4d5625662d41b2
def resize(size, *args): <NEW_LINE> <INDENT> size = int(size) <NEW_LINE> for group in as_list(*args): <NEW_LINE> <INDENT> if group.min_size <= size: <NEW_LINE> <INDENT> as_resize(group, size) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mico.output.error("(%s) desired size %d must be higher than min size %d" %(group.name, size, group.min_size))
Resize autoscaling group to a specified size. When issuing resize you change the AutoScaling Group's desired capacity. It ignores cooldown time. :param size: Desired capacity For example:: mico ec2.as resize 10 'apache-*'
625941b4fb3f5b602dac346a
def api_query_dict( self, api_type: BINANCE_API_TYPE, method: str, options: Optional[Dict] = None, ) -> Dict: <NEW_LINE> <INDENT> result = self.api_query(api_type, method, options) <NEW_LINE> if not isinstance(result, dict): <NEW_LINE> <INDENT> error_msg = f'Expected dict but did not get it in {self.name} api response.' <NEW_LINE> log.error(f'{error_msg}. Got: {result}') <NEW_LINE> raise RemoteError(error_msg) <NEW_LINE> <DEDENT> return result
May raise RemoteError and BinancePermissionError due to api_query
625941b4d8ef3951e3243311
def getInvoice(self): <NEW_LINE> <INDENT> return StripeInvoice(self.getJSONFromString(self._output.get('Response', [])))
A reference to a Stripe Invoice object built from the stored response.
625941b4d7e4931a7ee9dcef
def next_trial(self): <NEW_LINE> <INDENT> self.stopping_rule() <NEW_LINE> if not self.data_obj.control: <NEW_LINE> <INDENT> self.data_obj.test_done = True <NEW_LINE> self.data_obj.update() <NEW_LINE> self.data_obj.to_csv() <NEW_LINE> self.data_obj.to_localdb(summary_method, self.instr) <NEW_LINE> self.close() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if self.data_obj.data: <NEW_LINE> <INDENT> self.data_obj.update() <NEW_LINE> self.data_obj.to_csv() <NEW_LINE> <DEDENT> self.current_trial = self.data_obj.control.pop(0) <NEW_LINE> _, _, self.phase, self.trialn, self.sequence = self.current_trial <NEW_LINE> if self.trialn == 0: <NEW_LINE> <INDENT> self.central_widget = InstructWidget(self) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.central_widget = RespondWidget(self) <NEW_LINE> <DEDENT> self.setCentralWidget(self.central_widget)
Checks to see if the test should stop, saves the data, then either quits or moves on to the next trial.
625941b44527f215b584c22f
def expand_foreach(globs, block, outfile): <NEW_LINE> <INDENT> block = tuple(block) <NEW_LINE> for path in match_globs(globs): <NEW_LINE> <INDENT> expand_foreach_file(path, block, outfile)
Expand a foreach block for each file matching one of globs. Write the results to outfile.
625941b44e696a04525c9228
def get_number_of_pages(table): <NEW_LINE> <INDENT> print("Getting number of pages.") <NEW_LINE> s = re.findall('Page\s1/[0-9]+', str(table.find_all('tr')[0].find_all('td')[0]))[0] <NEW_LINE> return int(s[s.find('/')+1:])
Return the number of pages written in the first td of the first tr :param table: HTML table :return: int which represents the number of pages
625941b45fdd1c0f98dc0005
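A runnable check of get_number_of_pages above, assuming the table argument is a BeautifulSoup tag (the entry calls .find_all) and that the defining module imports re.

from bs4 import BeautifulSoup

html = "<table><tr><td>Page 1/12</td></tr><tr><td>row</td></tr></table>"
table = BeautifulSoup(html, "html.parser").find("table")
# The regex picks 'Page 1/12' out of the first cell; the slice after
# '/' yields the page count.
assert get_number_of_pages(table) == 12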
def make_polys(P, M, cap_bottom, cap_top): <NEW_LINE> <INDENT> poly_list = [] <NEW_LINE> for i in range(P - 1): <NEW_LINE> <INDENT> for j in range(M - 1): <NEW_LINE> <INDENT> poly_list.append([i * M + j, i * M + j + 1, (i + 1) * M + j + 1, (i + 1) * M + j]) <NEW_LINE> <DEDENT> poly_list.append([(i + 1) * M - 1, i * M, (i + 1) * M, (i + 2) * M - 1]) <NEW_LINE> <DEDENT> if cap_bottom: <NEW_LINE> <INDENT> cap = [j for j in reversed(range(M))] <NEW_LINE> poly_list.append(cap) <NEW_LINE> <DEDENT> if cap_top: <NEW_LINE> <INDENT> cap = [(P - 1) * M + j for j in range(M)] <NEW_LINE> poly_list.append(cap) <NEW_LINE> <DEDENT> return poly_list
Generate the super-ellipsoid polygons for the given parameters P : number of parallels (= number of points in a meridian) M : number of meridians (= number of points in a parallel) cap_bottom : turn on/off the bottom cap generation cap_top : turn on/off the top cap generation
625941b4bde94217f3682bd1
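A sanity check of the face counts implied by the loops above: each of the P-1 parallel bands contributes M quads (M-1 interior plus one wrap-around), and each requested cap adds one M-gon.

P, M = 4, 6
polys = make_polys(P, M, cap_bottom=True, cap_top=True)
assert len(polys) == (P - 1) * M + 2                   # quads + two caps
assert all(len(f) == 4 for f in polys[:(P - 1) * M])   # side faces are quads
assert len(polys[-2]) == M and len(polys[-1]) == M     # caps are M-gons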