code (string, 4-4.48k chars) | docstring (string, 1-6.45k chars) | _id (string, 24 chars)
---|---|---|
def key_for_code(code): <NEW_LINE> <INDENT> handle = Handle() <NEW_LINE> try: <NEW_LINE> <INDENT> res = handle.checked_call('KeyForCode', {'Code': code}) <NEW_LINE> if 'KeyEvent' in res: <NEW_LINE> <INDENT> action = KeyAction('') <NEW_LINE> action.bson_obj = res['KeyEvent'] <NEW_LINE> return action <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> handle.close() | Create a KeyAction with the given key code.
The result is None if the code is not found.
The result will have no 'event', so it will be
necessary to run with_event() on it. | 625941b382261d6c526ab249 |
def test_game_size(): <NEW_LINE> <INDENT> assert utils.game_size(2000, 10) == 1442989326579174917694151 <NEW_LINE> assert np.all( utils.game_size([10, 20, 100], 10) == [92378, 10015005, 4263421511271] ) <NEW_LINE> assert np.all( utils.game_size(10, [10, 20, 100]) == [92378, 20030010, 42634215112710] ) <NEW_LINE> assert np.all( utils.game_size([100, 20, 10], [10, 20, 100]) == [4263421511271, 68923264410, 42634215112710] ) <NEW_LINE> assert utils.game_size_inv(1442989326579174917694151, 2000) == 10 <NEW_LINE> assert np.all( utils.game_size_inv([92378, 10015005, 4263421511271], [10, 20, 100]) == 10 ) <NEW_LINE> assert np.all( utils.game_size_inv([92378, 20030010, 42634215112710], 10) == [10, 20, 100] ) <NEW_LINE> assert np.all( utils.game_size_inv([4263421511271, 68923264410, 42634215112710], [100, 20, 10]) == [10, 20, 100] ) <NEW_LINE> assert np.all(utils.game_size_inv(100, [1, 5, 20]) == [100, 4, 2]) | Test game size | 625941b3293b9510aa2c3040 |
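The constants asserted above are consistent with multiset (binomial) coefficients, suggesting `game_size(n, s) == C(n + s - 1, n)`; a quick sketch checking that inference against the test's own values (the identity is inferred from the constants, not from `utils` itself):

```python
# Sketch: the asserted constants match C(n + s - 1, n).
from math import comb

assert comb(10 + 10 - 1, 10) == 92378
assert comb(20 + 10 - 1, 20) == 10015005
assert comb(2000 + 10 - 1, 2000) == 1442989326579174917694151
```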
def get_scheme_from_file(filename): <NEW_LINE> <INDENT> with open(filename, 'rb') as f: <NEW_LINE> <INDENT> return retype.match(f.readline().split('{')[0]).groups()[0].lower() | Return the scheme associated with filename. | 625941b3507cdc57c6306a78 |
@pytest.fixture(scope='session') <NEW_LINE> def empty_cfg(tmpdir_factory): <NEW_LINE> <INDENT> path = tmpdir_factory.getbasetemp().join('config', 'test_empty.cfg') <NEW_LINE> path.write('', ensure=True) <NEW_LINE> return config.Config(str(path)) | An empty test config file | 625941b355399d3f0558845a |
def transaction(self, func, *watches, **kwargs): <NEW_LINE> <INDENT> shard_hint = kwargs.pop('shard_hint', None) <NEW_LINE> pipe = self.pipeline(True, shard_hint) <NEW_LINE> while 1: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> pipe.watch(*watches) <NEW_LINE> func(pipe) <NEW_LINE> return pipe.execute() <NEW_LINE> <DEDENT> except WatchError: <NEW_LINE> <INDENT> continue | Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The `func` callable
should expect a single argument which is a Pipeline object. | 625941b31f037a2d8b945fa5 |
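A minimal usage sketch for the WATCH/retry pattern described above, assuming a redis-py-style client; the key name and increment are illustrative:

```python
import redis

r = redis.Redis()

def incr_balance(pipe):
    # Reads execute immediately while the key is WATCHed...
    balance = int(pipe.get('balance') or 0)
    # ...then multi() buffers the writes for an atomic EXEC.
    pipe.multi()
    pipe.set('balance', balance + 10)

# transaction() retries automatically if 'balance' changes
# between the read and the write.
r.transaction(incr_balance, 'balance')
```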
def get_input(text): <NEW_LINE> <INDENT> return input(text) | Get input from user.
:param text: Text to print before taking input.
:return: Input of user. | 625941b356ac1b37e6263f89 |
def get_dynamic_rmse(self): <NEW_LINE> <INDENT> i_doy = np.argsort(np.mod( self.X[self.start:self.here, 1] - self.X[self.here + self.consecutive, 1], self.ndays))[:self.min_obs] <NEW_LINE> rmse = np.zeros(len(self.test_indices), np.float32) <NEW_LINE> for i_b, b in enumerate(self.test_indices): <NEW_LINE> <INDENT> m = self.models[b] <NEW_LINE> rmse[i_b] = np.sqrt(np.sum( (self.Y[b, :].take(i_doy) - m.predict(self.X.take(i_doy, axis=0))) ** 2) / i_doy.size) <NEW_LINE> <DEDENT> return rmse | Return the dynamic RMSE for each model
Dynamic RMSE refers to the Root Mean Squared Error calculated using
`self.min_obs` number of observations closest in day of year to the
observation `self.consecutive` steps into the future. Goal is to
reduce false-positives during seasonal transitions (high variance in
the signal) while decreasing omission during stable times of year.
Returns:
np.ndarray: NumPy array containing dynamic RMSE of each tested model | 625941b31f037a2d8b945fa6 |
def register_builder(self, key, builder): <NEW_LINE> <INDENT> self._builder[key] = builder | Registering a builder, respective key will be added to private _builder | 625941b3498bea3a759b9859 |
def dft_naive(x): <NEW_LINE> <INDENT> N = x.shape[0] <NEW_LINE> n = np.arange(N) <NEW_LINE> k = np.arange(N).reshape(-1, 1) <NEW_LINE> cos = np.cos(2*np.pi*k*n/N) <NEW_LINE> sin = np.sin(2*np.pi*k*n/N) <NEW_LINE> X = (cos - 1j*sin) @ x <NEW_LINE> return X | Computes the naive DFT of a sequence.
The DFT is computed as defined by:
.. math::
:label: eq-naive-dft
X_k = \sum^{N-1}_{n=0}x_n e^{-j2\pi kn/N}
where :math:`X_k` is the :math:`k`th Fourier coefficients, :math:`x_n`
is the :math:`n`th sample of the input signal and :math:`N` is the size
of the input signal :math:`x`.
Parameters
----------
x : np.ndarray
Input sequence.
Returns
-------
np.ndarray:
Fourier coefficients of the input signal.
| 625941b367a9b606de4a7c64 |
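Since :eq:`eq-naive-dft` is the same transform NumPy's FFT computes, a cross-check makes a natural usage example (sketch):

```python
import numpy as np

x = np.random.randn(64)
X = dft_naive(x)
# The O(N^2) matrix formulation must agree with the O(N log N) FFT.
assert np.allclose(X, np.fft.fft(x))
```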
def write(s): <NEW_LINE> <INDENT> pass | Write a string to the file.
| 625941b391f36d47f21ac29c |
def autoscaler_pool_settings(self, **kwargs): <NEW_LINE> <INDENT> pool_name = kwargs["pool_name"] <NEW_LINE> value_map = { "pg_autoscale_mode": kwargs.get("pg_autoscale_mode"), "target_size_ratio": kwargs.get("target_size_ratio"), "target_size_bytes": kwargs.get("target_size_bytes"), "pg_num_min": kwargs.get("pg_num_min"), } <NEW_LINE> for val in value_map.keys(): <NEW_LINE> <INDENT> if val in kwargs.keys(): <NEW_LINE> <INDENT> if not self.set_pool_property( pool=pool_name, props=val, value=value_map[val] ): <NEW_LINE> <INDENT> log.error(f"failed to set property {val} on pool {pool_name}") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return True | Sets various options on pools wrt PG Autoscaler
Args:
**kwargs: various kwargs to be sent
Supported kw args:
1. pg_autoscale_mode: PG autoscale mode for the individual pool. Values -> on, warn, off. (str)
2. target_size_ratio: ratio of cluster pool will utilize. Values -> 0 - 1. (float)
3. target_size_bytes: size the pool is assumed to utilize. eg: 10T (str)
4. pg_num_min: minimum pg's for a pool. (int)
Returns: (bool) True if all properties were set successfully, else False. | 625941b34e4d5625662d418d |
def relative_rate(self): <NEW_LINE> <INDENT> return _cdma_swig.flag_gen_sptr_relative_rate(self) | relative_rate(flag_gen_sptr self) -> double | 625941b399fddb7c1c9de142 |
def p_PRINT(t): <NEW_LINE> <INDENT> pass | PRINT : PRINT_KEYWORD OPEN_PARENTHESES M | 625941b3cad5886f8bd26d89 |
def set_canceled(self): <NEW_LINE> <INDENT> self.canceled = True | Set canceled. | 625941b3ab23a570cc24ff2e |
def _is_rare_abbrev_type(self, cur_tok, next_tok): <NEW_LINE> <INDENT> if cur_tok.abbr or not cur_tok.sentbreak: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> typ = cur_tok.type_no_sentperiod <NEW_LINE> count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] <NEW_LINE> if typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if next_tok.tok[:1] in self._lang_vars.internal_punctuation: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif next_tok.first_lower: <NEW_LINE> <INDENT> typ2 = next_tok.type_no_sentperiod <NEW_LINE> typ2ortho_context = self._params.ortho_context[typ2] <NEW_LINE> if (typ2ortho_context & _ORTHO_BEG_UC) and not ( typ2ortho_context & _ORTHO_MID_UC ): <NEW_LINE> <INDENT> return True | A word type is counted as a rare abbreviation if...
- it's not already marked as an abbreviation
- it occurs fewer than ABBREV_BACKOFF times
- either it is followed by a sentence-internal punctuation
mark, *or* it is followed by a lower-case word that
sometimes appears with upper case, but never occurs with
lower case at the beginning of sentences. | 625941b3dc8b845886cb52db |
def get_flat_image(self, full_output=False): <NEW_LINE> <INDENT> return convert_image_to_array(self.flat, full_output) | Returns the flat image.
Args
----
full_output : boolean, optional
Passed to the convert_image_to_array function
Returns
-------
flat_image : 2D numpy array
The flat image
output_obj : ImageInfo, optional
Object containing information about the image, if full_output=True | 625941b321a7993f00bc7a8f |
def test_get_instance(self): <NEW_LINE> <INDENT> region = get_data_region("us-east-1", datas) <NEW_LINE> inst = get_instance("m3.large", region) <NEW_LINE> self.assertEqual("2", inst['vCPU']) | Test that get_instance returns the expected instance data | 625941b3eab8aa0e5d26d905 |
def within_class_scatter(data,label): <NEW_LINE> <INDENT> labelset = set(label) <NEW_LINE> dim = data.shape[1] <NEW_LINE> row = data.shape[0] <NEW_LINE> Sw = np.zeros((dim,dim)) <NEW_LINE> for i in labelset: <NEW_LINE> <INDENT> pos = np.where(label == i) <NEW_LINE> X = data[pos] <NEW_LINE> possize = np.size(pos) <NEW_LINE> mean = np.mean(X,0) <NEW_LINE> mean = np.array([mean]) <NEW_LINE> S = np.dot((X-mean).T,(X-mean)) <NEW_LINE> Sw = Sw + (possize/row)*S <NEW_LINE> <DEDENT> return Sw | within class scatter matrix | 625941b3fb3f5b602dac343e |
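An illustrative call on a toy two-class dataset (a sketch; rows of `data` are samples and `label` is a 1-D array aligned with them):

```python
import numpy as np

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(size=(50, 3)),
                  rng.normal(loc=2.0, size=(50, 3))])
label = np.array([0] * 50 + [1] * 50)

Sw = within_class_scatter(data, label)
print(Sw.shape)  # (3, 3): per-class scatters weighted by class proportion
```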
def quest_LAL(exo, cor): <NEW_LINE> <INDENT> A, B, C = geo.choix_points(3) <NEW_LINE> nom = shuffle_nom([A, B, C]) <NEW_LINE> c = 0.1 * random.randint(40, 70) <NEW_LINE> b = 0.1 * random.randint(20, 100) <NEW_LINE> angBAC = 3 * random.randint(7, 50) <NEW_LINE> exo.append(u"\\item Trace un triangle $%s$ tel que $%s%s=\\unit[%s]{cm}$, $%s%s=\\unit[%s]{cm}$ et $\\widehat{%s%s%s}=%s\\degres$" % (nom, A, B, decimaux(c), A, C, decimaux(b), B, A, C, angBAC)) <NEW_LINE> cor.append(u"\\item Trace un triangle $%s$ tel que $%s%s=\\unit[%s]{cm}$, $%s%s=\\unit[%s]{cm}$ et $\\widehat{%s%s%s}=%s\\degres$.\\par" % (nom, A, B, decimaux(c), A, C, decimaux(b), B, A, C, angBAC)) <NEW_LINE> cor.append(u"\\begin{pspicture}(%s,%s)(%s,%s)" % (min(0, b * cos(angBAC)) - 0.4, -1, max(b, b * cos(angBAC)) + 0.4, b * sin(angBAC) + 1)) <NEW_LINE> cor.append(u"\\pstTriangle(0,0){%s}(%s;%s){%s}(%s,0){%s}" % (A, b, angBAC, C, c, B)) <NEW_LINE> cor.append(u"\\color{enonce}\\pstMarkAngle[linecolor=enonce]{%s}{%s}{%s}{%s\\degres}" % (B, A, C, angBAC)) <NEW_LINE> cor.append(u"\\pstLineAB[nodesepB=-1]{%s}{%s}" % (A, C)) <NEW_LINE> cor.append(u"\\pstRotation[RotAngle=7,PointSymbol=none,PointName=none]{%s}{%s}[C_1]" % (A, C)) <NEW_LINE> cor.append(u"\\pstRotation[RotAngle=-7,PointSymbol=none,PointName=none]{%s}{%s}[C_2]" % (A, C)) <NEW_LINE> cor.append(u"\\pstArcOAB[linecolor=calcul]{%s}{C_2}{C_1}" % (A)) <NEW_LINE> cor.append(cotation((0, 0), (c, 0), decimaux(c), couleur="enonce")) <NEW_LINE> if angBAC < 111: <NEW_LINE> <INDENT> x_C, y_C = b * cos(angBAC), b * sin(angBAC) <NEW_LINE> cor.append(cotation_h((0, 0), (x_C, y_C), decimaux(b), couleur="enonce")) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> x_C, y_C = b * cos(angBAC), b * sin(angBAC) <NEW_LINE> cor.append(cotation((x_C, y_C), (0, 0), decimaux(b), couleur="enonce")) <NEW_LINE> <DEDENT> cor.append(u"\\end{pspicture}") | an angle and the lengths of its two sides are given | 625941b3e8904600ed9f1cd0 |
def test_4_sampling(self): <NEW_LINE> <INDENT> tests = [([MILSERVICE], ([MEDICALSERV, SAIMAASEAL, METROPOLIS], [0, 0, 0]), ["MILSERVICE", "MEDICALSERV", "SAIMAASEAL", "METROPOLIS"], 0.183)] <NEW_LINE> for query, (E,e), fields, answer in tests: <NEW_LINE> <INDENT> prob = [approximate_distribution(self.network, query, E, e) for _ in range(3)] <NEW_LINE> estr = "Calculating P({}|{})".format(fields[0], " & ".join(fields[1:])) <NEW_LINE> self.assertTrue(any([is_answer_close(answer, p, EPSILON = 3e-2) for p in prob]), "Error {}; True answer {} while yours: {}".format(estr,answer, round(np.average(prob), 3))) | Check `approximate_distribution`. | 625941b33539df3088e2e0f2 |
def optimal_transport_presolve_2(Y, X, Y_w=None, X_w=None): <NEW_LINE> <INDENT> if X_w is None: <NEW_LINE> <INDENT> X_w = np.ones(X.shape[0]) <NEW_LINE> <DEDENT> if Y_w is None: <NEW_LINE> <INDENT> Y_w = np.ones(Y.shape[0]) <NEW_LINE> <DEDENT> bary_X = np.average(X,axis=0,weights=X_w) <NEW_LINE> bary_Y = np.average(Y,axis=0,weights=Y_w) <NEW_LINE> r_Y = furthest_point(Y, bary_Y) <NEW_LINE> X_hull = ConvexHull(X) <NEW_LINE> points = X_hull.points <NEW_LINE> simplices = X_hull.simplices <NEW_LINE> dmin = distance_point_line(points[simplices[0][0]], points[simplices[0][1]], bary_X) <NEW_LINE> for simplex in simplices: <NEW_LINE> <INDENT> d = distance_point_line(points[simplex[0]], points[simplex[1]], bary_X) <NEW_LINE> if d < dmin: <NEW_LINE> <INDENT> dmin = d <NEW_LINE> <DEDENT> <DEDENT> r_X = dmin <NEW_LINE> ratio = r_X / r_Y <NEW_LINE> psi_tilde0 = 0.5 * ratio * (np.power(Y[:,0]-bary_Y[0],2)+np.power(Y[:,1]-bary_Y[1],2)) + bary_X[0]*(Y[:,0]) + bary_X[1]*(Y[:,1]) <NEW_LINE> psi0 = np.power(Y[:,0],2) + np.power(Y[:,1],2) - 2*psi_tilde0 <NEW_LINE> return psi0 | This function calculates first estimation of the potential.
Parameters
----------
Y : 2D array
Target samples
Y_w : 1D array
Weights associated to Y
X : 2D array
Source samples
X_w : 1D array
Weights associated to X
Returns
-------
psi0 : 1D array
Convex estimation of the potential. Its gradient
sends the convex hull of Y into the convex hull of X. | 625941b3925a0f43d2549c1a |
def setup_sync(): <NEW_LINE> <INDENT> viewers = nuke.selectedNodes('Viewer') <NEW_LINE> viewer_levels = {} <NEW_LINE> remove_viewers = [] <NEW_LINE> if viewers: <NEW_LINE> <INDENT> for viewer in viewers: <NEW_LINE> <INDENT> group = '.'.join(viewer.fullName().split('.')[:-1]) <NEW_LINE> if not group: <NEW_LINE> <INDENT> group = 'root' <NEW_LINE> <DEDENT> group_viewers = viewer_levels.get(group, []) <NEW_LINE> group_viewers.append(viewer) <NEW_LINE> viewer_levels[group] = group_viewers <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> viewers = nuke.allNodes('Viewer') <NEW_LINE> viewer_levels['group'] = viewers <NEW_LINE> <DEDENT> for level in viewer_levels.keys(): <NEW_LINE> <INDENT> if len(viewer_levels[level]) <= 1: <NEW_LINE> <INDENT> del viewer_levels[level] <NEW_LINE> <DEDENT> <DEDENT> bad_viewers = [] <NEW_LINE> for viewers in viewer_levels.values(): <NEW_LINE> <INDENT> for viewer in viewers: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> linked_viewers = _extract_viewer_list(viewer) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> bad_viewers.append(viewer) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> remove_viewers.extend(linked_viewers) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for rm_viewer in list(remove_viewers): <NEW_LINE> <INDENT> for viewers in viewer_levels.values(): <NEW_LINE> <INDENT> if rm_viewer in viewers: <NEW_LINE> <INDENT> remove_viewers.remove(rm_viewer) <NEW_LINE> <DEDENT> <DEDENT> if rm_viewer in bad_viewers: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> remove_viewers.remove(rm_viewer) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if remove_viewers: <NEW_LINE> <INDENT> for viewer in set(remove_viewers): <NEW_LINE> <INDENT> viewer['knobChanged'].setValue('') <NEW_LINE> _remove_knobs(viewer) <NEW_LINE> <DEDENT> <DEDENT> for viewers in viewer_levels.values(): <NEW_LINE> <INDENT> for viewer in bad_viewers: <NEW_LINE> <INDENT> if viewer in viewers: <NEW_LINE> <INDENT> viewers.remove(viewer) <NEW_LINE> <DEDENT> <DEDENT> for viewer in viewers: <NEW_LINE> <INDENT> _add_sync_knobs(viewer) <NEW_LINE> _set_callback(viewer, viewers) | Sets up a viewerSync between a group of Viewer nodes.
This sets up callbacks between either all selected viewers, or all viewers
at the current node graph level (as defined by what nuke.allNodes()
returns). It also sets up a series of settings on the Viewer nodes
themselves, controlling which knobs get synced between the Viewers.
Before setting up the viewers, we check the current knobChanged value.
Often that value is a viewerSync callback already. If so, we deactivate
that viewerSync group before continuing. If the callback is foreign (not
a viewerSync callback), we leave it alone and remove that Viewer from the
viewerSync group, rather than mess up another python process.
Args:
N/A
Returns:
None
Raises:
N/A | 625941b315baa723493c3d19 |
def __wait_for_cleep_process(self, module_name): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> time.sleep(1.0) <NEW_LINE> resp = requests.post(self.CLEEP_COMMAND_URL, json={ 'command': 'get_modules_updates', 'to': 'update' }) <NEW_LINE> resp.raise_for_status() <NEW_LINE> resp_json = resp.json() <NEW_LINE> if resp_json['error']: <NEW_LINE> <INDENT> raise Exception('Get_modules_updates command failed') <NEW_LINE> <DEDENT> module_updates = resp_json['data'].get(module_name) <NEW_LINE> self.logger.debug('Updates: %s' % module_updates) <NEW_LINE> if not module_updates: <NEW_LINE> <INDENT> raise Exception('No "%s" application info in updates data' % module_name) <NEW_LINE> <DEDENT> if module_updates['processing'] == False: <NEW_LINE> <INDENT> if module_updates['update']['failed']: <NEW_LINE> <INDENT> raise Exception('Application "%s" installation failed' % module_name) <NEW_LINE> <DEDENT> break | Wait for end of current Cleep process (install, update...) | 625941b3236d856c2ad44587 |
def get_type(type, calltype, iid_is=None, size_is=None): <NEW_LINE> <INDENT> while isinstance(type, xpidl.Typedef): <NEW_LINE> <INDENT> type = type.realtype <NEW_LINE> <DEDENT> if isinstance(type, xpidl.Builtin): <NEW_LINE> <INDENT> if type.name == 'string' and size_is is not None: <NEW_LINE> <INDENT> return xpt.StringWithSizeType(size_is, size_is) <NEW_LINE> <DEDENT> elif type.name == 'wstring' and size_is is not None: <NEW_LINE> <INDENT> return xpt.WideStringWithSizeType(size_is, size_is) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tag = TypeMap[type.name] <NEW_LINE> isPtr = (tag == xpt.Type.Tags.char_ptr or tag == xpt.Type.Tags.wchar_t_ptr) <NEW_LINE> return xpt.SimpleType(tag, pointer=isPtr, reference=False) <NEW_LINE> <DEDENT> <DEDENT> if isinstance(type, xpidl.Array): <NEW_LINE> <INDENT> return xpt.ArrayType(get_type(type.type, calltype, iid_is), size_is, size_is) <NEW_LINE> <DEDENT> if isinstance(type, xpidl.Interface) or isinstance(type, xpidl.Forward): <NEW_LINE> <INDENT> xptiface = None <NEW_LINE> for i in ifaces: <NEW_LINE> <INDENT> if i.name == type.name: <NEW_LINE> <INDENT> xptiface = i <NEW_LINE> <DEDENT> <DEDENT> if not xptiface: <NEW_LINE> <INDENT> xptiface = xpt.Interface(name=type.name) <NEW_LINE> ifaces.append(xptiface) <NEW_LINE> <DEDENT> return xpt.InterfaceType(xptiface) <NEW_LINE> <DEDENT> if isinstance(type, xpidl.Native): <NEW_LINE> <INDENT> if type.specialtype: <NEW_LINE> <INDENT> isPtr = (type.isPtr(calltype) or type.isRef(calltype)) and not type.specialtype == 'jsval' <NEW_LINE> isRef = type.isRef(calltype) and not type.specialtype == 'jsval' <NEW_LINE> return xpt.SimpleType(TypeMap[type.specialtype], pointer=isPtr, reference=isRef) <NEW_LINE> <DEDENT> elif iid_is is not None: <NEW_LINE> <INDENT> return xpt.InterfaceIsType(iid_is) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return xpt.SimpleType(TypeMap['void'], pointer=True, reference=False) <NEW_LINE> <DEDENT> <DEDENT> raise Exception("Unknown type!") | Return the appropriate xpt.Type object for this param | 625941b38a43f66fc4b53e18 |
def test_githuborg_initerror(githuborg_fixture): <NEW_LINE> <INDENT> githuborg_mock, _, _ = githuborg_fixture <NEW_LINE> githuborg_mock.side_effect = github.BadCredentialsException('', '') <NEW_LINE> with pytest.raises(ValueError): <NEW_LINE> <INDENT> GitHubOrg('org', auth=False) | Test initializing GitHub API failed | 625941b35fc7496912cc372d |
def get_test_data_folder() -> str: <NEW_LINE> <INDENT> return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') | Return absolute path to test data folder. | 625941b356b00c62f0f14403 |
def __init__(self): <NEW_LINE> <INDENT> self.files = {} <NEW_LINE> engine = db_connect() <NEW_LINE> create_deals_table(engine) <NEW_LINE> self.Session = sessionmaker(bind=engine) | Initializes database connection and sessionmaker.
Creates deals table. | 625941b3b57a9660fec33626 |
def set_Filter(self, value): <NEW_LINE> <INDENT> super(AggregatedListInputSet, self)._set_input('Filter', value) | Set the value of the Filter input for this Choreo. ((optional, string) A filter expression for narrowing results in the form: {field_name} {comparison_string} {literal_string} (e.g. name eq my_instance). Comparison strings can be eq (equals) or ne (not equals).) | 625941b36e29344779a623be |
def find_description(self, description, cats=None): <NEW_LINE> <INDENT> result = list() <NEW_LINE> if cats is None: <NEW_LINE> <INDENT> cats = self.cats <NEW_LINE> <DEDENT> for cat in cats: <NEW_LINE> <INDENT> if cat.get_description(description): <NEW_LINE> <INDENT> result.append(cat) <NEW_LINE> <DEDENT> <DEDENT> return result | find cat with description
:param str description: description to search for, can be regex
:param list cats: optional list of Cats to search
:return: list of Cats
:rtype: list | 625941b38c0ade5d55d3e766 |
def updateXAxis(self, update_immediately: bool = False) -> None: <NEW_LINE> <INDENT> if len(self._curves) == 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> min_x = self.plotItem.getAxis('bottom').range[0] <NEW_LINE> max_range = max([curve.max_x() for curve in self._curves]) <NEW_LINE> if min_x == 0: <NEW_LINE> <INDENT> min_x = time.time() <NEW_LINE> self._min_x = min_x <NEW_LINE> self._starting_timestamp = min_x - 60 <NEW_LINE> <DEDENT> elif min_x < self._min_x: <NEW_LINE> <INDENT> self._min_x = min_x <NEW_LINE> if not self._archive_request_queued: <NEW_LINE> <INDENT> self._archive_request_queued = True <NEW_LINE> QTimer.singleShot(1000, self.requestDataFromArchiver) <NEW_LINE> <DEDENT> <DEDENT> if self.plotItem.getViewBox().state['autoRange'][1]: <NEW_LINE> <INDENT> self.plotItem.setXRange(min_x, max_range, padding=0.0, update=update_immediately) | Manages the requests to archiver appliance. When the user pans or zooms the x axis to the left,
a request will be made for backfill data | 625941b3d7e4931a7ee9dcc3 |
@login_required <NEW_LINE> def get_page_list(request): <NEW_LINE> <INDENT> if request.method == 'GET': <NEW_LINE> <INDENT> pages = BasicPage.objects.get_all_basic() <NEW_LINE> mainmemu = mm.objects.get(menu_name='english') <NEW_LINE> pages_dict = {"name":'english' ,"pages": [] } <NEW_LINE> for page in mainmemu.get_children(): <NEW_LINE> <INDENT> pages_dict['pages'].append({ "name": page.menu_name ,"id" : page.content_object.id ,"left" : page.lft ,"right" : page.rgt ,"menu_name" : page.menu_name ,"slug" : page.content_object.slug ,"description": page.content_object.description ,"containers" : {} ,"pages" : [] ,"displayed" : page.display ,"published" : page.content_object.status }) <NEW_LINE> <DEDENT> return HttpResponse(simplejson.dumps(pages_dict), mimetype='application/javascript') | return a JSON dictionary of all available pages
@return: ordered dictionary
menu_id : { id : int
,menu_name : string
,lft : int (element on the left)
,rgt : int (element on the right)
,slug : string
,description : string
,containers : {
container_id : { name : string
, type : string
}
}
,pages: { (recursive) }
,display_in_menu : boolean
,published: int (0 = draft, 1=published)
} | 625941b37b180e01f3dc45b0 |
def set_state(self, state): <NEW_LINE> <INDENT> self.n_unk,self.n_consumed,self.unk_prob,self.consumed_prob = state | Set the decoder state: the numbers of unknown and consumed words and their probabilities | 625941b350812a4eaa59c0ce |
def find_arxml_files(search_path): <NEW_LINE> <INDENT> for subdir, _dirs, files in os.walk(search_path): <NEW_LINE> <INDENT> for file in files: <NEW_LINE> <INDENT> if file.endswith(".arxml"): <NEW_LINE> <INDENT> yield os.path.join(subdir, file) | Yields arxml file paths
Args:
search_path: The directory where to look for arxml files
Yields:
arxml_file_path | 625941b3a79ad161976cbeee |
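Typical consumption of the generator (the search path is hypothetical):

```python
# Lazily walk the tree and print every .arxml file path.
for arxml_path in find_arxml_files('/path/to/autosar/project'):
    print(arxml_path)
```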
def __init__(self,login, password, parent=None): <NEW_LINE> <INDENT> super().__init__(parent) <NEW_LINE> self.setupUi(self) <NEW_LINE> self.db = AccesBdd(login, password) <NEW_LINE> self.actionEnregistrer.setEnabled(False) <NEW_LINE> self.actionMise_jour.setEnabled(False) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(0,200) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(1,200) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(2,200) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(3,200) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(4,200) <NEW_LINE> self.tableWidget_table_etalonnage.setColumnWidth(5,200) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(0,200) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(1,200) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(2,200) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(3,100) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(4,100) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(5,150) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(6,100) <NEW_LINE> self.tableWidget_modelisation.setColumnWidth(7,150) <NEW_LINE> self.x_array = [] <NEW_LINE> self.y_array = [] <NEW_LINE> self.y_modelise = [] <NEW_LINE> self.qwtPlot.setCanvasBackground(Qt.gray) <NEW_LINE> self.qwtPlot.setAxisTitle(QwtPlot.xBottom, 'Temperature') <NEW_LINE> self.qwtPlot.setAxisAutoScale(QwtPlot.xBottom) <NEW_LINE> self.qwtPlot.setAxisTitle(QwtPlot.yLeft, 'Correction') <NEW_LINE> self.qwtPlot.setAxisAutoScale(QwtPlot.yLeft) <NEW_LINE> self.curve = QwtPlotCurve('Correction = f(Tlue)') <NEW_LINE> self.curve_2 = QwtPlotCurve('y = f(x)') | Constructor
@param parent reference to the parent widget (QWidget) | 625941b3167d2b6e31218946 |
def dive_sphere(inputfile, outputfile, onlyfrag1): <NEW_LINE> <INDENT> frags_db = FragmentsDb(inputfile) <NEW_LINE> nodes = {} <NEW_LINE> samples = len(frags_db) <NEW_LINE> sql = 'SELECT frag_id, pdb_code, het_code FROM fragments' <NEW_LINE> if onlyfrag1: <NEW_LINE> <INDENT> sql += ' WHERE frag_id LIKE "%_frag1"' <NEW_LINE> frags_db.cursor.execute('SELECT count(*) FROM fragments WHERE frag_id LIKE "%_frag1"') <NEW_LINE> samples = frags_db.cursor.fetchone()[0] <NEW_LINE> <DEDENT> rnd = 1. <NEW_LINE> offset = 2. / samples <NEW_LINE> increment = math.pi * (3. - math.sqrt(5.)); <NEW_LINE> frag_ids = frags_db.cursor.execute(sql) <NEW_LINE> for i, frag in enumerate(frag_ids): <NEW_LINE> <INDENT> y = ((i * offset) - 1) + (offset / 2); <NEW_LINE> r = math.sqrt(1 - pow(y, 2)) <NEW_LINE> phi = ((i + rnd) % samples) * increment <NEW_LINE> x = math.cos(phi) * r <NEW_LINE> z = math.sin(phi) * r <NEW_LINE> node_info = { 'Path': [], 'Coordinates': [x, y, z], 'Categories': [frag[1], frag[2]], 'Properties': [] } <NEW_LINE> nodes[frag[0]] = node_info <NEW_LINE> <DEDENT> json.dump(nodes, outputfile) | Export fragments as DiVE formatted sphere
Args:
inputfile (str): fragments db input file
outputfile (file): fragments dive output file
onlyfrag1 (bool): Only \*_frag1 | 625941b3627d3e7fe0d68bf6 |
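The `(x, y, z)` coordinates above follow golden-angle ("Fibonacci sphere") spacing; a standalone sketch of just that placement, using the same `rnd = 1` offset as the function:

```python
import math

def fibonacci_sphere(samples, rnd=1.0):
    # Spread `samples` points near-uniformly over the unit sphere.
    offset = 2.0 / samples
    increment = math.pi * (3.0 - math.sqrt(5.0))  # golden angle
    points = []
    for i in range(samples):
        y = ((i * offset) - 1) + (offset / 2)
        r = math.sqrt(1 - y ** 2)
        phi = ((i + rnd) % samples) * increment
        points.append((math.cos(phi) * r, y, math.sin(phi) * r))
    return points

print(fibonacci_sphere(4))
```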
def get_probability_is_argument(mdl, fm, predicate_words,pos=5): <NEW_LINE> <INDENT> X = [] <NEW_LINE> X_new = [] <NEW_LINE> X.append([] ) <NEW_LINE> X_new.append([[predicate_words , "PRED"]]) <NEW_LINE> score = [] <NEW_LINE> for p in range(pos): <NEW_LINE> <INDENT> n=1 <NEW_LINE> X_new, rs_scores, X, myscores = get_scores_all(mdl, fm, X, X_new, num_select=n) <NEW_LINE> score.append((myscores, rs_scores)) <NEW_LINE> <DEDENT> return score | calculate the probability that a word is an argument of a predicate
:param mdl:
:param fm:
:return: | 625941b3d99f1b3c44c67348 |
def domains_check(domain_check_list: list, checker_name: str, n_threads=n_threads, regexp=regexp, timeout=timeout, verbose=verbose): <NEW_LINE> <INDENT> opend=[] <NEW_LINE> total_num_sites = len(domain_check_list) <NEW_LINE> log.info(f"[O] Количество {checker_name} для проверки: {str(total_num_sites)}") <NEW_LINE> if total_num_sites == 0: <NEW_LINE> <INDENT> log.critical("Nothing to do") <NEW_LINE> input("Нажмите Enter чтобы выйти...") <NEW_LINE> exit(0) <NEW_LINE> <DEDENT> url_list_lock = Lock() <NEW_LINE> workerthreadlist=[] <NEW_LINE> for x in range(0,n_threads-1): <NEW_LINE> <INDENT> newthread = WorkerThread(domain_check_list,url_list_lock,regexp,timeout,verbose,total_num_sites) <NEW_LINE> workerthreadlist.append(newthread) <NEW_LINE> newthread.start() <NEW_LINE> <DEDENT> while len(workerthreadlist) > 0: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> workerthreadlist = [t.join(1) for t in workerthreadlist if t is not None and t.isAlive()] <NEW_LINE> <DEDENT> except KeyboardInterrupt: <NEW_LINE> <INDENT> log.warning("\nCtrl-c! Остановка всех потоков...") <NEW_LINE> for t in workerthreadlist: <NEW_LINE> <INDENT> t.kill_received = True <NEW_LINE> <DEDENT> exit(0) <NEW_LINE> <DEDENT> <DEDENT> print() <NEW_LINE> perc = len(opend)*100/total_num_sites <NEW_LINE> print(colored("[f]",'cyan'), end="") if perc else print(colored("[ok]",'cyan'),end="") <NEW_LINE> print (colored(f" Процент открывшихся {checker_name}: {str(perc)}%", 'cyan')) <NEW_LINE> if perc: <NEW_LINE> <INDENT> log.warning(f"[f] Открывшиеся {checker_name}:") <NEW_LINE> for url in opend: <NEW_LINE> <INDENT> log.warning(f"\t[f] {url}") | Starts checking the list of sites and makes sure there is no lock | 625941b324f1403a9260091a |
def is_non_science_or_lab(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> is_non_science(self) <NEW_LINE> return <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if not self._is_on_mountain(): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> raise KeyError(f"{self._log_prefix}: Required key is missing and this is a mountain science observation") | Pseudo method to determine whether this is a lab or non-science
header.
Raises
------
KeyError
If this is a science observation and on the mountain. | 625941b3b5575c28eb68dda5 |
def build_model(self): <NEW_LINE> <INDENT> states = layers.Input(shape=(self.state_size,), name='states') <NEW_LINE> actions = layers.Input(shape=(self.action_size,), name='actions') <NEW_LINE> net_states = layers.Dense(units=32, activation='tanh')(states) <NEW_LINE> net_states = layers.Dense(units=64, activation='tanh')(net_states) <NEW_LINE> net_actions = layers.Dense(units=32, activation='tanh')(actions) <NEW_LINE> net_actions = layers.Dense(units=64, activation='tanh')(net_actions) <NEW_LINE> net = layers.Add()([net_states, net_actions]) <NEW_LINE> net = layers.Activation('tanh')(net) <NEW_LINE> Q_values = layers.Dense(units=1, name='q_values')(net) <NEW_LINE> self.model = models.Model(inputs=[states, actions], outputs=Q_values) <NEW_LINE> optimizer = optimizers.Adam() <NEW_LINE> self.model.compile(optimizer=optimizer, loss='mse') <NEW_LINE> action_gradients = K.gradients(Q_values, actions) <NEW_LINE> self.get_action_gradients = K.function( inputs=[*self.model.input, K.learning_phase()], outputs=action_gradients) | Build a critic (value) network that maps (state, action) pairs -> Q-values. | 625941b3b7558d58953c4cc8 |
def _do_add_property(self, updates, change): <NEW_LINE> <INDENT> key = change['path'][1] <NEW_LINE> if key in updates: <NEW_LINE> <INDENT> msg = _("Property %s already present.") <NEW_LINE> raise webob.exc.HTTPConflict(msg % key) <NEW_LINE> <DEDENT> updates[key] = change['value'] | Add a new image property, ensuring it does not already exist. | 625941b338b623060ff0ab9e |
def start(update_queue, response_queue, stop_page=True, port=5000, secret_key=os.urandom(24)): <NEW_LINE> <INDENT> process = multiprocessing.Process(target=new_server, args=(update_queue, response_queue, stop_page, port, secret_key)) <NEW_LINE> process.daemon = True <NEW_LINE> process.start() | Start new server on `port`.
This function creates a new daemon process and starts it. | 625941b30a50d4780f666c37 |
def get_revisions_until_request(self): <NEW_LINE> <INDENT> for r in self.current_page.revisions(content=True): <NEW_LINE> <INDENT> if '{{Löschantragstext' not in r.text: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> user = None if r.anon else r.user <NEW_LINE> yield user, r.timestamp | Read the version history until the deletion template was found. | 625941b30a366e3fb873e5be |
def save(self): <NEW_LINE> <INDENT> saved_file = os.path.join(self.__path, self.name) <NEW_LINE> with open(saved_file, 'wb') as file: <NEW_LINE> <INDENT> my_pickler = pickle.Pickler(file) <NEW_LINE> my_pickler.dump(self) | Save a game in a file. File name is game name. | 625941b37047854f462a11b6 |
def FreezeCore(self,*args): <NEW_LINE> <INDENT> pass | FreezeCore(self: QuaternionAnimationUsingKeyFrames,isChecking: bool) -> bool
Makes this instance of System.Windows.Media.Animation.QuaternionAnimationUsingKeyFrames object
unmodifiable or determines whether it can be made unmodifiable.
isChecking: true to check if this instance can be frozen; false to freeze this instance.
Returns: If isChecking is true, this method returns true if this instance can be made read-only, or false
if it cannot be made read-only. If isChecking is false, this method returns true if this
instance is now read-only, or false if it cannot be made read-only, with the side effect of
having begun to change the frozen status of this object. | 625941b3498bea3a759b985a |
def parse_str_for_metadata(main_content_str): <NEW_LINE> <INDENT> meta_dict = {} <NEW_LINE> try: <NEW_LINE> <INDENT> meta_match = gv.re_metadata_finder.findall(main_content_str) <NEW_LINE> assert len(meta_match) == 1 <NEW_LINE> meta_str = meta_match[0] <NEW_LINE> meta_list = [e for e in meta_str.split('\n') if e != ''] <NEW_LINE> for entry in meta_list: <NEW_LINE> <INDENT> temp_list = entry.split(gv.metadata_split_marker) <NEW_LINE> if len(temp_list) == 2: <NEW_LINE> <INDENT> name, content = [e.strip() for e in temp_list] <NEW_LINE> meta_dict[name] = content <NEW_LINE> <DEDENT> <DEDENT> return meta_dict <NEW_LINE> <DEDENT> except AssertionError: <NEW_LINE> <INDENT> return {} | Look for metadata within the delimiters
'## BEGIN METADATA ##' and '## END METADATA ##'
(which are on their own separate lines)
and convert it into HTML metadata of the form
<meta name="{name}" content="{content}">.
Returns a string of these HTML-formatted metadata
(or the empty string if no metadata exists).
In the file, the metadata should be separated by two colons:
{name}::{content} | 625941b316aa5153ce362220 |
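An illustrative round trip, given the `::` separator and delimiter lines the docstring describes (assumes the module-level `gv.re_metadata_finder` regex captures the block between the delimiters, as the code implies):

```python
doc = """## BEGIN METADATA ##
author::Jane Doe
keywords::python, metadata
## END METADATA ##"""

print(parse_str_for_metadata(doc))
# -> {'author': 'Jane Doe', 'keywords': 'python, metadata'}
```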
def _write_after(self, indx): <NEW_LINE> <INDENT> writing_after_indx=[(line, sls) for (line, i, sls) in self._writing_after if i == indx] <NEW_LINE> for noteline, sls in writing_after_indx: <NEW_LINE> <INDENT> self.parent.write(noteline, sls) | Write everything we've been waiting to. | 625941b355399d3f0558845c |
def _prepare_downloaded_spm_auditory_data(subject_dir): <NEW_LINE> <INDENT> subject_data = {} <NEW_LINE> for file_name in SPM_AUDITORY_DATA_FILES: <NEW_LINE> <INDENT> file_path = os.path.join(subject_dir, file_name) <NEW_LINE> if os.path.exists(file_path): <NEW_LINE> <INDENT> subject_data[file_name] = file_path <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('%s missing from filelist!' % file_name) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> _subject_data = {} <NEW_LINE> _subject_data['func'] = sorted( [subject_data[x] for x in subject_data.keys() if re.match('^fM00223_0\d\d\.img$', os.path.basename(x))]) <NEW_LINE> for x in _subject_data['func']: <NEW_LINE> <INDENT> vol = nib.load(x) <NEW_LINE> if len(vol.shape) == 4: <NEW_LINE> <INDENT> vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0], vol.affine) <NEW_LINE> nib.save(vol, x) <NEW_LINE> <DEDENT> <DEDENT> _subject_data['anat'] = [subject_data[x] for x in subject_data.keys() if re.match('^sM00223_002\.img$', os.path.basename(x))][0] <NEW_LINE> vol = nib.load(_subject_data['anat']) <NEW_LINE> if len(vol.shape) == 4: <NEW_LINE> <INDENT> vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0], vol.affine) <NEW_LINE> nib.save(vol, _subject_data['anat']) <NEW_LINE> <DEDENT> return Bunch(**_subject_data) | Uncompresses downloaded spm_auditory dataset and organizes
the data into appropriate directories.
Parameters
----------
subject_dir: string
Path to subject's data directory.
Returns
-------
_subject_data: skl.Bunch object
Scikit-Learn Bunch object containing data of a single subject
from the SPM Auditory dataset. | 625941b31b99ca400220a858 |
def create_parser() -> argparse.ArgumentParser: <NEW_LINE> <INDENT> parser = argparse.ArgumentParser() <NEW_LINE> parser.add_argument("-i", "--input", default="input/", help="Path to directory with input data.") <NEW_LINE> parser.add_argument("-o", "--output", default="output/", help="Directory where to store results.") <NEW_LINE> parser.add_argument("-r", "--random", default=42, type=int, help="Random seed.") <NEW_LINE> parser.add_argument("-n", "--number", default=3, type=int, help="Number of nearest neighbors") <NEW_LINE> parser.add_argument("-l", "--language", default="en_core_web_lg", type=str, help="Default value to load spacy.") <NEW_LINE> return parser | Initialize the command line argument parser. | 625941b3d6c5a10208143dee |
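Typical use of the parser, passing arguments explicitly for the sketch:

```python
parser = create_parser()
args = parser.parse_args(['-i', 'input/', '-n', '5'])
print(args.number, args.language)  # 5 en_core_web_lg
```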
def tick(self, force_update=False): <NEW_LINE> <INDENT> now = datetime.datetime.now( TZOffset()).replace(second=0, microsecond=0) <NEW_LINE> if now == self.last_tick and not force_update: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> if self.settings.remind_idle > datetime.timedelta(0): <NEW_LINE> <INDENT> if self.last_tick and now - self.last_tick > self.settings.remind_idle: <NEW_LINE> <INDENT> self.time_before_idle = self.last_tick <NEW_LINE> self.resume_from_idle() <NEW_LINE> <DEDENT> screensaving = self.screensaver and self.screensaver.GetActive() <NEW_LINE> if not screensaving == self.screensaving: <NEW_LINE> <INDENT> self.screensaving = screensaving <NEW_LINE> if screensaving: <NEW_LINE> <INDENT> self.time_before_idle = self.last_tick <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if now - self.time_before_idle > self.settings.remind_idle: <NEW_LINE> <INDENT> self.resume_from_idle() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if self.tasks.check_reload(): <NEW_LINE> <INDENT> self.set_up_task_list() <NEW_LINE> <DEDENT> self.last_tick = now <NEW_LINE> last_time = self.timelog.window.last_time() <NEW_LINE> if not self.inserting_old_time: <NEW_LINE> <INDENT> if last_time is None: <NEW_LINE> <INDENT> if self.time_label.get_text() != 'Arrival message:': <NEW_LINE> <INDENT> self.time_label.set_text(now.strftime("Arrival message:")) <NEW_LINE> self.process_new_day_tasks() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.time_label.set_text(format_duration(now - last_time)) <NEW_LINE> if not self.lock: <NEW_LINE> <INDENT> self.delete_footer() <NEW_LINE> self.add_footer() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return True | Tick every second. | 625941b366673b3332b91e3f |
def _desc(self): <NEW_LINE> <INDENT> return '%s(%s%s)' % (self.__class__.__name__, self._delay, '' if self._value is None else (', value=%s' % self._value)) | Return a string *Timeout(delay[, value=value])*. | 625941b3711fe17d82542125 |
def config_transport(transports, transport, transport_config): <NEW_LINE> <INDENT> lgr.debug('configuring transport...') <NEW_LINE> if hasattr(transports, transport): <NEW_LINE> <INDENT> transport_instance = getattr(transports, transport)(transport_config) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> lgr.error('could not find transport: {0}. please make sure the ' 'transport you\'re calling exists.'.format(transport)) <NEW_LINE> raise FeedrError('missing transport: {0}'.format(transport)) <NEW_LINE> <DEDENT> client = transport_instance.configure() <NEW_LINE> return transport_instance, client | returns a configured instance and client for the transport
:param transports: transport classes to choose from.
:param string transport: transport to use
:param dict transport_config: transport configuration | 625941b323849d37ff7b2e41 |
def smbus_open(bus_str, po): <NEW_LINE> <INDENT> global bus <NEW_LINE> global i2c_msg <NEW_LINE> m = re.match(r'(smbus|i2c):([0-9]+)', bus_str) <NEW_LINE> if m: <NEW_LINE> <INDENT> po.api_type = str(m.group(1)) <NEW_LINE> bus_index = int(m.group(2),0) <NEW_LINE> if po.dry_run: <NEW_LINE> <INDENT> bus = SMBusMock(bus_index) <NEW_LINE> bus.pec = True <NEW_LINE> if po.api_type == "i2c": <NEW_LINE> <INDENT> class i2c_msg(i2c_msg_mock): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> return <NEW_LINE> <DEDENT> import smbus2 <NEW_LINE> bus = smbus2.SMBus(bus_index) <NEW_LINE> bus.pec = True <NEW_LINE> if po.api_type == "i2c": <NEW_LINE> <INDENT> from smbus2 import i2c_msg <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> raise ValueError("Unrecognized bus definition") | Opens the System Management Bus over the I2C interface
The current implementation is for Raspberry Pi. | 625941b331939e2706e4cc1a |
def __init__(self): <NEW_LINE> <INDENT> print("Initializing") | Add any initialization parameters. These will be passed at runtime from the graph definition parameters defined in your seldondeployment kubernetes resource manifest. | 625941b33617ad0b5ed67ca7 |
@click.command('new') <NEW_LINE> @click.argument('language', type=click.Choice(['python3', 'r']), default='python3') <NEW_LINE> @click.option('--mem', type=int, default=None, help='Memory allocated for this notebook in MiB.') <NEW_LINE> @click.option('--cpu', type=int, default=None, help='CPU available for this notebook in 1/1000 of a core.') <NEW_LINE> def notebooks_new_cmd(language='python3', mem=None, cpu=None): <NEW_LINE> <INDENT> client = civis.APIClient() <NEW_LINE> kwargs = {'memory': mem, 'cpu': cpu} <NEW_LINE> kwargs = {k: v for k, v in kwargs.items() if v is not None} <NEW_LINE> new_nb = client.notebooks.post(language=language, **kwargs) <NEW_LINE> print("Created new {language} notebook with ID {id} in Civis Platform" " (https://platform.civisanalytics.com/#/notebooks/{id})." .format(language=language, id=new_nb.id)) <NEW_LINE> _notebooks_up(new_nb.id) <NEW_LINE> _notebooks_open(new_nb.id) | Create a new notebook and open it in the browser. | 625941b3925a0f43d2549c1b |
def do_determine_instance_ip(self): <NEW_LINE> <INDENT> options = self.options <NEW_LINE> logging.debug('Looking up IP address for "%s"...', options.deploy_google_instance) <NEW_LINE> response = check_subprocess( 'gcloud compute instances describe' ' --format json' ' --account {gcloud_account}' ' --project {project} --zone {zone} {instance}' .format(gcloud_account=options.deploy_hal_google_service_account, project=options.deploy_google_project, zone=options.deploy_google_zone, instance=options.deploy_google_instance), stderr=subprocess.PIPE) <NEW_LINE> nic = decode_json(response)['networkInterfaces'][0] <NEW_LINE> use_internal_ip = options.deploy_google_use_internal_ip <NEW_LINE> if use_internal_ip: <NEW_LINE> <INDENT> logging.debug('Using internal IP=%s', nic['networkIP']) <NEW_LINE> return nic['networkIP'] <NEW_LINE> <DEDENT> ip = nic['accessConfigs'][0]['natIP'] <NEW_LINE> logging.debug('Using natIP=%s', ip) <NEW_LINE> return ip | Implements GenericVmValidateBomDeployer interface. | 625941b30c0af96317bb7f92 |
def autoRepeatDelay(self): <NEW_LINE> <INDENT> return 0 | autoRepeatDelay(self) -> int | 625941b3baa26c4b54cb0ecf |
def _handle_exception(self, id_, data, message): <NEW_LINE> <INDENT> self.execute_error.emit('dependency', id_, data) <NEW_LINE> raise RuntimeError(message) | Slot performed if the threaded method has raised an exception. | 625941b37047854f462a11b7 |
def load_networks(shared, task_id, device, logger, mnet, hnet, hhnet=None, dis=None): <NEW_LINE> <INDENT> ckpt_dict, score = tckpt.load_checkpoint(shared.ckpt_mnet_fn % task_id, mnet, device=device, ret_performance_score=True) <NEW_LINE> if hnet is not None: <NEW_LINE> <INDENT> tckpt.load_checkpoint(shared.ckpt_hnet_fn % task_id, hnet, device=device) <NEW_LINE> <DEDENT> if hhnet is not None: <NEW_LINE> <INDENT> tckpt.load_checkpoint(shared.ckpt_hhnet_fn % task_id, hhnet, device=device) <NEW_LINE> <DEDENT> if dis is not None: <NEW_LINE> <INDENT> tckpt.load_checkpoint(shared.ckpt_dis_fn % task_id, dis, device=device) <NEW_LINE> <DEDENT> logger.debug('Loaded network(s) for task %d from checkpoint,' % task_id + 'that has a performance score of %f.' % score) <NEW_LINE> return score | Load checkpointed networks.
Args:
shared (argparse.Namespace): Miscellaneous data shared among training
functions (contains filenames).
task_id (int): On which task have the networks been trained last.
device: PyTorch device.
logger: Console (and file) logger.
mnet: The main network.
hnet: The hypernetwork. May be ``None``.
hhnet (optional): The hyper-hypernetwork.
dis (optional): The discriminator network.
Returns:
(float): The performance score of the checkpoint. | 625941b3004d5f362079a0e1 |
def open(filename, mode="rb", encoding=None, errors=None, newline=None, block_size=BLOCKSIZE_DEFAULT, block_linked=True, compression_level=COMPRESSIONLEVEL_MIN, content_checksum=False, block_checksum=False, auto_flush=False, return_bytearray=False, source_size=0): <NEW_LINE> <INDENT> if 't' in mode: <NEW_LINE> <INDENT> if 'b' in mode: <NEW_LINE> <INDENT> raise ValueError('Invalid mode: %r' % (mode,)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if encoding is not None: <NEW_LINE> <INDENT> raise ValueError( "Argument 'encoding' not supported in binary mode" ) <NEW_LINE> <DEDENT> if errors is not None: <NEW_LINE> <INDENT> raise ValueError("Argument 'errors' not supported in binary mode") <NEW_LINE> <DEDENT> if newline is not None: <NEW_LINE> <INDENT> raise ValueError("Argument 'newline' not supported in binary mode") <NEW_LINE> <DEDENT> <DEDENT> _mode = mode.replace('t', '') <NEW_LINE> binary_file = LZ4FrameFile( filename, mode=_mode, block_size=block_size, block_linked=block_linked, compression_level=compression_level, content_checksum=content_checksum, block_checksum=block_checksum, auto_flush=auto_flush, return_bytearray=return_bytearray, source_size=source_size, ) <NEW_LINE> if 't' in mode: <NEW_LINE> <INDENT> return io.TextIOWrapper(binary_file, encoding, errors, newline) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return binary_file | Open an LZ4Frame-compressed file in binary or text mode.
``filename`` can be either an actual file name (given as a str, bytes, or
PathLike object), in which case the named file is opened, or it can be an
existing file object to read from or write to.
The ``mode`` argument can be ``'r'``, ``'rb'`` (default), ``'w'``,
``'wb'``, ``'x'``, ``'xb'``, ``'a'``, or ``'ab'`` for binary mode, or
``'rt'``, ``'wt'``, ``'xt'``, or ``'at'`` for text mode.
For binary mode, this function is equivalent to the `LZ4FrameFile`
constructor: `LZ4FrameFile(filename, mode, ...)`.
For text mode, an `LZ4FrameFile` object is created, and wrapped in an
``io.TextIOWrapper`` instance with the specified encoding, error handling
behavior, and line ending(s).
Args:
filename (str, bytes, os.PathLike): file name or file object to open
Keyword Args:
mode (str): mode for opening the file
encoding (str): the name of the encoding that will be used for
encoding/decoding the stream. It defaults to
``locale.getpreferredencoding(False)``. See ``io.TextIOWrapper``
for further details.
errors (str): specifies how encoding and decoding errors are to be
handled. See ``io.TextIOWrapper`` for further details.
newline (str): controls how line endings are handled. See
``io.TextIOWrapper`` for further details.
return_bytearray (bool): When ``False`` a bytes object is returned
from the calls to methods of this class. When ``True`` a bytearray
object will be returned. The default is ``False``.
source_size (int): Optionally specify the total size of the
uncompressed data. If specified, will be stored in the compressed
frame header as an 8-byte field for later use during decompression.
Default is 0 (no size stored). Only used for writing compressed
files.
block_size (int): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`.
block_linked (bool): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`.
compression_level (int): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`.
content_checksum (bool): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`.
block_checksum (bool): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`.
auto_flush (bool): Compressor setting. See
`lz4.frame.LZ4FrameCompressor`. | 625941b3f548e778e58cd324 |
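A short round trip through the text-mode path described above, using the public `lz4.frame` API (the file name is illustrative):

```python
import lz4.frame

with lz4.frame.open('notes.txt.lz4', mode='wt', encoding='utf-8') as f:
    f.write('compressed hello\n')

with lz4.frame.open('notes.txt.lz4', mode='rt', encoding='utf-8') as f:
    print(f.read())  # -> compressed hello
```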
def discriminator(x, convolutional=True, n_features=32, rgb=False, reuse=False): <NEW_LINE> <INDENT> with tf.variable_scope('discriminator', reuse=reuse): <NEW_LINE> <INDENT> return encoder( x=x, convolutional=convolutional, filter_sizes=[5, 5, 5, 5], dimensions=[ n_features, n_features * 2, n_features * 4, n_features * 8 ] if convolutional else [n_features, 128, 256], activation=tf.nn.relu6, output_activation=None, reuse=reuse) | Summary
Parameters
----------
x : TYPE
Description
convolutional : bool, optional
Description
n_features : int, optional
Description
rgb : bool, optional
Description
reuse : bool, optional
Description
Returns
-------
name : TYPE
Description | 625941b315baa723493c3d1a |
def page_not_found(request): <NEW_LINE> <INDENT> from django.shortcuts import render_to_response <NEW_LINE> response = render_to_response('404.html', {}) <NEW_LINE> response.status_code = 404 <NEW_LINE> return response | Global 404 handler. | 625941b3ab23a570cc24ff30 |
def test_create(self): <NEW_LINE> <INDENT> row = baker.make(self.to_bake) <NEW_LINE> self.assertTrue(isinstance(row, self.model)) | verify object can be created | 625941b31f5feb6acb0c4906 |
def handle_data_wire3(self, pins): <NEW_LINE> <INDENT> clk, dio, stb = pins <NEW_LINE> if self.bitcount >= 8: <NEW_LINE> <INDENT> self.handle_byte_wire3() <NEW_LINE> self.clear_data() <NEW_LINE> self.ss_byte = self.samplenum <NEW_LINE> <DEDENT> self.bits.insert(0, [dio, self.samplenum, self.samplenum]) <NEW_LINE> self.databyte >>= 1 <NEW_LINE> self.databyte |= (dio << 7) <NEW_LINE> if self.bitcount > 0: <NEW_LINE> <INDENT> self.bits[1][2] = self.samplenum <NEW_LINE> <DEDENT> self.bitcount += 1 | Process data bits at CLK rising edge for 3-wire bus. | 625941b3099cdd3c635f0a06 |
def get_group_feed(self, group): <NEW_LINE> <INDENT> Url = BASE_URL + 'me/groups//{0}/'.format(group) <NEW_LINE> res = self._get(url=Url) <NEW_LINE> id = res['data'][0]['id'] <NEW_LINE> Url2 = BASE_URL + '{0}/feed'.format(id) <NEW_LINE> return self._get(url=Url2) | get the newsfeed of a group | 625941b33539df3088e2e0f4 |
def setReportInstructions(self, assessmentNumber = 1, reportBy = 2): <NEW_LINE> <INDENT> if not isinstance(assessmentNumber, int): <NEW_LINE> <INDENT> raise CourseSubreportError("Invalid assessmentNumber parameter: must be an integer.") <NEW_LINE> <DEDENT> elif assessmentNumber < 1: <NEW_LINE> <INDENT> raise CourseSubreportError("Invalid assessmentNumber parameter: must be an integer greater than 1.") <NEW_LINE> <DEDENT> if not isinstance(reportBy, int): <NEW_LINE> <INDENT> raise CourseSubreportError("Invalid reportBy parameter: must be a int.") <NEW_LINE> <DEDENT> self.assessmentNumber = assessmentNumber <NEW_LINE> self.reportBy = reportBy | Constructor method.
@param int assessmentNumber : Integer that indicates the number of the assessment to which the answers that must be counted were given.
@param int reportBy : Integer that marks if the course subreport should be made by class number, professor or course.
@return :
@author | 625941b394891a1f4081b851 |
def has_children(self, obj): <NEW_LINE> <INDENT> return len(self.get_children(obj)) > 0 | Returns whether the object has children.
| 625941b315baa723493c3d1b |
def get_user_self(self, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('callback'): <NEW_LINE> <INDENT> return self.get_user_self_with_http_info(**kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.get_user_self_with_http_info(**kwargs) <NEW_LINE> return data | Get current user
Get the currently authenticated user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_user_self(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str include: Extra fields to include in the response
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread. | 625941b3a05bb46b383ec5d7 |
def linear_forward_test_case(): <NEW_LINE> <INDENT> np.random.seed(1) <NEW_LINE> A = np.random.randn(3, 2) <NEW_LINE> W = np.random.randn(1, 3) <NEW_LINE> b = np.random.randn(1, 1) <NEW_LINE> return A, W, b | [LINEAR] Linear part
Test sample for the linear_forward() function.
:return:
X = np.array([[ 1.62434536, -0.61175641],
[-0.52817175, -1.07296862],
[ 0.86540763, -2.3015387 ]])
W = np.array([[ 1.74481176, -0.7612069 , 0.3190391 ]])
b = np.array([[-0.24937038]])) | 625941b3097d151d1a222c0d |
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, background, play_button): <NEW_LINE> <INDENT> screen.fill(ai_settings.bg_color) <NEW_LINE> background.blitme() <NEW_LINE> for bullet in bullets.sprites(): <NEW_LINE> <INDENT> bullet.draw_bullet() <NEW_LINE> <DEDENT> ship.blitme() <NEW_LINE> aliens.draw(screen) <NEW_LINE> sb.show_score() <NEW_LINE> if not stats.game_active: <NEW_LINE> <INDENT> play_button.draw_button() <NEW_LINE> <DEDENT> pygame.display.flip() | Update images on the screen and flip to the new screen. | 625941b356b00c62f0f14405 |
def maxPoints(self, points): <NEW_LINE> <INDENT> result = 0 <NEW_LINE> for i in range(len(points)): <NEW_LINE> <INDENT> maps = {float("-inf"): 1} <NEW_LINE> same = 0 <NEW_LINE> for j in range(i + 1, len(points)): <NEW_LINE> <INDENT> if points[j].x == points[i].x and points[j].y == points[i].y: <NEW_LINE> <INDENT> same += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if points[j].x == points[i].x: <NEW_LINE> <INDENT> key = float("inf") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key = (float)(points[j].y - points[i].y) / (points[j].x - points[i].x) <NEW_LINE> <DEDENT> if key not in maps: <NEW_LINE> <INDENT> maps[key] = 1 <NEW_LINE> <DEDENT> maps[key] += 1 <NEW_LINE> <DEDENT> <DEDENT> for k in maps: <NEW_LINE> <INDENT> result = max(result, maps[k] + same) <NEW_LINE> <DEDENT> <DEDENT> return result | :type points: List[Point]
:rtype: int | 625941b36e29344779a623c0 |
def xyz2input(filename): <NEW_LINE> <INDENT> abiinput = AbinitInput() <NEW_LINE> atomdict = atomic_symbol() <NEW_LINE> rf = open(filename, 'r') <NEW_LINE> natom = int(rf.readline()) <NEW_LINE> typat = [] <NEW_LINE> znucl = [] <NEW_LINE> xangst = [] <NEW_LINE> ntypat = 0 <NEW_LINE> rf.readline() <NEW_LINE> data = rf.readlines() <NEW_LINE> for i in range(natom): <NEW_LINE> <INDENT> atom = data[i].split() <NEW_LINE> atomnumber = atomdict[atom[0]] <NEW_LINE> if atomnumber not in znucl: <NEW_LINE> <INDENT> ntypat += 1 <NEW_LINE> znucl.append(atomnumber) <NEW_LINE> <DEDENT> typat.append(znucl.index(atomnumber) + 1) <NEW_LINE> xangst += [float(atom[1]), float(atom[2]), float(atom[3])] <NEW_LINE> <DEDENT> abiinput.variables['natom'] = np.array([natom]) <NEW_LINE> abiinput.variables['znucl'] = np.array(znucl) <NEW_LINE> abiinput.variables['ntypat'] = np.array([ntypat]) <NEW_LINE> abiinput.variables['typat'] = np.array(typat) <NEW_LINE> abiinput.variables['xcart'] = angstrom_bohr * np.array(xangst) <NEW_LINE> return abiinput | Reads a .xyz and return an ABINIT input
as a python dictionary | 625941b36e29344779a623c1 |
def workspace_open_requested(self): <NEW_LINE> <INDENT> if self.openWorkspaceCB.isVisible(): <NEW_LINE> <INDENT> return self.openWorkspaceCB.isChecked() <NEW_LINE> <DEDENT> return True | Returns a boolean of whether the user requested the workspace be opened
on model success. | 625941b3cc40096d61595701 |
def cross_validation(train_data, train_labels, k_range=np.arange(1,16)): <NEW_LINE> <INDENT> train_data = np.array(train_data) <NEW_LINE> train_labels = np.array(train_labels) <NEW_LINE> accuracy_array = [] <NEW_LINE> for k in k_range: <NEW_LINE> <INDENT> X = train_data <NEW_LINE> kf = KFold(n_splits=10, shuffle=True) <NEW_LINE> accuracy_temp = [] <NEW_LINE> for train, test in kf.split(X): <NEW_LINE> <INDENT> knn = KNearestNeighbor(train_data[train], train_labels[train]) <NEW_LINE> acc_score_test = classification_accuracy(knn, k, train_data[test], train_labels[test]) <NEW_LINE> accuracy_temp.append(acc_score_test) <NEW_LINE> <DEDENT> accuracy_mean = np.mean(accuracy_temp) <NEW_LINE> accuracy_array.append(accuracy_mean) <NEW_LINE> <DEDENT> return accuracy_array | Perform 10-fold cross validation to find the best value for k
Note: Previously this function took knn as an argument instead of train_data,train_labels.
The intention was for students to take the training data from the knn object - this should be clearer
from the new function signature. | 625941b34e696a04525c91ff |
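Selecting the best `k` from the returned per-`k` mean accuracies (a sketch with toy stand-in data; the `KNearestNeighbor` class and `classification_accuracy` helper from this code base must be importable):

```python
import numpy as np

rng = np.random.default_rng(0)
train_data = rng.normal(size=(100, 8))        # toy stand-in features
train_labels = rng.integers(0, 10, size=100)  # toy stand-in labels

k_range = np.arange(1, 16)
accuracies = cross_validation(train_data, train_labels, k_range)
best_k = int(k_range[np.argmax(accuracies)])
print(f'best k = {best_k}')
```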
def get_exposed_url_endpoints(self): <NEW_LINE> <INDENT> return [ 'dashboard.index', 'dashboard.get_by_sever_id', 'dashboard.get_by_database_id', 'dashboard.session_stats', 'dashboard.get_session_stats_by_sever_id', 'dashboard.get_session_stats_by_database_id', 'dashboard.tps_stats', 'dashboard.tps_stats_by_server_id', 'dashboard.tps_stats_by_database_id', 'dashboard.ti_stats', 'dashboard.ti_stats_by_server_id', 'dashboard.ti_stats_by_database_id', 'dashboard.to_stats', 'dashboard.to_stats_by_server_id', 'dashboard.to_stats_by_database_id', 'dashboard.bio_stats', 'dashboard.bio_stats_by_server_id', 'dashboard.bio_stats_by_database_id', 'dashboard.activity', 'dashboard.get_activity_by_server_id', 'dashboard.get_activity_by_database_id', 'dashboard.locks', 'dashboard.get_locks_by_server_id', 'dashboard.get_locks_by_database_id', 'dashboard.prepared', 'dashboard.get_prepared_by_server_id', 'dashboard.get_prepared_by_database_id', 'dashboard.config', 'dashboard.get_config_by_server_id', ] | Returns:
list: a list of url endpoints exposed to the client. | 625941b323e79379d52ee313 |
def test_IndexAccessNonExisting(self): <NEW_LINE> <INDENT> iLen = len(self.List) <NEW_LINE> for iIndex in range(1, 5): <NEW_LINE> <INDENT> gTarget = list(self.List) <NEW_LINE> lstTest = [9] <NEW_LINE> lstTest.extend(self.List) <NEW_LINE> self.TestFunction(gTarget, -iLen - iIndex, 9) <NEW_LINE> self.assertListEqual(gTarget, lstTest, msg = 'before') <NEW_LINE> gTarget = list(self.List) <NEW_LINE> lstTest = list(self.List) <NEW_LINE> lstTest.append(9) <NEW_LINE> self.TestFunction(gTarget, iLen + iIndex - 1, 9) <NEW_LINE> self.assertListEqual(gTarget, lstTest, msg = 'after') | Checks that the index argument outside the range results in the new
element being added in front of (for negative indexes) or after (for
positive indexes) the existing elements - only for mutable sequences.
Implements test ID TEST-T-510. Covers requirement REQ-FUN-530. | 625941b3460517430c393f3b
def erdos_renyi(n,m,wantRank=False): <NEW_LINE> <INDENT> V = [] <NEW_LINE> E = [] <NEW_LINE> rank_ls = [] <NEW_LINE> ls = [] <NEW_LINE> E_set = set() <NEW_LINE> for i in range(n): <NEW_LINE> <INDENT> V.append(str(i)) <NEW_LINE> <DEDENT> i = 0 <NEW_LINE> while i < m: <NEW_LINE> <INDENT> u = random.choice(V) <NEW_LINE> v = random.choice(V) <NEW_LINE> if u == v: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> elif frozenset([u,v]) in E_set: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> E_set.add(frozenset([u,v])) <NEW_LINE> ls.append([frozenset([u,v]),i]) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> <DEDENT> for item in E_set: <NEW_LINE> <INDENT> E.append(set_to_list(item)) <NEW_LINE> <DEDENT> for item in ls: <NEW_LINE> <INDENT> rank_ls.append([set_to_list(item[0]),item[1]]) <NEW_LINE> <DEDENT> if wantRank == True: <NEW_LINE> <INDENT> return V, E, rank_ls <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return V, E | generates a graph (returned as a list of nodes and a list of edges) using the Erdos Renyi model.
If wantRank is True, also returns rank_ls, which records the order in which the edges were generated. | 625941b30a50d4780f666c38
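A usage sketch, assuming `erdos_renyi` and its helper `set_to_list` from the row above are defined (with `random` imported):

```python
import random

# Hypothetical usage of erdos_renyi() from the row above.
random.seed(42)
V, E, rank_ls = erdos_renyi(n=10, m=15, wantRank=True)
assert len(V) == 10 and len(E) == 15

edge, order = rank_ls[0]   # rank_ls pairs each edge with its generation order
print(edge, order)         # e.g. ['3', '7'] 0
```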
def chooseOntologyPath(self): <NEW_LINE> <INDENT> path = self.path.text() <NEW_LINE> path = QFileDialog.getExistingDirectory(self, 'Choose Directory', path) <NEW_LINE> self.path.setText(path) | Choose a path from the QFileDialog. | 625941b3be8e80087fb209f8 |
def opt_algo(D, w, n): <NEW_LINE> <INDENT> h = 0 <NEW_LINE> b_h = w <NEW_LINE> k_h = floor(D/(pow(2.,n) * b_h)) <NEW_LINE> b = b_h <NEW_LINE> r = D % (pow(2.,n) * b_h) <NEW_LINE> k = k_h <NEW_LINE> itera = 0 <NEW_LINE> verif = bool(1) <NEW_LINE> while verif: <NEW_LINE> <INDENT> if (D % (pow(2.,n) * b_h)) == 0: <NEW_LINE> <INDENT> b = b_h <NEW_LINE> r = 0 <NEW_LINE> k = k_h <NEW_LINE> verif = bool(0) <NEW_LINE> <DEDENT> elif (D % (pow(2.,n) * b_h)) > r: <NEW_LINE> <INDENT> b = b_h; <NEW_LINE> r = (D % (pow(2.,n) * b_h)); <NEW_LINE> k = k_h; <NEW_LINE> <DEDENT> if h == floor(w/2): <NEW_LINE> <INDENT> verif = bool(0) <NEW_LINE> <DEDENT> h = min( floor(w/2), h + max(1,floor(b_h - D/(pow(2.,n) * (k_h+1)))) ); <NEW_LINE> b_h = w - h; <NEW_LINE> k_h = floor(D/(pow(2.,n) * b_h)); <NEW_LINE> itera = itera + 1; <NEW_LINE> <DEDENT> b = int(b) <NEW_LINE> r = int(r) <NEW_LINE> k = int(k) <NEW_LINE> arr_sizes = [b, r, k, itera] <NEW_LINE> return arr_sizes | Solves the tiling problem
partitioning the interval [0, D-1] into k subintervals of size
2^n b and one final subinterval of size r = D - k 2^n b
Input:
D = dimension of the original array
w = approximate initial estimate for b
n = desired level of refinement (e.g.: n = 0 => maximum level of refinement; n = 1 => number of points divided by 2^1 = 2; n = 2 => number of points divided by 2^2 = 4)
Output:
arr_sizes = [b, r, k, itera]
b = normalized size of the standard blocks (size of a standard block = b * 2^n)
r = remainder (if not equal to 0, it is the size of the last block)
k = number of standard blocks
itera = number of iterations needed to converge | 625941b3377c676e91271f5c
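A sketch checking the tiling invariant D = k * 2^n * b + r, assuming `opt_algo` is defined as in the row above (it relies on `floor`, e.g. `from math import floor`); the numbers are arbitrary:

```python
from math import floor  # needed by opt_algo() itself

# Hypothetical check of the tiling produced by opt_algo() from the row above.
D, w, n = 1000, 30, 2
b, r, k, itera = opt_algo(D, w, n)
assert k * 2**n * b + r == D   # the k blocks plus the remainder cover [0, D-1]
print(f"{k} blocks of size {2**n * b}, remainder {r}, converged in {itera} iterations")
```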
def get_world_bits(): <NEW_LINE> <INDENT> import stat <NEW_LINE> world_mask = stat.S_IRWXO <NEW_LINE> world_bits = { 'r' : stat.S_IROTH, 'w' : stat.S_IWOTH, 'x' : stat.S_IXOTH, 'rw' : stat.S_IROTH|stat.S_IWOTH, 'rx' : stat.S_IROTH|stat.S_IXOTH, 'wx' : stat.S_IWOTH|stat.S_IXOTH, 'rwx' : stat.S_IRWXO, } <NEW_LINE> world_bits['---'] = 0 <NEW_LINE> world_bits['r--'] = world_bits['r'] <NEW_LINE> world_bits['-w-'] = world_bits['w'] <NEW_LINE> world_bits['--x'] = world_bits['x'] <NEW_LINE> world_bits['rw-'] = world_bits['rw'] <NEW_LINE> world_bits['r-x'] = world_bits['rx'] <NEW_LINE> world_bits['-wx'] = world_bits['wx'] <NEW_LINE> return world_mask, world_bits | Helper function for get_permissions() and change_permissions().
Same as get_owner_bits() except for the world (other) permission class. | 625941b33c8af77a43ae3550
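A usage sketch, assuming `get_world_bits` from the row above; the file path is a placeholder:

```python
import os

# Hypothetical usage of get_world_bits() from the row above;
# "some_file" is an illustrative path, not from the source.
world_mask, world_bits = get_world_bits()
mode = os.stat("some_file").st_mode
new_mode = (mode & ~world_mask) | world_bits["r--"]  # world gets read-only access
os.chmod("some_file", new_mode)
```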
def create_user(self, name, date_of_birth, contact_number, password, **extra_fields): <NEW_LINE> <INDENT> errors = {} <NEW_LINE> if not name: <NEW_LINE> <INDENT> errors['name'] = _('Name must be set') <NEW_LINE> <DEDENT> if not date_of_birth: <NEW_LINE> <INDENT> errors['date_of_birth'] = _('Date of Birth must be set') <NEW_LINE> <DEDENT> if not contact_number: <NEW_LINE> <INDENT> errors['contact_number'] = _('Contact number must be set') <NEW_LINE> <DEDENT> if not password: <NEW_LINE> <INDENT> errors['password'] = _('Password must be set') <NEW_LINE> <DEDENT> if errors: <NEW_LINE> <INDENT> raise ValidationError(errors) <NEW_LINE> <DEDENT> validate_date_of_birth(date_of_birth) <NEW_LINE> validate_contact_number(contact_number) <NEW_LINE> user = self.model( name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields) <NEW_LINE> user.set_password(password) <NEW_LINE> user.save() <NEW_LINE> return user | Create and save a user with the given contact number and password. | 625941b36aa9bd52df036b4c
def _write_encrypted_pem(self, passphrase): <NEW_LINE> <INDENT> key = PKey() <NEW_LINE> key.generate_key(TYPE_RSA, 128) <NEW_LINE> pemFile = self.mktemp() <NEW_LINE> fObj = open(pemFile, 'w') <NEW_LINE> pem = dump_privatekey(FILETYPE_PEM, key, "blowfish", passphrase) <NEW_LINE> fObj.write(pem.decode('ascii')) <NEW_LINE> fObj.close() <NEW_LINE> return pemFile | Write a new private key out to a new file, encrypted using the given
passphrase. Return the path to the new file. | 625941b363b5f9789fde6e8f |
def search_single_page(self, iter_num=0): <NEW_LINE> <INDENT> self.dry_run() <NEW_LINE> self._test_config() <NEW_LINE> self._HTML_DATA = self.get_html() <NEW_LINE> self._SOUPED_HTML_DATA = self._soup_data() <NEW_LINE> self._RESULTS_MAIN += self.get_search_results() <NEW_LINE> self._RESULTS_KEYWORDS += self.get_related_keywords() <NEW_LINE> self._NEXT_PAGE_URL = self._get_next_page() | 1. Perform a dry run
2. Shift _DEFAULT_SCRAPE_METHOD if needed
3. Get the results | 625941b326238365f5f0ec13
def new_salary(self): <NEW_LINE> <INDENT> print("The new salary of this employee is: " + str(self.salary)) | Prints an employee's new salary after a raise. | 625941b3b7558d58953c4cc9
def simulate_static(self, steps, time = None, solution = solve_type.RK4, collect_dynamic = False): <NEW_LINE> <INDENT> dyn_output = None; <NEW_LINE> dyn_time = None; <NEW_LINE> if (collect_dynamic == True): <NEW_LINE> <INDENT> dyn_output = []; <NEW_LINE> dyn_time = []; <NEW_LINE> dyn_output.append(self._outputs); <NEW_LINE> dyn_time.append(0); <NEW_LINE> <DEDENT> for step in range(0, steps, 1): <NEW_LINE> <INDENT> self._outputs = self._calculate_states(step); <NEW_LINE> if (collect_dynamic == True): <NEW_LINE> <INDENT> dyn_output.append(self._outputs); <NEW_LINE> dyn_time.append(step); <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dyn_output = self._outputs; <NEW_LINE> dyn_time = step; <NEW_LINE> <DEDENT> <DEDENT> self._pointer_dynamic = dyn_output; <NEW_LINE> return (dyn_time, dyn_output); | !
@brief Performs static simulation of a pulse coupled neural network.
@param[in] steps (uint): Number of simulation steps.
@param[in] time (double): Can be ignored - steps are used instead of simulation time.
@param[in] solution (solve_type): Type of solution (solving).
@param[in] collect_dynamic (bool): If True - returns the whole dynamic of the oscillatory network, otherwise returns only the last values of the dynamic.
@return (list) Dynamic of the oscillatory network. If argument 'collect_dynamic' = True, then returns the dynamic for the whole simulation time,
otherwise returns only the last values (last step of simulation) of the dynamic. | 625941b345492302aab5e06a
def create_and_link_vrf_table(self, vrf_conf): <NEW_LINE> <INDENT> route_family = vrf_conf.route_family <NEW_LINE> if route_family == VRF_RF_IPV4: <NEW_LINE> <INDENT> vrf_table = Vrf4Table <NEW_LINE> <DEDENT> elif route_family == VRF_RF_IPV6: <NEW_LINE> <INDENT> vrf_table = Vrf6Table <NEW_LINE> <DEDENT> elif route_family == VRF_RF_L2_EVPN: <NEW_LINE> <INDENT> vrf_table = VrfEvpnTable <NEW_LINE> <DEDENT> elif route_family == VRF_RF_IPV4_FLOWSPEC: <NEW_LINE> <INDENT> vrf_table = Vrf4FlowSpecTable <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Unsupported route family for VRF: %s' % route_family) <NEW_LINE> <DEDENT> vrf_table = vrf_table(vrf_conf, self._core_service, self._signal_bus) <NEW_LINE> table_id = (vrf_conf.route_dist, route_family) <NEW_LINE> self._tables[table_id] = vrf_table <NEW_LINE> assert vrf_table is not None <NEW_LINE> LOG.debug('Added new VrfTable with route_dist:%s and route_family:%s', vrf_conf.route_dist, route_family) <NEW_LINE> import_rts = vrf_conf.import_rts <NEW_LINE> if import_rts: <NEW_LINE> <INDENT> self._link_vrf_table(vrf_table, import_rts) <NEW_LINE> <DEDENT> return vrf_table | Factory method to create VRF table for given `vrf_conf`.
Adds a mapping to this table with the appropriate scope. Also adds a mapping
for the import RT of this VRF to the created table to facilitate
importing/installing paths from global tables.
Returns the created table. | 625941b3b57a9660fec33629
def verify_petition_token(request): <NEW_LINE> <INDENT> if not request: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> challengerToken = request.swagger_data.get('token', '') <NEW_LINE> petitionId = request.swagger_data.get('petition', '') <NEW_LINE> try: <NEW_LINE> <INDENT> sig, timestamp = base64.b64decode(challengerToken).split(',') <NEW_LINE> token = _generate_signature(petitionId, timestamp) <NEW_LINE> if base64.b64encode(token) == challengerToken: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> except (TypeError, ValueError): <NEW_LINE> <INDENT> return False | Verify the token for given request.
A 'token' and a 'petition' must be present in the `request`.
Implementation note:
- we decode the token (base64) -> '{token},{timestamp}'
- we extract the timestamp
- we generate the same token with the extracted timestamp and the given
petition ID.
- if the generated token matches '{token}', it's considered
verified. | 625941b321a7993f00bc7a92
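The source does not show `_generate_signature` or the issuing side; below is a heavily hedged sketch of a compatible issuer, assuming the signature is an HMAC over the petition ID and timestamp with a shared secret (the secret, the HMAC scheme, and `issue_petition_token` are all assumptions, not from the source):

```python
import base64
import hashlib
import hmac
import time

SECRET = b"shared-secret"  # assumption: some server-side secret

def _generate_signature(petition_id, timestamp):
    # Assumption: returns b"<sig>,<timestamp>", so that the verifier's
    # base64.b64decode(token).split(',') and re-encode comparison line up.
    sig = hmac.new(SECRET, "{}{}".format(petition_id, timestamp).encode(),
                   hashlib.sha1).hexdigest()
    return "{},{}".format(sig, timestamp).encode()

def issue_petition_token(petition_id):
    timestamp = str(int(time.time()))
    return base64.b64encode(_generate_signature(petition_id, timestamp))
```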
def add_ordering(self, *ordering): <NEW_LINE> <INDENT> fields = [] <NEW_LINE> errors = [] <NEW_LINE> for item in ordering: <NEW_LINE> <INDENT> udf = self.process_as_udf(item) <NEW_LINE> if udf: <NEW_LINE> <INDENT> fields.append(udf) <NEW_LINE> <DEDENT> elif ORDER_PATTERN.match(item): <NEW_LINE> <INDENT> fields.append(item) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> errors.append(item) <NEW_LINE> <DEDENT> <DEDENT> if errors: <NEW_LINE> <INDENT> raise FieldError('Invalid order_by arguments: %s' % errors) <NEW_LINE> <DEDENT> if ordering: <NEW_LINE> <INDENT> self.order_by.extend(fields) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.default_ordering = False | This method was copied and modified from django core. In
particular, each field should be checked against UDF_ORDER_PATTERN
via 'process_as_udf'. | 625941b3796e427e537b036c
def find_supported_translations(self): <NEW_LINE> <INDENT> url = "https://www.biblegateway.com/versions/" <NEW_LINE> translations = list() <NEW_LINE> page = urlopen(url) <NEW_LINE> soup = BeautifulSoup(page.read()) <NEW_LINE> trans = soup.findAll("tr", {"class":"language-row"}) <NEW_LINE> for t in trans: <NEW_LINE> <INDENT> if not t.find("a").has_attr("title"): <NEW_LINE> <INDENT> t_text = t.find("td", {"class":"translation-name"}).get_text() <NEW_LINE> t_name = t_text[:t_text.rfind("(") - 1] <NEW_LINE> t_abbreviation = t_text[t_text.rfind("(") + 1:t_text.rfind(")")] <NEW_LINE> t_language = t["data-language"] <NEW_LINE> if t.find("span", {"class":"testament"}): <NEW_LINE> <INDENT> section = t.find("span", {"class":"testament"}).get_text() <NEW_LINE> if section == "OT": <NEW_LINE> <INDENT> t_has_ot = True <NEW_LINE> t_has_nt = False <NEW_LINE> t_has_deut = False <NEW_LINE> <DEDENT> elif section == "NT": <NEW_LINE> <INDENT> t_has_ot = False <NEW_LINE> t_has_nt = True <NEW_LINE> t_has_deut = False <NEW_LINE> <DEDENT> elif section == "with Apocrypha": <NEW_LINE> <INDENT> t_has_ot = True <NEW_LINE> t_has_nt = True <NEW_LINE> t_has_deut = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> t_has_ot = True <NEW_LINE> t_has_nt = True <NEW_LINE> t_has_deut = False <NEW_LINE> <DEDENT> new_trans = Translation(t_name, t_abbreviation, t_language, t_has_ot, t_has_nt, t_has_deut) <NEW_LINE> translations.append(new_trans) <NEW_LINE> <DEDENT> <DEDENT> translations.append(Translation("JPS Tanakh", "JPS", "en", True, False, False)) <NEW_LINE> return translations | Retrieves a list of supported translations from BibleGateway's translation
page. | 625941b323e79379d52ee314 |
def get_data(self, channels=None, maxlen=None, fmt="rows"): <NEW_LINE> <INDENT> with self.data_lock: <NEW_LINE> <INDENT> if fmt=="columns": <NEW_LINE> <INDENT> return self.table_accum.get_data_columns(channels=channels,maxlen=maxlen) <NEW_LINE> <DEDENT> elif fmt=="rows": <NEW_LINE> <INDENT> return self.table_accum.get_data_rows(channels=channels,maxlen=maxlen) <NEW_LINE> <DEDENT> elif fmt=="dict": <NEW_LINE> <INDENT> return self.table_accum.get_data_dict(channels=channels,maxlen=maxlen) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("unrecognized data format: {}".format(fmt)) | Get accumulated table data.
Args:
channels: list of channels to get; all channels by default
maxlen: maximal column length (if stored length is larger, return last `maxlen` rows)
fmt (str): return format; can be ``"rows"`` (list of rows), ``"columns"`` (list of columns), or ``"dict"`` (dictionary of named columns) | 625941b3d268445f265b4c1f |
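A usage sketch for the row above; `table` stands in for an instance of the containing class, and the channel names are made up:

```python
# Hypothetical usage of get_data() from the row above; "table" is an instance
# of the containing class, and the channel names are illustrative.
rows = table.get_data(channels=["time", "voltage"], maxlen=100, fmt="rows")
cols = table.get_data(fmt="columns")
named = table.get_data(fmt="dict")    # {"time": [...], "voltage": [...]}
```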
def test_proxy_ip(wsgi_app): <NEW_LINE> <INDENT> assert ( wsgi_app( {"REMOTE_ADDR": "10.0.0.99", "HTTP_X_FORWARDED_FOR": "11.22.33.44"}, None ) == "ip=11.22.33.44, ff=" ) | Remote addr is a proxy; returns the forwarded address. | 625941b355399d3f0558845e
def select_installed_version(plugin): <NEW_LINE> <INDENT> assert 'installed_version' in plugin <NEW_LINE> installed = plugin['installed_version'] <NEW_LINE> for version in plugin['versions']: <NEW_LINE> <INDENT> if version['version'] == installed: <NEW_LINE> <INDENT> return version | Return the installed version of the `plugin`.
This function raises an AssertionError if the plugin is not installed,
i.e. when 'installed_version' is not in `plugin`. | 625941b31b99ca400220a85a
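A usage sketch for the row above; the plugin dict is made up:

```python
# Hypothetical usage of select_installed_version() from the row above.
plugin = {
    "installed_version": "1.2.0",
    "versions": [{"version": "1.1.0"}, {"version": "1.2.0"}],
}
assert select_installed_version(plugin)["version"] == "1.2.0"
```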
def rotatedDigits(self, N): <NEW_LINE> <INDENT> nums = 0 <NEW_LINE> for i in range(1, N+1): <NEW_LINE> <INDENT> stri = str(i) <NEW_LINE> valid1, valid2 = False, True <NEW_LINE> for j in stri: <NEW_LINE> <INDENT> if j in ["2", "5", "6", "9"]: <NEW_LINE> <INDENT> valid1 = True <NEW_LINE> <DEDENT> elif j not in ["1", "0", "8"]: <NEW_LINE> <INDENT> valid2 = False <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if valid1 and valid2: <NEW_LINE> <INDENT> nums += 1 <NEW_LINE> <DEDENT> <DEDENT> return nums | :type N: int
:rtype: int | 625941b3a4f1c619b28afdf6 |
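A usage sketch: the method above takes `self`, so this assumes it is available as a module-level function and attaches it to a hypothetical `Solution` class. For reference, digits 0/1/8 rotate to themselves, 2/5/6/9 rotate to different valid digits, and 3/4/7 are invalid:

```python
# Hypothetical wrapper: attach the rotatedDigits(self, N) method from the
# row above to a Solution class so it can be called.
class Solution:
    pass

Solution.rotatedDigits = rotatedDigits
assert Solution().rotatedDigits(10) == 4   # the "good" numbers are 2, 5, 6, 9
```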
def check(self, mailfrom, recipient): <NEW_LINE> <INDENT> return | Check if mailfrom/recipient combination is whitelisted. | 625941b3046cf37aa974caf6 |
def test_random_non_multiple(self): <NEW_LINE> <INDENT> shuffle = True <NEW_LINE> batch_size = 3 <NEW_LINE> n = 10 <NEW_LINE> np.random.seed(0) <NEW_LINE> n_batches = int(np.ceil(n * 1. / batch_size)) <NEW_LINE> sample_indices = [] <NEW_LINE> batch_indices = [] <NEW_LINE> for batch in indices_generator(shuffle, batch_size, n): <NEW_LINE> <INDENT> new_sample_indices, new_batch_index = batch <NEW_LINE> sample_indices += new_sample_indices.tolist() <NEW_LINE> batch_indices.append(new_batch_index) <NEW_LINE> <DEDENT> assert sample_indices != np.arange(n).tolist() <NEW_LINE> assert batch_indices != np.arange(n_batches).tolist() <NEW_LINE> assert set(sample_indices) == set(np.arange(n)) <NEW_LINE> assert set(batch_indices) == set(np.arange(n_batches)) | Test indices generator when number of samples
is not a multiple of the batch size
and when generation is random | 625941b3099cdd3c635f0a07 |
def getval(self, key): <NEW_LINE> <INDENT> if not key: <NEW_LINE> <INDENT> print ("getval command expects: key") <NEW_LINE> return <NEW_LINE> <DEDENT> kpath = key.split('.') <NEW_LINE> cv = self.wallet.wallet_config <NEW_LINE> try: <NEW_LINE> <INDENT> for k in kpath: <NEW_LINE> <INDENT> cv = cv[k] <NEW_LINE> <DEDENT> print (json.dumps(cv)) <NEW_LINE> <DEDENT> except (KeyError, TypeError): <NEW_LINE> <INDENT> print ("could not find the key: %s" % key) | Returns the value for a given key in the config.
Key is expressed like so: key.subkey.subsubkey | 625941b331939e2706e4cc1c |
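A standalone sketch of the dotted-key traversal that `getval` performs; the config dict is made up:

```python
import json

# Hypothetical standalone version of the dotted-key lookup inside getval().
config = {"wallet": {"rpc": {"port": 8332}}}
cv = config
for k in "wallet.rpc.port".split("."):
    cv = cv[k]
print(json.dumps(cv))   # 8332
```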
def get_user(self, id): <NEW_LINE> <INDENT> return self._connection.get_user(id) | Returns a :class:`User` with the given ID. If not found, returns None. | 625941b356ac1b37e6263f8d |
def __init__(self, name=None, parent=None, discovered=None, part_id=None, device=None, serial=None, manufacturer=None): <NEW_LINE> <INDENT> self._name = None <NEW_LINE> self._parent = None <NEW_LINE> self._discovered = None <NEW_LINE> self._part_id = None <NEW_LINE> self._device = None <NEW_LINE> self._serial = None <NEW_LINE> self._manufacturer = None <NEW_LINE> self.name = name <NEW_LINE> self.parent = parent <NEW_LINE> if discovered is not None: <NEW_LINE> <INDENT> self.discovered = discovered <NEW_LINE> <DEDENT> if part_id is not None: <NEW_LINE> <INDENT> self.part_id = part_id <NEW_LINE> <DEDENT> self.device = device <NEW_LINE> if serial is not None: <NEW_LINE> <INDENT> self.serial = serial <NEW_LINE> <DEDENT> if manufacturer is not None: <NEW_LINE> <INDENT> self.manufacturer = manufacturer | Data48 - a model defined in Swagger | 625941b367a9b606de4a7c68 |
def disable_interspersed_args(self): <NEW_LINE> <INDENT> assert self.option_parser_kind == MAIN_USE_OPTPARSE <NEW_LINE> self.option_parser.disable_interspersed_args() | See optparse.disable_interspersed_args in standard python library.
This function is now deprecated and is only supported
if self.option_parser_kind == MAIN_USE_OPTPARSE.
Use self.option_parser.disable_interspersed_args instead. | 625941b33317a56b86939a16 |
def get_outputs(self, index: int=0) -> List[node.Node]: <NEW_LINE> <INDENT> outs = [] <NEW_LINE> for edge in self.outputs[index].edges: <NEW_LINE> <INDENT> other_socket = edge.get_other_socket(self.outputs[index]) <NEW_LINE> outs.append(other_socket.node) <NEW_LINE> <DEDENT> return outs | Return the nodes connected to the output socket at the given index.
Todo:
* May be able to remove this (used for the Undo function; currently unused). | 625941b399fddb7c1c9de146
def GetCurrentParameters(self): <NEW_LINE> <INDENT> return _itkShapePriorSegmentationLevelSetImageFilterPython.itkShapePriorSegmentationLevelSetImageFilterIF2IF2F_GetCurrentParameters(self) | GetCurrentParameters(self) -> itkArrayD | 625941b33539df3088e2e0f6 |