code | docstring | _id
---|---|---|
def __init__(self, name, ip_addr, prefix_len, mtu, cm, sw_proto, sdn_ctrl_port): <NEW_LINE> <INDENT> super(OvsBridge, self).__init__(name, ip_addr, prefix_len, mtu, cm) <NEW_LINE> if OvsBridge.brctl is None or OvsBridge.iptool is None: <NEW_LINE> <INDENT> raise RuntimeError("openvswitch-switch was not found" if not OvsBridge.brctl else "iproute2 was not found") <NEW_LINE> <DEDENT> self._patch_port = "pp-"+self.name[:12] <NEW_LINE> Modlib.runshell([OvsBridge.brctl, "--may-exist", "add-br", self.name]) <NEW_LINE> if ip_addr and prefix_len: <NEW_LINE> <INDENT> net = "{0}/{1}".format(ip_addr, prefix_len) <NEW_LINE> Modlib.runshell([OvsBridge.iptool, "addr", "flush", "dev", self.name]) <NEW_LINE> Modlib.runshell([OvsBridge.iptool, "addr", "add", net, "dev", self.name]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Modlib.runshell( ["sysctl", "net.ipv6.conf.{}.disable_ipv6=1".format(self.name)]) <NEW_LINE> Modlib.runshell([OvsBridge.iptool, "addr", "flush", self.name]) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> Modlib.runshell([OvsBridge.brctl, "set", "int", self.name, "mtu_request=" + str(self.mtu)]) <NEW_LINE> <DEDENT> except RuntimeError as e: <NEW_LINE> <INDENT> self.cm.log("LOG_WARNING", "The following error occurred while setting MTU for OVS bridge: %s", e) <NEW_LINE> <DEDENT> if sw_proto.casefold() == "STP".casefold(): <NEW_LINE> <INDENT> self.stp(True) <NEW_LINE> <DEDENT> elif sw_proto.casefold() == "BF".casefold(): <NEW_LINE> <INDENT> self.add_sdn_ctrl(sdn_ctrl_port) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.cm.log("LOG_INFO", f"No switch protocol specified for {name}") <NEW_LINE> <DEDENT> Modlib.runshell([OvsBridge.iptool, "link", "set", "dev", self.name, "up"]) | Initialize an OpenvSwitch bridge object. | 625941b25fc7496912cc371d |
def mark(self, i, j): <NEW_LINE> <INDENT> if not (0 <= i <= 2 and 0<= j <= 2): <NEW_LINE> <INDENT> raise IndexError('Invalid board position') <NEW_LINE> <DEDENT> if self._board[i][j] != ' ': <NEW_LINE> <INDENT> raise ValueError('Position occupied') <NEW_LINE> <DEDENT> if self.winner() is not None: <NEW_LINE> <INDENT> raise ValueError('Game is already complete') <NEW_LINE> <DEDENT> self._board[i][j] = self._player <NEW_LINE> if self._player == 'X': <NEW_LINE> <INDENT> self._player = 'O' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._player = 'X' | Put X or O at position (i, j) | 625941b232920d7e50b27f6b |
def __init__(self, items): <NEW_LINE> <INDENT> super(ImageToText, self).__init__() <NEW_LINE> self.items: [] = items <NEW_LINE> self.index_list = [] <NEW_LINE> self.index = [] | Args:
items: list, elements are Item objects
index_list: object indices arranged in the order in which the objects are described
index: indices of the objects involved in each sentence | 625941b2b545ff76a8913bb8 |
def add_groups(self, group_ids=None): <NEW_LINE> <INDENT> return self.groups.save( [self.groups.factory({'member_group_id': x}) for x in group_ids]) | Convenience method for adding groups to a Member
:param group_ids: Set of Group identifiers to add
:type group_ids: :class:`list` of :class:`int`
:rtype: :class:`None`
Usage::
>>> from emma.model.account import Account
>>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
>>> mbr = acct.members[123]
>>> mbr.add_groups([1024, 1025])
None | 625941b24f88993c3716be0d |
def __init__(self, resp): <NEW_LINE> <INDENT> if isinstance(resp, str): <NEW_LINE> <INDENT> self.message = resp <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.url = resp.geturl() <NEW_LINE> self.code = resp.getcode() <NEW_LINE> self.stack = None <NEW_LINE> for line in resp.read().split("\n"): <NEW_LINE> <INDENT> if self.stack == None: <NEW_LINE> <INDENT> m = re.search("<pre>(.*)", line) <NEW_LINE> if m != None: <NEW_LINE> <INDENT> self.stack = m.group(1) <NEW_LINE> m = re.search("^.+\.([^\.]+: .*)$", self.stack) <NEW_LINE> if m != None: <NEW_LINE> <INDENT> self.message = m.group(1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.message = line <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> m = re.search("(.*)</pre>", line) <NEW_LINE> if m != None: <NEW_LINE> <INDENT> self.stack = self.stack + "\n" + m.group(1) <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.stack = self.stack + "\n" + line | Construct exception by providing response object. | 625941b291af0d3eaac9b7ab |
def error_func(x, a0, a1, a2, a3): <NEW_LINE> <INDENT> return (a0 / 2) * sp.special.erfc((a1 - x) / a2) + a3 | Defines a complementary error function of the form
(a0/2)*sp.special.erfc((a1-x)/a2) + a3 | 625941b276d4e153a657e8c7 |
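A minimal standalone sketch of evaluating this step model, assuming `sp` is the `scipy` package as in the row above; the sample parameter values are hypothetical:

```python
import numpy as np
import scipy.special

# Complementary-error-function step model from the row above:
# a0 sets the step height, a1 its centre, a2 its width, a3 the baseline.
def error_func(x, a0, a1, a2, a3):
    return (a0 / 2) * scipy.special.erfc((a1 - x) / a2) + a3

x = np.linspace(-5.0, 5.0, 11)
y = error_func(x, a0=2.0, a1=0.0, a2=1.0, a3=0.5)
print(y)  # rises from ~0.5 (baseline) to ~2.5 (baseline + height) around x = a1
```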
def test_xyz_calcs(stream_digest: digest.StreamDigest, scan: client.LidarScan) -> None: <NEW_LINE> <INDENT> xyz_from_docs = reference.xyz_proj(stream_digest.meta, scan) <NEW_LINE> xyzlut = client.XYZLut(stream_digest.meta) <NEW_LINE> xyz_from_lut = xyzlut(scan) <NEW_LINE> assert np.allclose(xyz_from_docs, xyz_from_lut) | Compare the optimized xyz projection to a reference implementation. | 625941b2a79ad161976cbede |
def __str__(self): <NEW_LINE> <INDENT> result = [] <NEW_LINE> for conf in self.configuration: <NEW_LINE> <INDENT> result.append(conf) <NEW_LINE> <DEDENT> result.reverse() <NEW_LINE> res_str = str(result) <NEW_LINE> return res_str | Return string representation for Mancala board | 625941b2f548e778e58cd315 |
def getFileByName(dbSession, user_id, name): <NEW_LINE> <INDENT> f = dbSession.query(File).filter(File.user_id == user_id).filter(File.name == name) <NEW_LINE> if f: <NEW_LINE> <INDENT> return f.one_or_none() <NEW_LINE> <DEDENT> return None | Gets a file by user ID and name | 625941b27b180e01f3dc45a4 |
def get_learning_rate(global_step, one_epoch_step, lr, decay_step, decay_rate, lr_baseline): <NEW_LINE> <INDENT> learning_rate = tf.train.exponential_decay(lr, global_step, decay_step * one_epoch_step, decay_rate, staircase = True) <NEW_LINE> learning_rate = tf.maximum(learning_rate, lr_baseline) <NEW_LINE> return learning_rate | Get a learning rate that starts at lr, decays by decay_rate every decay_step epochs, and never falls below lr_baseline
:param global_step: current training step
:param one_epoch_step: number of steps in one epoch
:param lr: initial learning rate
:param decay_step: decay interval, in epochs
:param decay_rate: multiplicative decay factor
:param lr_baseline: lower bound on the learning rate
:return: the decayed learning rate tensor | 625941b2293b9510aa2c3032 |
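A minimal numpy sketch of the staircase schedule this computes; the hyperparameter values are hypothetical, and `tf.train.exponential_decay` with `staircase=True` corresponds to multiplying `lr` by `decay_rate` once per completed decay interval:

```python
# Staircase exponential decay with a floor, mirroring the row above.
lr, decay_rate, lr_baseline = 0.01, 0.5, 0.0003   # hypothetical values
one_epoch_step, decay_step = 100, 2               # decay every 2 epochs

for global_step in (0, 199, 200, 600, 2000):
    interval = decay_step * one_epoch_step        # steps per decay interval
    rate = lr * decay_rate ** (global_step // interval)
    print(global_step, max(rate, lr_baseline))    # clamp at lr_baseline
```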
def equal_column(new_table, where_query): <NEW_LINE> <INDENT> list_keys = list(new_table._all_data.keys()) <NEW_LINE> rows = new_table.num_rows() <NEW_LINE> list_comparison = where_query.split('=') <NEW_LINE> first_column = list_comparison[0] <NEW_LINE> second_column = list_comparison[1] <NEW_LINE> index_deletion = [] <NEW_LINE> for i in range(rows): <NEW_LINE> <INDENT> if (new_table._all_data[first_column][i] != (new_table._all_data [second_column][i])): <NEW_LINE> <INDENT> index_deletion.append(i) <NEW_LINE> <DEDENT> <DEDENT> index_deletion.reverse() <NEW_LINE> for i in index_deletion: <NEW_LINE> <INDENT> for keys in new_table._all_data: <NEW_LINE> <INDENT> useless_value = new_table._all_data[keys].pop(i) <NEW_LINE> <DEDENT> <DEDENT> list_keys = list(new_table._all_data.keys()) <NEW_LINE> new_table._rows = len(new_table._all_data[list_keys[0]]) | (Table, string) -> NoneType
Handles the where token for the case column=column2.
Compares the two columns row by row and deletes any
rows within the table where the values do not match.
REQ: where_query is of the form column=column2
REQ: the columns are within the table
REQ: the table contains values
>>> x = Table()
>>> x.set_dict({'t': ['m1', 'm2', 'm3', 'm1', 'm2', 'm3'],
'title': ['m1', 'm1', 'm1', 'm2', 'm2', 'm2'],
'year': ['5', '5', '5', '6', '6', '6'],
'y': ['5', '6', '7', '5', '6', '7'],
'money': ['1.0', '2.0', '3.0', '1.0', '2.0', '3.0']})
>>> equal_column(x,'y=year')
>>> x._all_data == {'title': ['m1', 'm2'], 'year': ['5', '6'],
'y': ['5', '6'], 't': ['m1', 'm2'],
'money': ['1.0', '2.0']}
True
>>> x = Table()
>>> x.set_dict({'t': ['m1', 'm2', 'm3', 'm1', 'm2', 'm3'],
'title': ['m1', 'm1', 'm1', 'm2', 'm2', 'm2'],
'year': ['5', '5', '5', '6', '6', '6'],
'y': ['5', '6', '7', '5', '6', '7'],
'money': ['1.0', '2.0', '3.0', '1.0', '2.0', '3.0']})
>>> equal_column(x, 't=title')
>>> x == {'money': ['1.0', '2.0'], 't': ['m1', 'm2'],
'title': ['m1', 'm2'], 'year': ['5', '6'],
'y': ['5', '6']}
True | 625941b20a366e3fb873e5ae |
def ExportGetCoordinateSystem(self,pModelObject,pCoordinateSystem): <NEW_LINE> <INDENT> pass | ExportGetCoordinateSystem(self: ICDelegate,pModelObject: dotModelObject_t,pCoordinateSystem: dotCoordinateSystem_t) -> (int,dotModelObject_t,dotCoordinateSystem_t) | 625941b230bbd722463cbb60 |
def resetUseParentModel(self): <NEW_LINE> <INDENT> self.setUseParentModel(False) | equivalent to setUseParentModel(False) | 625941b2d164cc6175782ae5 |
def ready_to_schedule_operation(op, has_executed, graph): <NEW_LINE> <INDENT> dependencies = set(filter(lambda v: isinstance(v, Operation), nx.ancestors(graph, op))) <NEW_LINE> return dependencies.issubset(has_executed) | Determines if an Operation is ready to be scheduled for execution based on
what has already been executed.
Args:
op:
The Operation object to check
has_executed: set
A set containing all operations that have been executed so far
graph:
The networkx graph containing the operations and data nodes
Returns:
A boolean indicating whether the operation may be scheduled for
execution based on what has already been executed. | 625941b28c3a873295158156 |
def _backward_Concat(name=None, attr=None, out=None, **kwargs): <NEW_LINE> <INDENT> return (0,) | Parameters
----------
name : string, optional.
Name of the resulting symbol.
Returns
-------
Symbol
The result symbol. | 625941b273bcbd0ca4b2be15 |
def top_losses(self, k, largest=True): <NEW_LINE> <INDENT> return self.losses.topk(k, largest=largest) | `k` largest(/smallest) losses | 625941b2711fe17d82542115 |
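A minimal sketch of the underlying `Tensor.topk` call on synthetic losses (the tensor values are made up):

```python
import torch

losses = torch.tensor([0.2, 1.5, 0.7, 3.1])

values, indices = losses.topk(2, largest=True)   # two largest losses
print(values)   # tensor([3.1000, 1.5000])
print(indices)  # tensor([3, 1])

values, indices = losses.topk(2, largest=False)  # two smallest losses
print(values)   # tensor([0.2000, 0.7000])
```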
def _display_search_results(self, current_ind): <NEW_LINE> <INDENT> self.valid_inputs = [] <NEW_LINE> clear_screen() <NEW_LINE> self._setup() <NEW_LINE> for i in range(MAX_PER_PAGE): <NEW_LINE> <INDENT> ind = i + current_ind <NEW_LINE> if ind + 1 > len(self.search_res): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> self.valid_inputs.append(str(ind + 1)) <NEW_LINE> q = self.search_res[ind] <NEW_LINE> print( '\n[{}] {}\n' '\tCreationDate: {}\tScore: {}\tAnswerCount: {}'.format( ind + 1, q['Title'], q['CreationDate'], q['Score'], q['AnswerCount'] ) ) <NEW_LINE> <DEDENT> return MAX_PER_PAGE | Displays up to 10 search results at a time. Displays the title, creation date, score, and answer count of each
retrieved question.
:param current_ind: integer representing the number of search results that have already been displayed
:return: None if all search results have been printed or the number of search results that have been printed | 625941b2d6c5a10208143dde |
def set_normal_table(self,pos,normal_table): <NEW_LINE> <INDENT> if pos == 0: <NEW_LINE> <INDENT> self.normal_table_1 = normal_table <NEW_LINE> <DEDENT> elif pos == 1: <NEW_LINE> <INDENT> self.normal_table_2 = normal_table <NEW_LINE> <DEDENT> elif pos == 2: <NEW_LINE> <INDENT> self.normal_table_3 = normal_table <NEW_LINE> <DEDENT> elif pos == 3: <NEW_LINE> <INDENT> self.normal_table_4 = normal_table | Set the normal_table at position pos; positions are 0-based | 625941b230dc7b766590170b |
def test_create_an_item_usefulness(self): <NEW_LINE> <INDENT> data = factory.build( dict, FACTORY_CLASS=ItemUsefulnessFactory) <NEW_LINE> data['item'] = 1 <NEW_LINE> data['usefulness'] = 2 <NEW_LINE> url = reverse('itemusefulness-list', kwargs={ 'item_pk': data['item']}) <NEW_LINE> response = self.client.post(url, data, format='json') <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_201_CREATED) <NEW_LINE> self.assertIsInstance(response.data, dict) <NEW_LINE> self.assertEqual( sorted(response.data.keys()), ITEMUSEFULNESS_FIELDS) <NEW_LINE> url = reverse( 'itemusefulness-detail', kwargs={'item_pk': response.data['item']['id'], 'pk': response.data['id']} ) <NEW_LINE> response_get = self.client.get(url) <NEW_LINE> self.assertEqual(response_get.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertIsInstance(response_get.data, dict) | Ensure we can create an ItemUsefulness object | 625941b2287bf620b61d380a |
def update_observation(self, observation, action, reward): <NEW_LINE> <INDENT> assert observation == self.n_stages <NEW_LINE> for start_node in reward: <NEW_LINE> <INDENT> for end_node in reward[start_node]: <NEW_LINE> <INDENT> y = reward[start_node][end_node] <NEW_LINE> old_mean, old_std = self.posterior[start_node][end_node] <NEW_LINE> old_precision = 1. / (old_std**2) <NEW_LINE> noise_precision = 1. / (self.sigma_tilde**2) <NEW_LINE> new_precision = old_precision + noise_precision <NEW_LINE> new_mean = (noise_precision * (np.log(y) + 0.5 / noise_precision) + old_precision * old_mean) / new_precision <NEW_LINE> new_std = np.sqrt(1. / new_precision) <NEW_LINE> self.posterior[start_node][end_node] = (new_mean, new_std) | Updates observations for binomial bridge.
Args:
observation - number of stages
action - path chosen by the agent (not used)
reward - dict of dict reward[start_node][end_node] = stochastic_time | 625941b231939e2706e4cc0e |
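A minimal standalone sketch of the conjugate Gaussian update applied to each edge above; all numbers are hypothetical, `sigma_tilde` is the observation noise scale, and the `0.5 / noise_precision` term is the correction for the log-normal observation mean:

```python
import numpy as np

sigma_tilde = 1.0                      # observation noise scale
old_mean, old_std = 0.0, 1.0           # prior on the log edge time
y = 2.5                                # observed stochastic travel time

old_precision = 1.0 / old_std ** 2
noise_precision = 1.0 / sigma_tilde ** 2
new_precision = old_precision + noise_precision
new_mean = (noise_precision * (np.log(y) + 0.5 / noise_precision)
            + old_precision * old_mean) / new_precision
new_std = np.sqrt(1.0 / new_precision)
print(new_mean, new_std)               # posterior after one observation
```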
def setAddress(self, address): <NEW_LINE> <INDENT> super(Ui_AddressWidget, self).setAddress(address) <NEW_LINE> self.setData(0, QtCore.Qt.UserRole, self.address) | Set address to object (for QT UI) | 625941b21f037a2d8b945f97 |
def stopper(self, block_identifier: BlockIdentifier = 'latest'): <NEW_LINE> <INDENT> result = self.sc.stopper(block_identifier=block_identifier) <NEW_LINE> return result | Checks whether the contract is paused | 625941b24f6381625f1147df |
def _createNumpyData(self, num_examples, example_shape): <NEW_LINE> <INDENT> np.random.seed(self.RANDOM_SEED) <NEW_LINE> data_shape = [num_examples] + example_shape <NEW_LINE> return np.random.randn(*data_shape) | Return np array of num_examples data points where each data point has shape example_shape. | 625941b2507cdc57c6306a6a |
def parser_parameter(content): <NEW_LINE> <INDENT> parameter_dic = {} <NEW_LINE> if isinstance(content, MultiDictView): <NEW_LINE> <INDENT> for key, value in content.items(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> parameter_dic[str(key, encoding='utf-8')] = value.decode('utf-8') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif isinstance(content, dict): <NEW_LINE> <INDENT> for key, value in content.items(): <NEW_LINE> <INDENT> parameter_dic[key] = value <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> content = unquote(content, 'utf-8') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> content = content <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> content_json = json.loads(content) <NEW_LINE> for key, value in content_json.items(): <NEW_LINE> <INDENT> parameter_dic[key] = value <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> if content is not None and content != '': <NEW_LINE> <INDENT> if '{' == content[0] and '}' == content[-1]: <NEW_LINE> <INDENT> parameter_dic[content] = '' <NEW_LINE> <DEDENT> elif '[' == content[0] and ']' == content[-1]: <NEW_LINE> <INDENT> parameter_dic[content] = '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for i in content.split('&'): <NEW_LINE> <INDENT> if '=' in i: <NEW_LINE> <INDENT> key = i[0: i.index('=')] <NEW_LINE> value = i[i.index('=') + 1:] <NEW_LINE> parameter_dic[key] = value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key = i <NEW_LINE> value = '' <NEW_LINE> parameter_dic[key] = value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return parameter_dic | Converts parameters into dict format for easier parameter collection
:param content: the content to be parsed
:return: parameter_dic: {"a": "test1", "b": "test2"} | 625941b25166f23b2e1a4ef8 |
def create_ts_visu_for_table(model,tablePath="root.newTable",visuPath="root.visu"): <NEW_LINE> <INDENT> model.create_node_from_path(visuPath, {"type": "widget"}) <NEW_LINE> model.create_nodes_from_template(visuPath,modeltemplates.timeseriesWidget) <NEW_LINE> model.delete_node(visuPath+'.buttons.button1') <NEW_LINE> model.add_forward_refs(visuPath+'.selectedVariables',[tablePath+'.variables.myvariable0']) <NEW_LINE> model.add_forward_refs(visuPath+'.selectableVariables',[tablePath+'.variables']) <NEW_LINE> model.add_forward_refs(visuPath+'.table', [tablePath]) <NEW_LINE> model.set_value(visuPath+".hasBackground",False) | adds a standard ts visu for an existing table, makes all the hooks right | 625941b2de87d2750b85fb26 |
def GsUtil_cp(self, src, dest, stdin=None): <NEW_LINE> <INDENT> raise NotImplementedError() | Runs gsutil cp |src| |dest|
Args:
src: The file path or url to copy from.
dest: The file path or url to copy to.
stdin: If src is '-', this is used as the stdin to give to gsutil. The
effect is that text in stdin is copied to |dest|. | 625941b23346ee7daa2b2b00 |
def _read_config_files(self, cfg_dict, infiles): <NEW_LINE> <INDENT> def normalize_sym_value(sym, vtype, inval): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> normval = sym.normalize_and_validate(inval) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> normval = sym.normalize_and_validate(inval, lenient=True) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> self.logger.warning( "invalid %s value %r for %s symbol %s", vtype, inval, sym.type_name, sym.name ) <NEW_LINE> raise <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.logger.warning( ( 'improper %s value %r for %s symbol %s, ' 'normalized to %r' ), vtype, inval, sym.type_name, sym.name, normval ) <NEW_LINE> <DEDENT> <DEDENT> return normval <NEW_LINE> <DEDENT> get_symbol_name = self.convert_option_to_symbol_name <NEW_LINE> reader = self._get_config_file_reader() <NEW_LINE> kconfig_syms = self._kconfig_symbols <NEW_LINE> for infile_item in infiles: <NEW_LINE> <INDENT> if isinstance(infile_item, tuple): <NEW_LINE> <INDENT> infile_path, infile_name = infile_item <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> infile_path, infile_name = infile_item, None <NEW_LINE> <DEDENT> self.logger.debug( "Reading config file %r", infile_name or infile_path ) <NEW_LINE> for lino, option, value in reader.read_file( infile_path, filename=infile_name ): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> symbol_name = get_symbol_name(option, lenient=False) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> self.logger.warning( "Failed to get symbol name for %s, ignoring.", symbol_name ) <NEW_LINE> continue <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> sym = kconfig_syms[symbol_name] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.logger.debug("Read unknown symbol %s", symbol_name) <NEW_LINE> if value is None: <NEW_LINE> <INDENT> self.logger.debug( "Cannot infer type of %s (not set), ignoring", symbol_name ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.logger.debug( "Adding unknown symbol %s as new %s symbol", symbol_name, value[0] ) <NEW_LINE> sym = kconfig_syms.add_unknown_symbol( value[0].value, symbol_name ) <NEW_LINE> cfg_dict[sym] = value[-1] <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> cfg_dict[sym] = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> normval = normalize_sym_value(sym, value[0], value[1]) <NEW_LINE> cfg_dict[sym] = normval <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return cfg_dict | Reads zero or (preferably) more config files and stores
a mapping :: <kconfig symbol> => <value> in the given config dict.
@raises ValueError: bad option name
(propagated from convert_option_to_symbol_name)
or bad option value
(propagated from ConfigFileReader.unpack_value)
@param cfg_dict: dict for storing config options (symbol => value)
@type cfg_dict: C{dict}:: L{AbstractKconfigSymbol} => _
@param infiles: a list containing input files
or 2-tuples of (input file, input file name)
@type infiles: C{list} of C{str}|2-tuple(C{str}, C{str}|C{None})
@return: cfg_dict | 625941b23617ad0b5ed67c97 |
def delete(self, obj, id=None, **kwargs): <NEW_LINE> <INDENT> if inspect.isclass(obj): <NEW_LINE> <INDENT> if not issubclass(obj, GitlabObject): <NEW_LINE> <INDENT> raise GitlabError("Invalid class: %s" % obj) <NEW_LINE> <DEDENT> <DEDENT> params = {obj.idAttr: id if id else getattr(obj, obj.idAttr)} <NEW_LINE> params.update(kwargs) <NEW_LINE> missing = [] <NEW_LINE> for k in itertools.chain(obj.requiredUrlAttrs, obj.requiredDeleteAttrs): <NEW_LINE> <INDENT> if k not in params: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> params[k] = getattr(obj, k) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> missing.append(k) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if missing: <NEW_LINE> <INDENT> raise GitlabDeleteError('Missing attribute(s): %s' % ", ".join(missing)) <NEW_LINE> <DEDENT> obj_id = params[obj.idAttr] if obj._id_in_delete_url else None <NEW_LINE> url = self._construct_url(id_=obj_id, obj=obj, parameters=params) <NEW_LINE> if obj._id_in_delete_url: <NEW_LINE> <INDENT> params.pop(obj.idAttr) <NEW_LINE> <DEDENT> r = self._raw_delete(url, **params) <NEW_LINE> raise_error_from_response(r, GitlabDeleteError, expected_code=[200, 204]) <NEW_LINE> return True | Delete an object on the GitLab server.
Args:
obj (object or id): The object, or the class of the object to
delete. If it is the class, the id of the object must be
specified as the `id` arguments.
id: ID of the object to remove. Required if `obj` is a class.
**kwargs: Additional arguments to send to GitLab.
Returns:
bool: True if the operation succeeds.
Raises:
GitlabConnectionError: If the server cannot be reached.
GitlabDeleteError: If the server fails to perform the request. | 625941b221bff66bcd6846f6 |
def __init__(self, state_size, action_size, random_seed): <NEW_LINE> <INDENT> self.state_size = state_size <NEW_LINE> self.action_size = action_size <NEW_LINE> self.seed = random.seed(random_seed) <NEW_LINE> self.actor_local = Actor(state_size, action_size, random_seed).to(device) <NEW_LINE> self.actor_target = Actor(state_size, action_size, random_seed).to(device) <NEW_LINE> self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) <NEW_LINE> self.critic_local = Critic(state_size, action_size, random_seed).to(device) <NEW_LINE> self.critic_target = Critic(state_size, action_size, random_seed).to(device) <NEW_LINE> self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) <NEW_LINE> self.noise = OUNoise((20, action_size), random_seed) <NEW_LINE> self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed) <NEW_LINE> self.step_num = 0 <NEW_LINE> self.logfile_losses = open('out-losses.txt', 'w') | Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed | 625941b2dc8b845886cb52cd |
def get_dependency_graph(region_defs): <NEW_LINE> <INDENT> graph = {} <NEW_LINE> name_to_sort_name = {} <NEW_LINE> for sort_name, rdef in six.iteritems(region_defs): <NEW_LINE> <INDENT> name, sel = rdef.name, rdef.select <NEW_LINE> if name in name_to_sort_name: <NEW_LINE> <INDENT> msg = 'region %s/%s already defined!' % (sort_name, name) <NEW_LINE> raise ValueError(msg) <NEW_LINE> <DEDENT> name_to_sort_name[name] = sort_name <NEW_LINE> if name not in graph: <NEW_LINE> <INDENT> graph[name] = [0] <NEW_LINE> <DEDENT> for parent in get_parents(sel): <NEW_LINE> <INDENT> graph[name].append(parent) <NEW_LINE> <DEDENT> if rdef.get('parent', None) is not None: <NEW_LINE> <INDENT> graph[name].append(rdef.parent) <NEW_LINE> <DEDENT> <DEDENT> return graph, name_to_sort_name | Return a dependency graph and a name-sort name mapping for given
region definitions. | 625941b2d8ef3951e32432d6 |
def level(self, name, no=None, color=None, icon=None): <NEW_LINE> <INDENT> if not isinstance(name, str): <NEW_LINE> <INDENT> raise TypeError( "Invalid level name, it should be a string, not: '%s'" % type(name).__name__ ) <NEW_LINE> <DEDENT> if no is color is icon is None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self._core.levels[name] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise ValueError("Level '%s' does not exist" % name) from None <NEW_LINE> <DEDENT> <DEDENT> if name not in self._core.levels: <NEW_LINE> <INDENT> if no is None: <NEW_LINE> <INDENT> raise ValueError( "Level '%s' does not exist, you have to create it by specifying a level no" % name ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> old_color, old_icon = "", " " <NEW_LINE> <DEDENT> <DEDENT> elif no is not None: <NEW_LINE> <INDENT> raise TypeError("Level '%s' already exists, you can't update its severity no" % name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _, no, old_color, old_icon = self.level(name) <NEW_LINE> <DEDENT> if color is None: <NEW_LINE> <INDENT> color = old_color <NEW_LINE> <DEDENT> if icon is None: <NEW_LINE> <INDENT> icon = old_icon <NEW_LINE> <DEDENT> if not isinstance(no, int): <NEW_LINE> <INDENT> raise TypeError( "Invalid level no, it should be an integer, not: '%s'" % type(no).__name__ ) <NEW_LINE> <DEDENT> if no < 0: <NEW_LINE> <INDENT> raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no) <NEW_LINE> <DEDENT> ansi = Colorizer.ansify(color) <NEW_LINE> level = Level(name, no, color, icon) <NEW_LINE> with self._core.lock: <NEW_LINE> <INDENT> self._core.levels[name] = level <NEW_LINE> self._core.levels_ansi_codes[name] = ansi <NEW_LINE> for handler in self._core.handlers.values(): <NEW_LINE> <INDENT> handler.update_format(name) <NEW_LINE> <DEDENT> <DEDENT> return level | Add, update or retrieve a logging level.
Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color``
tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
level, you should necessarily use its name, the severity number is not linked back to levels
name (this implies that several levels can share the same severity).
To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
can also be specified or will be empty by default.
To update an existing level, pass its ``name`` with the parameters to be changed. It is not
possible to modify the ``no`` of a level once it has been added.
To retrieve level information, the ``name`` solely suffices.
Parameters
----------
name : |str|
The name of the logging level.
no : |int|
The severity of the level to be added or updated.
color : |str|
The color markup of the level to be added or updated.
icon : |str|
The icon of the level to be added or updated.
Returns
-------
``Level``
A |namedtuple| containing information about the level.
Raises
------
ValueError
If there is no level registered with such ``name``.
Examples
--------
>>> level = logger.level("ERROR")
>>> print(level)
Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
>>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
1
>>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
Level(name='CUSTOM', no=15, color='<blue>', icon='@')
>>> logger.log("CUSTOM", "Logging...")
15 @ Logging...
>>> logger.level("WARNING", icon=r"/!\")
Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\')
>>> logger.warning("Updated!")
30 /!\ Updated! | 625941b23eb6a72ae02ec272 |
def Pack(self): <NEW_LINE> <INDENT> CheckErr(self._fdt_obj.pack(), 'pack') <NEW_LINE> self.Refresh() | Pack the device tree down to its minimum size
When nodes and properties shrink or are deleted, wasted space can
build up in the device tree binary. | 625941b2eab8aa0e5d26d8f7 |
def _estimate_from_active_time(self, cpu_active_time, freqs, idle_states, combine): <NEW_LINE> <INDENT> power = 0 <NEW_LINE> ret = {} <NEW_LINE> assert all(0.0 <= a <= 1.0 for a in cpu_active_time) <NEW_LINE> for node in self.root.iter_nodes(): <NEW_LINE> <INDENT> if not node.active_states or not node.idle_states: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> cpus = tuple(node.cpus) <NEW_LINE> freq = freqs[cpus[0]] <NEW_LINE> active_time = max(cpu_active_time[c] for c in cpus) <NEW_LINE> active_power = node.active_states[freq].power * active_time <NEW_LINE> _idle_power = max(node.idle_states[idle_states[c]] for c in cpus) <NEW_LINE> idle_power = _idle_power * (1 - active_time) <NEW_LINE> if combine: <NEW_LINE> <INDENT> ret[cpus] = active_power + idle_power <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret[cpus] = {} <NEW_LINE> ret[cpus]["active"] = active_power <NEW_LINE> ret[cpus]["idle"] = idle_power <NEW_LINE> <DEDENT> <DEDENT> return ret | Helper for estimate_from_cpu_util
Like estimate_from_cpu_util but uses active time i.e. proportion of time
spent not-idle in the range 0.0 - 1.0.
If combine=False, return idle and active power as separate components. | 625941b23539df3088e2e0e4 |
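A minimal arithmetic sketch of the per-node split computed above: active power weighted by the busiest CPU's active time, idle power by the remainder (all numbers hypothetical):

```python
# Per-node energy-model split, mirroring the row above.
active_power_at_freq = 500.0   # mW at the selected frequency (hypothetical)
idle_power_in_state = 50.0     # mW in the reported idle state (hypothetical)
active_time = 0.25             # max fraction of non-idle time across CPUs

active = active_power_at_freq * active_time        # 125.0 mW
idle = idle_power_in_state * (1.0 - active_time)   # 37.5 mW
print(active + idle)                               # combined estimate, 162.5 mW
```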
def _nre(Z, geom): <NEW_LINE> <INDENT> nre = 0. <NEW_LINE> for at1 in range(geom.shape[0]): <NEW_LINE> <INDENT> for at2 in range(at1): <NEW_LINE> <INDENT> dist = np.linalg.norm(geom[at1] - geom[at2]) <NEW_LINE> nre += Z[at1] * Z[at2] / dist <NEW_LINE> <DEDENT> <DEDENT> return nre | Nuclear repulsion energy | 625941b25510c4643540f194 |
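A minimal worked sketch of the same pairwise double loop on a hypothetical H2 geometry (atomic units throughout: charges in e, distances in bohr, energy in hartree):

```python
import numpy as np

Z = [1.0, 1.0]                         # two protons
geom = np.array([[0.0, 0.0, 0.0],
                 [0.0, 0.0, 1.4]])     # 1.4 bohr apart

nre = 0.0
for at1 in range(geom.shape[0]):
    for at2 in range(at1):             # each unique pair counted once
        nre += Z[at1] * Z[at2] / np.linalg.norm(geom[at1] - geom[at2])
print(nre)                             # 1 / 1.4 ~ 0.714 hartree
```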
def get_roles_for_user(self, user): <NEW_LINE> <INDENT> return self.model.model['g']['g'].rm.get_roles(user) | gets the roles that a user has. | 625941b28a43f66fc4b53e0b |
def exit_config_mode(self, exit_config: str = "exit", pattern: str = "") -> str: <NEW_LINE> <INDENT> return self.exit_enable_mode(exit_command=exit_config) | Use equivalent enable method. | 625941b23539df3088e2e0e5 |
def lt(self, i, j): <NEW_LINE> <INDENT> if self.peers[i].rank == self.peers[j].rank: <NEW_LINE> <INDENT> return self.peers[i].order < self.peers[j].order <NEW_LINE> <DEDENT> return self.peers[i].rank < self.peers[j].rank | Compare the priority of two peers.
The primary comparator is the rank of each peer. If the ``rank`` is the
same, then compare the ``order``. The ``order`` attribute of the peer
tracks the heap push order of the peer. This helps resolve the imbalance
problem caused by randomization when dealing with same-rank situations.
:param i: ith peer
:param j: jth peer
:return: True or False | 625941b26e29344779a623b0 |
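A minimal sketch of the rank-then-order comparison, with a hypothetical named tuple standing in for the real peer objects:

```python
from collections import namedtuple

Peer = namedtuple("Peer", ["rank", "order"])   # hypothetical stand-in

def lt(a, b):
    if a.rank == b.rank:
        return a.order < b.order   # tie-break by heap-push order
    return a.rank < b.rank

assert lt(Peer(rank=1, order=0), Peer(rank=2, order=5))  # lower rank first
assert lt(Peer(rank=1, order=0), Peer(rank=1, order=1))  # then earlier push
```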
def load_wn18rr(data_home=None): <NEW_LINE> <INDENT> if data_home is None: <NEW_LINE> <INDENT> data_home = get_data_home() <NEW_LINE> <DEDENT> data_path = data_home + '/WN18RR' <NEW_LINE> if not exists(data_path): <NEW_LINE> <INDENT> makedirs(data_path, exist_ok=True) <NEW_LINE> urlretrieve("https://graphs.telecom-paristech.fr/data/torchkge/kgs/WN18RR.zip", data_home + '/WN18RR.zip') <NEW_LINE> with zipfile.ZipFile(data_home + '/WN18RR.zip', 'r') as zip_ref: <NEW_LINE> <INDENT> zip_ref.extractall(data_home) <NEW_LINE> <DEDENT> remove(data_home + '/WN18RR.zip') <NEW_LINE> <DEDENT> df1 = read_csv(data_path + '/train.txt', sep='\t', header=None, names=['from', 'rel', 'to']) <NEW_LINE> df2 = read_csv(data_path + '/valid.txt', sep='\t', header=None, names=['from', 'rel', 'to']) <NEW_LINE> df3 = read_csv(data_path + '/test.txt', sep='\t', header=None, names=['from', 'rel', 'to']) <NEW_LINE> df = concat([df1, df2, df3]) <NEW_LINE> kg = KnowledgeGraph(df) <NEW_LINE> return kg.split_kg(sizes=(len(df1), len(df2), len(df3))) | Load WN18RR dataset. See `here
<https://arxiv.org/abs/1707.01476>`__ for paper by Dettmers et
al. originally presenting the dataset.
Parameters
----------
data_home: str, optional
Path to the `torchkge_data` directory (containing data folders). If
files are not present on disk in this directory, they are downloaded
and then placed in the right place.
Returns
-------
kg_train: torchkge.data_structures.KnowledgeGraph
kg_val: torchkge.data_structures.KnowledgeGraph
kg_test: torchkge.data_structures.KnowledgeGraph | 625941b2b5575c28eb68dd96 |
@typemap <NEW_LINE> def cosine_distance(x, y, name=''): <NEW_LINE> <INDENT> from cntk.cntk_py import cosine_distance <NEW_LINE> dtype = get_data_type(x, y) <NEW_LINE> x = sanitize_input(x, dtype) <NEW_LINE> y = sanitize_input(y, dtype) <NEW_LINE> return cosine_distance(x, y, name) | Computes the cosine distance between ``x`` and ``y``:
Example:
>>> a = np.asarray([-1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1]).reshape(3,2,2)
>>> b = np.asarray([1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1]).reshape(3,2,2)
>>> x = C.input_variable(shape=(2,))
>>> y = C.input_variable(shape=(2,))
>>> C.cosine_distance(x,y).eval({x:a,y:b}) # doctest: +SKIP
array([[-0.99999982, 0.99999982],
[ 0.99999982, 0. ],
[ 0. , -0.99999982]], dtype=float32)
Args:
x: numpy array or any :class:`~cntk.ops.functions.Function` that outputs a tensor
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.ops.functions.Function` | 625941b2a79ad161976cbedf |
def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False): <NEW_LINE> <INDENT> assert img1.shape == img2.shape, ( f'Image shapes are different: {img1.shape}, {img2.shape}.') <NEW_LINE> if input_order not in ['HWC', 'CHW']: <NEW_LINE> <INDENT> raise ValueError( f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') <NEW_LINE> <DEDENT> img1 = reorder_image(img1, input_order=input_order) <NEW_LINE> img2 = reorder_image(img2, input_order=input_order) <NEW_LINE> if crop_border != 0: <NEW_LINE> <INDENT> img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...] <NEW_LINE> img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] <NEW_LINE> <DEDENT> if test_y_channel: <NEW_LINE> <INDENT> img1 = to_y_channel(img1) <NEW_LINE> img2 = to_y_channel(img2) <NEW_LINE> <DEDENT> mse = np.mean((img1 - img2)**2) <NEW_LINE> if mse == 0: <NEW_LINE> <INDENT> return float('inf') <NEW_LINE> <DEDENT> return 20. * np.log10(255. / np.sqrt(mse)) | Calculate PSNR (Peak Signal-to-Noise Ratio).
Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result. | 625941b26fece00bbac2d4d4 |
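A minimal worked example of the PSNR formula on synthetic uint8-range data (no cropping, no Y-channel conversion):

```python
import numpy as np

img1 = np.full((4, 4), 200.0)   # flat grey image
img2 = img1 + 5.0               # uniform error of 5 levels -> MSE = 25

mse = np.mean((img1 - img2) ** 2)
psnr = 20.0 * np.log10(255.0 / np.sqrt(mse))
print(psnr)                     # ~34.15 dB; identical images give inf
```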
def main(self): <NEW_LINE> <INDENT> best = self.start <NEW_LINE> h0 = self.h_value_tem(best) <NEW_LINE> init_open = [best[0], best[1], 0, 0, 0, h0] <NEW_LINE> self.open = numpy.column_stack((self.open, init_open)) <NEW_LINE> ite = 1 <NEW_LINE> while ite <= 1000: <NEW_LINE> <INDENT> if self.open.shape[1] == 0: <NEW_LINE> <INDENT> print('No path was found!') <NEW_LINE> return <NEW_LINE> <DEDENT> self.open = self.open.T[numpy.lexsort(self.open)].T <NEW_LINE> best = self.open[:, 0] <NEW_LINE> print('Checking iteration %s, current point coordinates*******************' % ite) <NEW_LINE> print(best) <NEW_LINE> self.closed = numpy.c_[self.closed, best] <NEW_LINE> if best[0] == 15 and best[1] == 15: <NEW_LINE> <INDENT> print('Search succeeded!') <NEW_LINE> return <NEW_LINE> <DEDENT> self.child_point(best) <NEW_LINE> print(self.open) <NEW_LINE> self.open = numpy.delete(self.open, 0, axis=1) <NEW_LINE> ite = ite+1 | Main function
:return: | 625941b2a79ad161976cbee0 |
def callFromThread(self, f, *args, **kw): <NEW_LINE> <INDENT> assert callable(f), "%s is not callable" % f <NEW_LINE> with NullContext(): <NEW_LINE> <INDENT> self._io_loop.add_callback(f, *args, **kw) | See `twisted.internet.interfaces.IReactorThreads.callFromThread` | 625941b294891a1f4081b844 |
def as_string(self): <NEW_LINE> <INDENT> writer = outparser.Writer() <NEW_LINE> with io.StringIO() as stream: <NEW_LINE> <INDENT> writer.write_stream(self._entry2db(), stream) <NEW_LINE> string = stream.getvalue() <NEW_LINE> <DEDENT> string = string.strip() <NEW_LINE> return string | Return entry as formatted bibtex string. | 625941b267a9b606de4a7c57 |
def CompareParameters(self, params): <NEW_LINE> <INDENT> source_len, target_len = len(self.ordered_params), len(params) <NEW_LINE> edit_lists = [[]] <NEW_LINE> distance = [[]] <NEW_LINE> for i in range(target_len+1): <NEW_LINE> <INDENT> edit_lists[0].append(['I'] * i) <NEW_LINE> distance[0].append(i) <NEW_LINE> <DEDENT> for j in range(1, source_len+1): <NEW_LINE> <INDENT> edit_lists.append([['D'] * j]) <NEW_LINE> distance.append([j]) <NEW_LINE> <DEDENT> for i in range(source_len): <NEW_LINE> <INDENT> for j in range(target_len): <NEW_LINE> <INDENT> cost = 1 <NEW_LINE> if self.ordered_params[i] == params[j]: <NEW_LINE> <INDENT> cost = 0 <NEW_LINE> <DEDENT> deletion = distance[i][j+1] + 1 <NEW_LINE> insertion = distance[i+1][j] + 1 <NEW_LINE> substitution = distance[i][j] + cost <NEW_LINE> edit_list = None <NEW_LINE> best = None <NEW_LINE> if deletion <= insertion and deletion <= substitution: <NEW_LINE> <INDENT> best = deletion <NEW_LINE> edit_list = list(edit_lists[i][j+1]) <NEW_LINE> edit_list.append('D') <NEW_LINE> <DEDENT> elif insertion <= substitution: <NEW_LINE> <INDENT> best = insertion <NEW_LINE> edit_list = list(edit_lists[i+1][j]) <NEW_LINE> edit_list.append('I') <NEW_LINE> edit_lists[i+1].append(edit_list) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> best = substitution <NEW_LINE> edit_list = list(edit_lists[i][j]) <NEW_LINE> if cost: <NEW_LINE> <INDENT> edit_list.append('S') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> edit_list.append('=') <NEW_LINE> <DEDENT> <DEDENT> edit_lists[i+1].append(edit_list) <NEW_LINE> distance[i+1].append(best) <NEW_LINE> <DEDENT> <DEDENT> return distance[source_len][target_len], edit_lists[source_len][target_len] | Computes the edit distance and list from the function params to the docs.
Uses the Levenshtein edit distance algorithm, with code modified from
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
Args:
params: The parameter list for the function declaration.
Returns:
The edit distance, the edit list. | 625941b2796e427e537b035b |
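A minimal sketch of the same Levenshtein recurrence without the edit-list bookkeeping, to make the distance computation itself easy to follow:

```python
def edit_distance(src, tgt):
    # d[i][j] = cost of turning src[:i] into tgt[:j]
    d = [[0] * (len(tgt) + 1) for _ in range(len(src) + 1)]
    for i in range(len(src) + 1):
        d[i][0] = i                       # i deletions
    for j in range(len(tgt) + 1):
        d[0][j] = j                       # j insertions
    for i in range(1, len(src) + 1):
        for j in range(1, len(tgt) + 1):
            cost = 0 if src[i - 1] == tgt[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,          # deletion
                          d[i][j - 1] + 1,          # insertion
                          d[i - 1][j - 1] + cost)   # substitution / match
    return d[-1][-1]

assert edit_distance(["a", "b"], ["a", "c", "b"]) == 1
assert edit_distance(["x"], ["x"]) == 0
```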
def get_root_classes_count(workspace_pid): <NEW_LINE> <INDENT> return get_class_links_qs(workspace_pid, 'is_a', 'classification_root').count() | Return the number of available root classes for the given workspace
project. | 625941b245492302aab5e05a |
def permute(self, pivotsM: "VectorSizeT", n: "size_t"=2) -> "math::linear::MatrixMxN< 7,2,double >::Like_T": <NEW_LINE> <INDENT> return _math_linear.Matrix7x2_permute(self, pivotsM, n) | permute(Matrix7x2 self, VectorSizeT pivotsM, size_t n=2) -> Matrix7x2 | 625941b255399d3f05588452 |
def add_value(self, name: str, value: Any): <NEW_LINE> <INDENT> apply_key_value(self.__data, name, value) | Adds a configuration value by name. The name can contain
paths to nested objects and list indices.
:param name: name of property to set
:param value: the value to set | 625941b2dc8b845886cb52ce |
def test_creds_update_failure_bad_http_response_code(self): <NEW_LINE> <INDENT> self._test_creds_update_failure( the_is_ok=True, the_code=httplib.INTERNAL_SERVER_ERROR) | Validates async_creds_deleter's behavior when
the update of credentials being deleted fails. | 625941b260cbc95b062c62e3 |
def get_default_attendance_sign_out(self, cr, uid, fields, context=None): <NEW_LINE> <INDENT> company_obj = self.pool.get('res.company') <NEW_LINE> company_id = company_obj._company_default_get(cr, uid, 'hr.attendance.importer', context=context) <NEW_LINE> company = company_obj.browse(cr, uid, company_id, context=context) <NEW_LINE> return {'attendance_sign_out': company.attendance_sign_out} | Get the default company sign out action | 625941b250812a4eaa59c0c1 |
def check_source_code(repo_url, branch): <NEW_LINE> <INDENT> init_py = None <NEW_LINE> init_py_txt = None <NEW_LINE> init_py = repo_url.replace( 'https://github.com/', 'https://raw.githubusercontent.com/') + '/' + branch + '/__init__.py' <NEW_LINE> init_py_txt = requests.get(init_py).text <NEW_LINE> try: <NEW_LINE> <INDENT> result = ast.parse(init_py_txt) <NEW_LINE> return True <NEW_LINE> <DEDENT> except SyntaxError as exc: <NEW_LINE> <INDENT> return False | Checks that the source code is syntactically valid using ast | 625941b29b70327d1c4e0b6e |
def _airMass(self, angleFromVertical): <NEW_LINE> <INDENT> denom = math.cos(angleFromVertical) + 0.50572 * (96.07995 - angleFromVertical) ** -1.6364 <NEW_LINE> if denom >= 0: <NEW_LINE> <INDENT> return 1 / denom <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return -1 | Raw formula to calculate air mass
Parameters
----------
angleFromVertical: float
in radians
Returns
-------
float | 625941b2d164cc6175782ae7 |
def initialize_to_first_positive(self, training_data, labels): <NEW_LINE> <INDENT> for i in range(len(labels)): <NEW_LINE> <INDENT> if labels[i] == 'yes': <NEW_LINE> <INDENT> init_set = [training_data[i, :]] <NEW_LINE> return init_set | Returns a list with one hypothesis which is equal to the first positive example | 625941b2796e427e537b035c |
def read_trigger_config(filename): <NEW_LINE> <INDENT> trigger_file = open(filename, 'r') <NEW_LINE> lines = [] <NEW_LINE> for line in trigger_file: <NEW_LINE> <INDENT> line = line.rstrip() <NEW_LINE> if not (len(line) == 0 or line.startswith('//')): <NEW_LINE> <INDENT> lines.append(line) <NEW_LINE> <DEDENT> <DEDENT> triggerlist = [] <NEW_LINE> triggers = {} <NEW_LINE> for triggerstr in lines: <NEW_LINE> <INDENT> triggerparameters = triggerstr.split(',') <NEW_LINE> if triggerparameters[0] == 'ADD': <NEW_LINE> <INDENT> for triggerkey in triggerparameters[1:]: <NEW_LINE> <INDENT> triggerlist.append(triggers[triggerkey]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> triggername = triggerparameters[0] <NEW_LINE> triggertype = triggerparameters[1] <NEW_LINE> conditionaltriggers = ['NOT', 'AND', 'OR'] <NEW_LINE> if triggertype in conditionaltriggers: <NEW_LINE> <INDENT> subtriggers = [] <NEW_LINE> for triggerkey in triggerparameters[2:]: <NEW_LINE> <INDENT> subtriggers.append(triggers[triggerkey]) <NEW_LINE> <DEDENT> triggers[triggername] = buildtrigger(triggertype, subtriggers) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> triggers[triggername] = buildtrigger(triggertype, triggerparameters[2:]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return triggerlist | filename: the name of a trigger configuration file
Returns: a list of trigger objects specified by the trigger configuration
file. | 625941b27047854f462a11ab |
def __init__(self): <NEW_LINE> <INDENT> self.b = "" <NEW_LINE> self.k = 0 <NEW_LINE> self.k0 = 0 <NEW_LINE> self.FTense = None <NEW_LINE> self.j = 0 <NEW_LINE> self.RESULT = defaultdict(lambda :[]) <NEW_LINE> self.DICT = defaultdict(lambda :'') | The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called. | 625941b27c178a314d6ef1f2 |
@task <NEW_LINE> def install_google_oauth_creds(): <NEW_LINE> <INDENT> run('git clone [email protected]:nprapps/workinprivate.git /tmp/workinprivate-tmp') <NEW_LINE> run('cp /tmp/workinprivate-tmp/.google_oauth_credentials %s' % app_config.GOOGLE_OAUTH_CREDENTIALS_PATH) <NEW_LINE> run('rm -Rf /tmp/workinprivate-tmp') | Install Google Oauth credentials file (global) from workinprivate repo | 625941b2f9cc0f698b1403a1 |
def call(self, cmdobj, args, msg=None, cmdset=None, noansi=True, caller=None, receiver=None, cmdstring=None, obj=None): <NEW_LINE> <INDENT> caller = caller if caller else self.char1 <NEW_LINE> receiver = receiver if receiver else caller <NEW_LINE> cmdobj.caller = caller <NEW_LINE> cmdobj.cmdname = cmdstring if cmdstring else cmdobj.key <NEW_LINE> cmdobj.raw_cmdname = cmdobj.cmdname <NEW_LINE> cmdobj.cmdstring = cmdobj.cmdname <NEW_LINE> cmdobj.args = args <NEW_LINE> cmdobj.cmdset = cmdset <NEW_LINE> cmdobj.session = SESSIONS.session_from_sessid(1) <NEW_LINE> cmdobj.account = self.account <NEW_LINE> cmdobj.raw_string = cmdobj.key + " " + args <NEW_LINE> cmdobj.obj = obj or (caller if caller else self.char1) <NEW_LINE> old_msg = receiver.msg <NEW_LINE> returned_msg = "" <NEW_LINE> try: <NEW_LINE> <INDENT> receiver.msg = Mock() <NEW_LINE> cmdobj.at_pre_cmd() <NEW_LINE> cmdobj.parse() <NEW_LINE> ret = cmdobj.func() <NEW_LINE> if isinstance(ret, types.GeneratorType): <NEW_LINE> <INDENT> ret.next() <NEW_LINE> <DEDENT> cmdobj.at_post_cmd() <NEW_LINE> <DEDENT> except StopIteration: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> except InterruptCommand: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> stored_msg = [args[0] if args and args[0] else kwargs.get("text", utils.to_str(kwargs, force_string=True)) for name, args, kwargs in receiver.msg.mock_calls] <NEW_LINE> stored_msg = [smsg[0] if isinstance(smsg, tuple) else smsg for smsg in stored_msg] <NEW_LINE> if msg is not None: <NEW_LINE> <INDENT> returned_msg = "||".join(_RE.sub("", mess) for mess in stored_msg) <NEW_LINE> returned_msg = ansi.parse_ansi(returned_msg, strip_ansi=noansi).strip() <NEW_LINE> if msg == "" and returned_msg or not returned_msg.startswith(msg.strip()): <NEW_LINE> <INDENT> sep1 = "\n" + "=" * 30 + "Wanted message" + "=" * 34 + "\n" <NEW_LINE> sep2 = "\n" + "=" * 30 + "Returned message" + "=" * 32 + "\n" <NEW_LINE> sep3 = "\n" + "=" * 78 <NEW_LINE> retval = sep1 + msg.strip() + sep2 + returned_msg + sep3 <NEW_LINE> raise AssertionError(retval) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> returned_msg = "\n".join(str(msg) for msg in stored_msg) <NEW_LINE> returned_msg = ansi.parse_ansi(returned_msg, strip_ansi=noansi).strip() <NEW_LINE> <DEDENT> receiver.msg = old_msg <NEW_LINE> <DEDENT> return returned_msg | Test a command by assigning all the needed
properties to cmdobj and running
cmdobj.at_pre_cmd()
cmdobj.parse()
cmdobj.func()
cmdobj.at_post_cmd()
The msgreturn value is compared to eventual
output sent to caller.msg in the game
Returns:
msg (str): The received message that was sent to the caller. | 625941b2d6c5a10208143de0 |
def svn_relpath_is_canonical(*args): <NEW_LINE> <INDENT> return _core.svn_relpath_is_canonical(*args) | svn_relpath_is_canonical(char const * relpath) -> svn_boolean_t | 625941b2ff9c53063f47bf99 |
@url('^reset/$', 'auth_reset_password') <NEW_LINE> def auth_reset_password(request, template='auth/reset.html'): <NEW_LINE> <INDENT> from forms import PasswordResetForm <NEW_LINE> from atrinsic.base.models import User <NEW_LINE> from atrinsic.util.backend import UserBackend <NEW_LINE> from django.contrib.auth.models import AnonymousUser <NEW_LINE> reset_auth = request.REQUEST.get('reset_auth', None) <NEW_LINE> reset = get_object_or_404(UserPasswordReset, reset=reset_auth) <NEW_LINE> if request.method == 'POST': <NEW_LINE> <INDENT> form = PasswordResetForm(request.POST) <NEW_LINE> if form.is_valid(): <NEW_LINE> <INDENT> reset.user.set_password(form.cleaned_data['password']) <NEW_LINE> reset.user.save() <NEW_LINE> user = authenticate(email=reset.user.email, password=form.cleaned_data['password']) <NEW_LINE> if user: <NEW_LINE> <INDENT> login(request, user) <NEW_LINE> if request.session.get("organization_id", None): <NEW_LINE> <INDENT> del request.session["organization_id"] <NEW_LINE> <DEDENT> reset.delete() <NEW_LINE> return HttpResponseRedirect(reverse('auth_choice')) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> form = PasswordResetForm() <NEW_LINE> <DEDENT> return render_to_response(template, { 'form' : form, 'reset_auth' : reset_auth, }, context_instance = RequestContext(request)) | View to allow users to reset their password. This view takes a GET/POST variable
of a UUID which was previously e-mailed to the User requesting a password reset.
If the UUID is valid, then display a PasswordResetForm allowing them to select
a new password | 625941b23317a56b86939a06 |
@plugin_function("group_replication.adoptFromIC") <NEW_LINE> def adoptFromIC(): <NEW_LINE> <INDENT> msg_output = "FAILED - Instance is not a PRIMARY" <NEW_LINE> if i_check_local_role() == "PRIMARY": <NEW_LINE> <INDENT> clusterAdmin, clusterAdminPassword, hostname, port = i_sess_identity("current") <NEW_LINE> host_list = i_get_other_node() <NEW_LINE> mysqlsh.globals.dba.get_cluster().dissolve({"interactive":False}) <NEW_LINE> create() <NEW_LINE> if len(host_list) != 0: <NEW_LINE> <INDENT> for secNode in host_list: <NEW_LINE> <INDENT> if shell.parse_uri(secNode)["port"] > 10000: <NEW_LINE> <INDENT> addInstance(clusterAdmin + ":" + clusterAdminPassword + "@" + secNode[:-1]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> addInstance(clusterAdmin + ":" + clusterAdminPassword + "@" + shell.parse_uri(secNode)["host"] + ":" + str(shell.parse_uri(secNode)["port"] - 10)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> i_drop_ic_metadata() <NEW_LINE> msg_output = "Successful conversion from InnoDB Cluster to Group Replication" <NEW_LINE> <DEDENT> return msg_output | Convert From InnoDB Cluster to native Group Replication
This function converts a MySQL InnoDB Cluster to a native Group Replication environment | 625941b2462c4b4f79d1d46a |
def add_taxon(self, taxon): <NEW_LINE> <INDENT> if not self._is_mutable: <NEW_LINE> <INDENT> raise KeyError("Taxon %s:'%s' cannot be added to an immutable TaxonSet." % (taxon.oid, taxon.label)) <NEW_LINE> <DEDENT> self.add(taxon) | Adds taxon to self. | 625941b2507cdc57c6306a6c |
def show_message( self, title, text, info_text=None, detailed_text=None, icon=QMessageBox.Critical ): <NEW_LINE> <INDENT> self.logger.info(info_text) | Log messages to standard logger. | 625941b21f037a2d8b945f99 |
def __init__(self): <NEW_LINE> <INDENT> self.Offset = None <NEW_LINE> self.Limit = None <NEW_LINE> self.Role = None <NEW_LINE> self.OrderType = None <NEW_LINE> self.KeyState = None <NEW_LINE> self.SearchKeyAlias = None <NEW_LINE> self.Origin = None <NEW_LINE> self.KeyUsage = None | :param Offset: 含义跟 SQL 查询的 Offset 一致,表示本次获取从按一定顺序排列数组的第 Offset 个元素开始,缺省为0
:type Offset: int
:param Limit: 含义跟 SQL 查询的 Limit 一致,表示本次最多获取 Limit 个元素。缺省值为10,最大值为200
:type Limit: int
:param Role: 根据创建者角色筛选,默认 0 表示用户自己创建的cmk, 1 表示授权其它云产品自动创建的cmk
:type Role: int
:param OrderType: 根据CMK创建时间排序, 0 表示按照降序排序,1表示按照升序排序
:type OrderType: int
:param KeyState: 根据CMK状态筛选, 0表示全部CMK, 1 表示仅查询Enabled CMK, 2 表示仅查询Disabled CMK,3 表示查询PendingDelete 状态的CMK(处于计划删除状态的Key),4 表示查询 PendingImport 状态的CMK
:type KeyState: int
:param SearchKeyAlias: 根据KeyId或者Alias进行模糊匹配查询
:type SearchKeyAlias: str
:param Origin: 根据CMK类型筛选, "TENCENT_KMS" 表示筛选密钥材料由KMS创建的CMK, "EXTERNAL" 表示筛选密钥材料需要用户导入的 EXTERNAL类型CMK,"ALL" 或者不设置表示两种类型都查询,大小写敏感。
:type Origin: str
:param KeyUsage: 根据CMK的KeyUsage筛选,ALL表示筛选全部,可使用的参数为:ALL 或 ENCRYPT_DECRYPT 或 ASYMMETRIC_DECRYPT_RSA_2048 或 ASYMMETRIC_DECRYPT_SM2,为空则默认筛选ENCRYPT_DECRYPT类型
:type KeyUsage: str | 625941b2046cf37aa974cae6 |
def test_ami_id(self): <NEW_LINE> <INDENT> test_string = "ami-id,data-api" <NEW_LINE> resolver = EFVersionResolver(TestEFVersionResolver.clients) <NEW_LINE> self.assertRegexpMatches(resolver.lookup(test_string), "^ami-[a-f0-9]{8}$") | Does ami-id,data-api resolve to an AMI id | 625941b28e7ae83300e4ad6d |
def click_title(self): <NEW_LINE> <INDENT> if self.roleName not in ["frame", "alert"]: <NEW_LINE> <INDENT> raise RuntimeError("Can't use click_title() on type=%s" % self.roleName) <NEW_LINE> <DEDENT> button = 1 <NEW_LINE> clickX, clickY = self.title_coordinates() <NEW_LINE> dogtail.rawinput.click(clickX, clickY, button) | Helper to click a window title bar, hitting the horizontal
center of the bar | 625941b24d74a7450ccd3f63 |
def cluster(self, window): <NEW_LINE> <INDENT> if len(self.time1) == 0 or len(self.time2) == 0: <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> from pycbc.events import cluster_coincs <NEW_LINE> interval = self.attrs['timeslide_interval'] <NEW_LINE> cid = cluster_coincs(self.stat, self.time1, self.time2, self.timeslide_id, interval, window) <NEW_LINE> return self.select(cid) | Cluster the dict array, assuming it has the relevant Coinc columns,
time1, time2, stat, and timeslide_id | 625941b2a934411ee3751436 |
def bar_plot(graph, node_num, fig): <NEW_LINE> <INDENT> nodes = ["node_" + str(i + 1) for i in range(graph.number_of_nodes())] <NEW_LINE> y_pos = np.arange(len(nodes)) + 0.5 <NEW_LINE> size = graph.number_of_nodes() <NEW_LINE> x_values = [0 for i in range(size)] <NEW_LINE> bars = plt.barh(y_pos, x_values, 0.4, align='center') <NEW_LINE> mybars = [PlotBars(bars[i]) for i in range(size)] <NEW_LINE> plt.yticks(y_pos, nodes) <NEW_LINE> plt.xlabel('Value') <NEW_LINE> plt.title('Packets in the data_stack of node_'+str(node_num)) <NEW_LINE> plt.xlim(0, size) <NEW_LINE> plt.ylim(0, size) <NEW_LINE> nodes_names = nx.get_node_attributes(graph, "name") <NEW_LINE> names_nodes = dict(zip(nodes_names.values(), nodes_names.keys())) <NEW_LINE> width_hist = names_nodes[str(node_num)].packet_history <NEW_LINE> print(width_hist) <NEW_LINE> ani = animation.FuncAnimation(fig, animate, frames=len(width_hist[0, :]), fargs=(mybars, width_hist), interval=100, blit=False, repeat=False) <NEW_LINE> ani.save('node_'+str(node_num)+'.mp4', writer='ffmpeg') | create barplot to visualize
the packet transmission | 625941b263b5f9789fde6e80 |
def _check_full_length(centroids): <NEW_LINE> <INDENT> centroids_ = numpy.empty(centroids.shape) <NEW_LINE> n, max_sz = centroids.shape[:2] <NEW_LINE> for i in range(n): <NEW_LINE> <INDENT> sz = ts_size(centroids[i]) <NEW_LINE> centroids_[i, :sz] = centroids[i, :sz] <NEW_LINE> if sz < max_sz: <NEW_LINE> <INDENT> centroids_[i, sz:] = centroids[i, sz-1] <NEW_LINE> <DEDENT> <DEDENT> return centroids_ | Check that provided centroids are full-length (ie. not padded with
nans).
If some centroids are found to be padded with nans, the last value is
repeated until the end. | 625941b267a9b606de4a7c58 |
def get_bid_count(p_market_data): <NEW_LINE> <INDENT> market_data = get_depth_market_data(p_market_data) <NEW_LINE> return market_data.BidCount | Bid Count
:param p_market_data:
:return: | 625941b2507cdc57c6306a6d |
def disable_profiling(self): <NEW_LINE> <INDENT> self._profiler.disable() <NEW_LINE> s = StringIO() <NEW_LINE> ps = pstats.Stats(self._profiler, stream=s).sort_stats("cumulative") <NEW_LINE> ps.print_stats(self.PSTATS_LIMIT) <NEW_LINE> log.debug(s.getvalue()) <NEW_LINE> log.debug("Agent profiling is disabled") <NEW_LINE> if self.DUMP_TO_FILE: <NEW_LINE> <INDENT> log.debug("Pstats dumps are enabled. Dumping pstats output to {0}" .format(self.STATS_DUMP_FILE)) <NEW_LINE> ps.dump_stats(self.STATS_DUMP_FILE) | Disable the profiler, and if necessary dump a truncated pstats output | 625941b25e10d32532c5eccb |
def clear_all_plots(self): <NEW_LINE> <INDENT> for plot_type in GlobalConfig.PLOT_TYPES: <NEW_LINE> <INDENT> self.ax[plot_type].cla() <NEW_LINE> self.ax[plot_type].set_title(plot_type.upper() + " vs TIME", fontsize=16) <NEW_LINE> self.ax[plot_type].set_ylabel(plot_type, fontsize=14) <NEW_LINE> self.ax[plot_type].set_xlabel("elapsed_time(ms)", fontsize=14) <NEW_LINE> <DEDENT> self.canvas.draw() | Clears all plots but preserves the titles and the names of the X and Y axes. | 625941b2eab8aa0e5d26d8f9 |
def __interactive_mode__(self, command_name, columns, dct, null_value, parents=[], already_done=0): <NEW_LINE> <INDENT> stop = previous_value_is_set = 0 <NEW_LINE> previous_min = [n[1]for n in parents] <NEW_LINE> is_poll = command_name == 'poll' <NEW_LINE> is_create_domain = command_name == 'create_domain' <NEW_LINE> for row in columns: <NEW_LINE> <INDENT> name, min_max, allowed, msg_help, example, pattern, children = row <NEW_LINE> if is_poll and name == 'msg_id' and dct.get('op', [''])[0] == 'req': <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> min, max = min_max <NEW_LINE> if is_create_domain and name == 'val_ex_date' and re.search(ENUM_DOMAIN_TYPE_PATT, dct.get('name', [''])[0], re.I): <NEW_LINE> <INDENT> min = 1 <NEW_LINE> <DEDENT> utext, error = text_to_unicode(msg_help) <NEW_LINE> parents.append([name, min, max, 0, utext]) <NEW_LINE> if already_done: <NEW_LINE> <INDENT> min = 0 <NEW_LINE> <DEDENT> if min: <NEW_LINE> <INDENT> required_pos = 1 <NEW_LINE> if not previous_value_is_set and len(parents) > 1 and 0 in previous_min: <NEW_LINE> <INDENT> required_pos = 2 <NEW_LINE> min = 0 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> required_pos = 0 <NEW_LINE> <DEDENT> if len(children): <NEW_LINE> <INDENT> user_typed_null, stop = self.__interactive_mode_children_values__(dct, command_name, parents, name, min, max, children, null_value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> user_typed_null, stop = self.__interactive_mode_single_value__(dct, command_name, parents, name, min, max, allowed, msg_help, example, null_value, already_done, required_pos) <NEW_LINE> <DEDENT> parents.pop() <NEW_LINE> if user_typed_null and (already_done or required_pos == 2): break <NEW_LINE> if stop: break <NEW_LINE> if not user_typed_null: previous_value_is_set = 1 <NEW_LINE> <DEDENT> return user_typed_null, stop | Loop interactive input for all params of command. | 625941b2be383301e01b5232 |
def invoke_svn(argv, extra_env={}): <NEW_LINE> <INDENT> child_env = os.environ.copy() <NEW_LINE> child_env.update(extra_env) <NEW_LINE> child = subprocess.Popen([SVN, '--non-interactive'] + argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=child_env) <NEW_LINE> stdout, stderr = child.communicate() | Run svn with ARGV as argv[1:]. Return (exit_code, stdout, stderr). | 625941b26e29344779a623b2 |
def get_word2ids_from_tokens(word2id, tokens): <NEW_LINE> <INDENT> return [get_word2id(word2id, x) for x in tokens] | Look up the word2id ID for every token in a previously tokenized text.
Parameters
----------
word2id: dictionary
Word (token) to ID (number) mapping.
tokens: list of strings
Tokens to look up in word2id mapping.
Returns
----------
list of ints (Token IDs) | 625941b20a366e3fb873e5b1 |
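A small usage sketch; the word2id contents and the get_word2id helper shown here are assumptions made so the example is self-contained:

    word2id = {'<unk>': 0, 'the': 1, 'cat': 2, 'sat': 3}

    def get_word2id(word2id, token):
        # assumed helper: falls back to the <unk> id for out-of-vocabulary tokens
        return word2id.get(token, word2id['<unk>'])

    tokens = ['the', 'cat', 'sat', 'down']
    print(get_word2ids_from_tokens(word2id, tokens))  # [1, 2, 3, 0]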
def move_cockroach_intelligently(self): <NEW_LINE> <INDENT> if self.poison > 0: <NEW_LINE> <INDENT> options = -1, 0, 1 <NEW_LINE> x = random.choice(options) <NEW_LINE> y = random.choice(options) <NEW_LINE> old_x, old_y = self.x, self.y <NEW_LINE> new_x, new_y = self.x + x, self.y + y <NEW_LINE> if (new_x, new_y) in Cell.C.keys() and not Cell.C[(new_x, new_y)].is_barrier: <NEW_LINE> <INDENT> Cockroach.Poison[(old_x, old_y)] -= (self.poison - 1) <NEW_LINE> if Cockroach.Poison[(old_x, old_y)] < 0: <NEW_LINE> <INDENT> Cockroach.Poison[(old_x, old_y)] = 0 <NEW_LINE> <DEDENT> self.x, self.y = new_x, new_y <NEW_LINE> Cockroach.Poison[(new_x, new_y)] += self.poison <NEW_LINE> if Cockroach.Poison[(new_x, new_y)] < 0: <NEW_LINE> <INDENT> Cockroach.Poison[(new_x, new_y)] = 0 | The cleaner cockroaches do not perform random walks. Instead they move to the worst infested cell. | 625941b2b57a9660fec3361a |
def _plot_frame(experiment_id_, file_ids, fig, axarr): <NEW_LINE> <INDENT> distr6_q2, distr6_q4, distr12_q2, distr12_q4 = _get_distrs(experiment_id_, file_ids) <NEW_LINE> x_vals, y_vals = np.array(range(resol)), np.array(range(resol)) <NEW_LINE> mesh = np.array(np.meshgrid(x_vals, y_vals)) <NEW_LINE> x_vals, y_vals = tuple(mesh.reshape(2, resol**2)) <NEW_LINE> rad = (max(x_vals)-min(x_vals))/(2.0*resol) <NEW_LINE> size = 125*(rad)**2 <NEW_LINE> sp1 = axarr[0, 0].scatter(x_vals, y_vals, c=distr6_q2, marker='s', s=size) <NEW_LINE> sp2 = axarr[0, 1].scatter(x_vals, y_vals, c=distr6_q4, marker='s', s=size) <NEW_LINE> sp3 = axarr[1, 0].scatter(x_vals, y_vals, c=distr12_q2, marker='s', s=size) <NEW_LINE> sp4 = axarr[1, 1].scatter(x_vals, y_vals, c=distr12_q4, marker='s', s=size) <NEW_LINE> fig.suptitle("angular correlations") <NEW_LINE> axarr[0, 0].set_title("K6 Q2") <NEW_LINE> axarr[0, 1].set_title("K6 Q4") <NEW_LINE> axarr[1, 0].set_title("K12 Q2") <NEW_LINE> axarr[1, 1].set_title("K12 Q4") <NEW_LINE> for ax_ in axarr.reshape(1, 4): <NEW_LINE> <INDENT> for ax in ax_: <NEW_LINE> <INDENT> ax.xaxis.set_visible(False) <NEW_LINE> ax.yaxis.set_visible(False) <NEW_LINE> <DEDENT> <DEDENT> cb1 = plt.colorbar(sp1, cax=axarr[0, 0]) <NEW_LINE> cb2 = plt.colorbar(sp2, cax=axarr[0, 1]) <NEW_LINE> cb3 = plt.colorbar(sp3, cax=axarr[1, 0]) <NEW_LINE> cb4 = plt.colorbar(sp4, cax=axarr[1, 1]) <NEW_LINE> plt.tight_layout() <NEW_LINE> plt.subplots_adjust(top=0.85) | Plots one frame: a 2x2 grid of angular-correlation scatter maps (K6/K12 by Q2/Q4) with colorbars. | 625941b2bde94217f3682b9a
def txt_chatroom_name_return(self, chatroom_name, window): <NEW_LINE> <INDENT> self.btn_confirm_creation_click(chatroom_name, window) <NEW_LINE> return 'break' | Handles the Return key being pressed in the chatroom-name entry box. | 625941b250812a4eaa59c0c2
def rotate_ship(self, image, angle): <NEW_LINE> <INDENT> orig_rect = image.get_rect() <NEW_LINE> rot_image = pygame.transform.rotate(image, angle) <NEW_LINE> self.rect = orig_rect.copy() <NEW_LINE> self.rect.center = rot_image.get_rect().center <NEW_LINE> self.sprite = rot_image.subsurface(self.rect).copy() <NEW_LINE> self.currentAngle = angle | Rotate an image while keeping its center and size. | 625941b27c178a314d6ef1f3
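For comparison, the more common pygame idiom keeps the center by re-anchoring the rotated surface's rect on the old center instead of cropping a subsurface; this is a sketch, not a drop-in replacement for the method above:

    import pygame

    def rotate_about_center(image, angle):
        # pygame.transform.rotate grows the surface; recenter the new rect
        center = image.get_rect().center
        rotated = pygame.transform.rotate(image, angle)
        rect = rotated.get_rect(center=center)
        return rotated, rect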
def group_iterator(grouping): <NEW_LINE> <INDENT> group_dist = {} <NEW_LINE> for (i, name) in enumerate(grouping): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> group_dist[name].append(i) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> group_dist[name] = [i] <NEW_LINE> <DEDENT> <DEDENT> return group_dist.iteritems() | Returns an iterator of values and indices for a grouping variable. | 625941b291af0d3eaac9b7af |
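The dict.iteritems call above is Python 2 only; an equivalent Python 3 sketch with a usage example:

    from collections import defaultdict

    def group_iterator_py3(grouping):
        # collect the indices at which each group label occurs
        group_dist = defaultdict(list)
        for i, name in enumerate(grouping):
            group_dist[name].append(i)
        return iter(group_dist.items())

    for name, indices in group_iterator_py3(['a', 'b', 'a', 'c']):
        print(name, indices)   # a [0, 2] / b [1] / c [3]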
def plot(self, intervals=None, new_fig=True): <NEW_LINE> <INDENT> if new_fig: <NEW_LINE> <INDENT> p.figure() <NEW_LINE> <DEDENT> p.plot(self.x, self.y, ls='-', c='b', lw='1.5') <NEW_LINE> first_peak = None <NEW_LINE> last_peak = None <NEW_LINE> if self.peaks: <NEW_LINE> <INDENT> first_peak = min(self.peaks["peaks"][0]) <NEW_LINE> last_peak = max(self.peaks["peaks"][0]) <NEW_LINE> p.plot(self.peaks["peaks"][0], self.peaks["peaks"][1], 'rD', ms=10) <NEW_LINE> p.plot(self.peaks["valleys"][0], self.peaks["valleys"][1], 'yD', ms=5) <NEW_LINE> <DEDENT> if intervals is not None: <NEW_LINE> <INDENT> for interval in intervals: <NEW_LINE> <INDENT> if first_peak is not None: <NEW_LINE> <INDENT> if interval <= first_peak or interval >= last_peak: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> p.axvline(x=interval, ls='-.', c='g', lw='1.5') <NEW_LINE> if interval-1200 >= min(self.x): <NEW_LINE> <INDENT> p.axvline(x=interval-1200, ls=':', c='b', lw='0.5') <NEW_LINE> <DEDENT> if interval+1200 <= max(self.x): <NEW_LINE> <INDENT> p.axvline(x=interval+1200, ls=':', c='b', lw='0.5') <NEW_LINE> <DEDENT> if interval+2400 <= max(self.x): <NEW_LINE> <INDENT> p.axvline(x=interval+2400, ls='-.', c='r', lw='0.5') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> p.show() | This function plots histogram together with its smoothed
version and, if provided, peak information. Just-intonation
intervals are plotted for reference. | 625941b2d99f1b3c44c6733c
def rfe(self, tp_annotations, fp_annotations, n_features, outreport): <NEW_LINE> <INDENT> aDF_std = self.__process_df(tp_annotations, fp_annotations) <NEW_LINE> feature_names = aDF_std.columns.drop(['is_valid']) <NEW_LINE> array = aDF_std.values <NEW_LINE> total_feats = array.shape[1] <NEW_LINE> X = array[:, 1:total_feats] <NEW_LINE> Y = array[:, 0] <NEW_LINE> model = LogisticRegression() <NEW_LINE> rfe = RFE(model, int(n_features)) <NEW_LINE> fit = rfe.fit(X, Y) <NEW_LINE> f = open(outreport, 'w') <NEW_LINE> f.write("Number of features: {0}\n".format(fit.n_features_)) <NEW_LINE> f.write("Selected Features: {0}\n".format(fit.support_)) <NEW_LINE> f.write("Feature Ranking: {0}\n".format(fit.ranking_)) <NEW_LINE> f.write("The selected features are:{0}\n".format(feature_names[fit.support_])) <NEW_LINE> f.write("All features are:{0}\n".format(feature_names)) <NEW_LINE> f.close() <NEW_LINE> return outreport | Function to select the variant annotations that are most relevant for
predicting whether a variant is real. This is achieved by running the sklearn.feature_selection.RFE
method to perform Recursive Feature Elimination, which works by recursively considering
smaller and smaller sets of features.
Parameters
----------
tp_annotations : filename
Path to file with the variant annotations derived from the call set with
the True positives
fp_annotations : filename
Path to file with the variant annotations derived from the call set with
the False positives
n_features : int
Number of features to select by RFE
outreport : filename
Filename used to write the report to
Returns
-------
filename containing the report on the selected features | 625941b285dfad0860c3abf4
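A self-contained sketch of the same RFE pattern on synthetic data; note that current scikit-learn releases take the feature count as the keyword n_features_to_select rather than positionally:

    import numpy as np
    from sklearn.feature_selection import RFE
    from sklearn.linear_model import LogisticRegression

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 6))
    y = (X[:, 0] + X[:, 3] > 0).astype(int)   # only features 0 and 3 matter

    rfe = RFE(estimator=LogisticRegression(), n_features_to_select=2)
    fit = rfe.fit(X, y)
    print(fit.n_features_)   # 2
    print(fit.support_)      # boolean mask over the 6 features
    print(fit.ranking_)      # rank 1 marks the selected features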
def test_off_chain_transactor_transaction_signer(self): <NEW_LINE> <INDENT> policy = make_policy("policy1", ["PERMIT_KEY " + self.public_key]) <NEW_LINE> self.permissions["transactor.transaction_signer"] = policy <NEW_LINE> batch = self._create_batches(1, 1)[0] <NEW_LINE> allowed = self.permission_verifier.check_off_chain_batch_roles(batch) <NEW_LINE> self.assertTrue(allowed) <NEW_LINE> policy = make_policy("policy1", ["PERMIT_KEY other"]) <NEW_LINE> self.permissions["transactor.transaction_signer"] = policy <NEW_LINE> batch = self._create_batches(1, 1)[0] <NEW_LINE> allowed = self.permission_verifier.check_off_chain_batch_roles(batch) <NEW_LINE> self.assertFalse(allowed) | Test that role:"transactor.transaction_signer" is checked
properly if in permissions.
1. Set policy to permit signing key. Batch should be allowed.
2. Set policy to permit some other key. Batch should be rejected. | 625941b26aa9bd52df036b3e |
def playerStandings(): <NEW_LINE> <INDENT> db = connect() <NEW_LINE> c = db.cursor() <NEW_LINE> query = ("SELECT * FROM players_standings ;") <NEW_LINE> c.execute(query) <NEW_LINE> row =c.fetchall() <NEW_LINE> return row | Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played | 625941b294891a1f4081b845 |
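The query assumes a players_standings view already exists in the database; a hypothetical sketch of such a view, with the players/matches table and column names guessed from the docstring rather than taken from this codebase:

    def create_standings_view():
        # hypothetical schema: players(id, name), matches(id, winner, loser)
        db = connect()
        c = db.cursor()
        c.execute("""
            CREATE VIEW players_standings AS
            SELECT p.id, p.name,
                   COUNT(CASE WHEN m.winner = p.id THEN 1 END) AS wins,
                   COUNT(m.id) AS matches
            FROM players p
            LEFT JOIN matches m ON p.id IN (m.winner, m.loser)
            GROUP BY p.id, p.name
            ORDER BY wins DESC;""")
        db.commit()
        db.close()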
def load_chain(self, chain): <NEW_LINE> <INDENT> chains = Settings.get_chains() <NEW_LINE> if chain in chains: <NEW_LINE> <INDENT> self.original_name = chain <NEW_LINE> self.m_chain_textbox.SetValue(chain) <NEW_LINE> searches = chains[chain] <NEW_LINE> for x in range(len(searches)): <NEW_LINE> <INDENT> self.m_search_list.Insert(searches[x], x) | Load an existing chain. | 625941b224f1403a9260090e |
def init_particles_freespace(num_particles, occupancy_map): <NEW_LINE> <INDENT> MIN_PROBABILITY = 0.35 <NEW_LINE> y, x = np.where(occupancy_map == 0) <NEW_LINE> indices = np.random.choice(len(y), num_particles, replace=False) <NEW_LINE> y0_vals = y[indices].astype(np.float64) * 10. <NEW_LINE> x0_vals = x[indices].astype(np.float64) * 10. <NEW_LINE> theta0_vals = np.random.uniform(-np.pi, np.pi, num_particles) <NEW_LINE> w0_vals = np.ones((num_particles,), dtype=np.float64) <NEW_LINE> w0_vals = w0_vals / num_particles <NEW_LINE> X_bar_init = np.stack([x0_vals, y0_vals, theta0_vals, w0_vals], axis=1) <NEW_LINE> return X_bar_init | This version converges faster than init_particles_random | 625941b250485f2cf553cb34
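A usage sketch with a toy occupancy map, where 0 marks free space (the x10 position scaling above suggests a 10 cm grid resolution, which is an inference, not documented):

    import numpy as np

    occupancy_map = np.ones((80, 80))   # everything occupied ...
    occupancy_map[20:60, 20:60] = 0     # ... except a free 40x40 block

    X_bar = init_particles_freespace(500, occupancy_map)
    print(X_bar.shape)        # (500, 4): x, y, theta, weight
    print(X_bar[:, 3].sum())  # weights sum to 1.0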
def command_exec(self, cmd: str) -> str: <NEW_LINE> <INDENT> output = None <NEW_LINE> try: <NEW_LINE> <INDENT> stdin, stdout, stderr = self.ssh_client.exec_command(cmd) <NEW_LINE> LOGGER.debug('Command exec on host:%s - cmd:%s', self.host, cmd) <NEW_LINE> <DEDENT> except paramiko.SSHException: <NEW_LINE> <INDENT> LOGGER.exception('Command exec exception on host:%s - cmd:%s', self.host, cmd) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> output = stdout.read().decode('ascii').strip('\n').replace('\r', '') <NEW_LINE> self.error = stderr.read().decode('ascii').strip('\n') <NEW_LINE> if self.error: <NEW_LINE> <INDENT> LOGGER.debug('Command exec error on host:%s - err:%s', self.host, self.error) <NEW_LINE> <DEDENT> <DEDENT> return output | Execute the command over SSH and return its output. | 625941b20383005118ecf381
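A minimal sketch of the paramiko session setup this method presupposes in self.ssh_client (host and credentials are placeholders):

    import paramiko

    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect('host.example.com', username='user', password='secret')

    stdin, stdout, stderr = ssh_client.exec_command('uname -a')
    print(stdout.read().decode('ascii').strip('\n'))
    ssh_client.close()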
def handle(self, *args, **options): <NEW_LINE> <INDENT> self.require_settings(args, options) <NEW_LINE> self.load_credentials() <NEW_LINE> self.get_django_settings_file() <NEW_LINE> if not options['zip']: <NEW_LINE> <INDENT> self.create_package() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.zip_path = options['zip'] <NEW_LINE> <DEDENT> self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name) <NEW_LINE> lambda_arn = self.zappa.update_lambda_function( self.s3_bucket_name, self.zip_path, self.lambda_name) <NEW_LINE> self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name) <NEW_LINE> if self.zappa_settings[self.api_stage].get('delete_zip', True) and not options['zip']: <NEW_LINE> <INDENT> os.remove(self.zip_path) <NEW_LINE> <DEDENT> self.remove_s3_local_settings() <NEW_LINE> print("Your updated Zappa deployment is live!") <NEW_LINE> events = self.zappa_settings[self.api_stage].get('events') <NEW_LINE> iam = self.zappa.boto_session.resource('iam') <NEW_LINE> self.zappa.credentials_arn = iam.Role(self.zappa.role_name).arn <NEW_LINE> if options['unschedule'] and events: <NEW_LINE> <INDENT> self.zappa.unschedule_events(lambda_arn, self.lambda_name, events) <NEW_LINE> <DEDENT> elif options['unschedule'] and not events: <NEW_LINE> <INDENT> print("No Events to Unschedule") <NEW_LINE> <DEDENT> if options['schedule'] and events: <NEW_LINE> <INDENT> self.zappa.schedule_events(lambda_arn, self.lambda_name, events) <NEW_LINE> <DEDENT> elif options['schedule'] and not events: <NEW_LINE> <INDENT> print("No Events to Schedule") | Execute the command. | 625941b255399d3f05588453 |
def GetPosition(self,channel=0): <NEW_LINE> <INDENT> channelID,destAddress=self.channelAddresses[channel] <NEW_LINE> response=self.query(c.MGMSG_MOT_REQ_POSCOUNTER,c.MGMSG_MOT_GET_POSCOUNTER,channelID,destID=destAddress) <NEW_LINE> posParam=response[-1][-1] <NEW_LINE> return self._encToPosition(posParam) | Get the position in mm | 625941b20fa83653e4656d61 |
def test_names(uqcsbot: MockUQCSBot): <NEW_LINE> <INDENT> uqcsbot.post_message(TEST_CHANNEL_ID, '!pokemash mr. mime scyther') <NEW_LINE> messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, []) <NEW_LINE> assert len(messages) == 2 <NEW_LINE> assert messages[-1]['text'] == ("_Mr.ther_\n" "https://images.alexonsager.net/pokemon/fused/123/123.122.png") | Test !pokemash with name arguments. | 625941b20a50d4780f666c2b |
def get_tool_version(self, _): <NEW_LINE> <INDENT> print(self.image_gen.get_tool_version()) | Implements 'version' sub-command | 625941b28c3a87329515815a |
def main(selection): <NEW_LINE> <INDENT> if selection == 'all': <NEW_LINE> <INDENT> for key in FILES_TO_DELETE: <NEW_LINE> <INDENT> main(key) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for entry in FILES_TO_DELETE[selection]: <NEW_LINE> <INDENT> if isinstance(entry, tuple): <NEW_LINE> <INDENT> path, pattern = entry <NEW_LINE> for file_or_folder in path.glob(pattern): <NEW_LINE> <INDENT> if file_or_folder.is_file(): <NEW_LINE> <INDENT> print("Remove file %s" % file_or_folder) <NEW_LINE> file_or_folder.unlink() <NEW_LINE> <DEDENT> elif file_or_folder.is_dir(): <NEW_LINE> <INDENT> print("Remove folder %s" % file_or_folder) <NEW_LINE> shutil.rmtree(file_or_folder) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif entry.is_file(): <NEW_LINE> <INDENT> print("Remove file %s" % entry) <NEW_LINE> entry.unlink() <NEW_LINE> <DEDENT> elif entry.is_dir(): <NEW_LINE> <INDENT> print("Remove folder %s" % entry) <NEW_LINE> shutil.rmtree(entry) | Delete the files and folders registered under the given selection in FILES_TO_DELETE; 'all' processes every group. | 625941b2f7d966606f6a9da5
def poisson(center, n_samples, cls=Distribution, **kwargs): <NEW_LINE> <INDENT> has_unit = False <NEW_LINE> if hasattr(center, 'unit'): <NEW_LINE> <INDENT> has_unit = True <NEW_LINE> poissonarr = np.asanyarray(center.value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> poissonarr = np.asanyarray(center) <NEW_LINE> <DEDENT> randshape = poissonarr.shape + (n_samples,) <NEW_LINE> samples = np.random.poisson(poissonarr[..., np.newaxis], randshape) <NEW_LINE> if has_unit: <NEW_LINE> <INDENT> if center.unit == u.adu: <NEW_LINE> <INDENT> warn('ADUs were provided to poisson. ADUs are not strictly count' 'units because they need the gain to be applied. It is ' 'recommended you apply the gain to convert to e.g. electrons.') <NEW_LINE> <DEDENT> elif center.unit not in COUNT_UNITS: <NEW_LINE> <INDENT> warn('Unit {} was provided to poisson, which is not one of {}, ' 'and therefore suspect as a "counting" unit. Ensure you mean ' 'to use Poisson statistics.'.format(center.unit, COUNT_UNITS)) <NEW_LINE> <DEDENT> samples = samples * center.unit <NEW_LINE> <DEDENT> return cls(samples, **kwargs) | Create a Poisson distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center value of this distribution (i.e., λ).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : ``cls``, usually `Distribution`
The sampled Poisson distribution. | 625941b299cbb53fe6792984
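The core sampling trick above, isolated: appending a trailing axis to the center array lets numpy broadcast one rate per element across n_samples draws:

    import numpy as np

    center = np.array([[1.0, 5.0], [10.0, 50.0]])   # per-element rates
    n_samples = 1000
    samples = np.random.poisson(center[..., np.newaxis],
                                center.shape + (n_samples,))
    print(samples.shape)          # (2, 2, 1000)
    print(samples.mean(axis=-1))  # approaches the center values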
def p_site_lstate(p): <NEW_LINE> <INDENT> site = {} <NEW_LINE> site["name"] = p[1] <NEW_LINE> site["istate"] = "" <NEW_LINE> site["lstate"] = p[2] <NEW_LINE> p[0] = site | site : ID lstate | 625941b221a7993f00bc7a85 |
def getProxyLocation(): <NEW_LINE> <INDENT> for envVar in ['GRID_PROXY_FILE', 'X509_USER_PROXY']: <NEW_LINE> <INDENT> if envVar in os.environ: <NEW_LINE> <INDENT> proxyPath = os.path.realpath(os.environ[envVar]) <NEW_LINE> if os.path.isfile(proxyPath): <NEW_LINE> <INDENT> return proxyPath <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> proxyName = "x509up_u%d" % os.getuid() <NEW_LINE> if os.path.isfile("/tmp/%s" % proxyName): <NEW_LINE> <INDENT> return "/tmp/%s" % proxyName <NEW_LINE> <DEDENT> return False | Get the path of the currently active grid proxy file
| 625941b273bcbd0ca4b2be19 |
def xchg(a, i, j): <NEW_LINE> <INDENT> tmp = a[i] <NEW_LINE> a[i] = a[j] <NEW_LINE> a[j] = tmp | Exchange elements at 'i' and 'j' in array 'a' | 625941b2711fe17d82542119 |
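For reference, the idiomatic Python form performs the same swap without a temporary:

    a = [1, 2, 3]
    a[0], a[2] = a[2], a[0]
    print(a)   # [3, 2, 1]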
def hasPathSum(self, root: TreeNode, sum: int) -> bool: <NEW_LINE> <INDENT> if not root: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> sum -= root.val <NEW_LINE> if sum == 0 and not root.left and not root.right: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return self.hasPathSum(root.left, sum) or self.hasPathSum(root.right, sum) | >>> solution = Solution()
>>> tree = make_binary_tree([1, 2, 3])
>>> solution.hasPathSum(tree, 4)
True
>>> tree = make_binary_tree([1])
>>> solution.hasPathSum(tree, 1)
True
>>> tree = make_binary_tree([1, 2])
>>> solution.hasPathSum(tree, 1)
False
>>> tree = make_binary_tree([1, 2, 3, 4])
>>> solution.hasPathSum(tree, 3)
False
>>> tree = make_binary_tree([])
>>> solution.hasPathSum(tree, 0)
False
>>> tree = make_binary_tree([1, 2, 3])
>>> solution.hasPathSum(tree, 6)
False
>>> tree = make_binary_tree([5, 4, 8, 11, None, 13, 4, 7, 2, None, None, None, None, None, 1])
>>> solution.hasPathSum(tree, 22)
True | 625941b2d6c5a10208143de2 |
def SAS(self, Obj): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> Obj = str(Obj) <NEW_LINE> Obj = Obj.strip() <NEW_LINE> Obj = Obj.replace('*', '%') <NEW_LINE> return Obj <NEW_LINE> <DEDENT> except Exception as StringAndStripError1: <NEW_LINE> <INDENT> StringAndStripError1 = str(StringAndStripError1) <NEW_LINE> print ("Error while in accpac_class.py code 300: With error message: \n" + StringAndStripError1) <NEW_LINE> return 1 | String and Strip tool.
------
1. Creates a string and returns it
2. Strips string of spaces
3. Turns * to %
------
Strips an object and converts it to a string.
Returns a string if all went well.
Returns 1 on error | 625941b29b70327d1c4e0b71
def _add_to_workflow(self): <NEW_LINE> <INDENT> workflow_code = getattr(settings, 'APPLICATION_WORKFLOW', None) <NEW_LINE> if workflow_code != None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> approval_workflow = Workflow.objects.get(name=workflow_code) <NEW_LINE> set_workflow(self, approval_workflow) <NEW_LINE> <DEDENT> except ObjectDoesNotExist: <NEW_LINE> <INDENT> raise ImproperlyConfigured('The workflow you specify in APPLICATION_WORKFLOW must actually be configured in the db') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ImproperlyConfigured('You must set APPLICATION_WORKFLOW in the settings file') | Adds the EthicsApplication to the workflow that is defined in Settings.APPLICATION_WORKFLOW
Will raise an ImproperlyConfigured exception if this setting is not set, or the workflow defined
doesn't exist. | 625941b2287bf620b61d380e |
def logger_initiate(): <NEW_LINE> <INDENT> log_config() <NEW_LINE> log_set_level() | Initiates the logger boilerplate | 625941b28e7ae83300e4ad6f
def unapprove(self): <NEW_LINE> <INDENT> from base import actions <NEW_LINE> if self.is_redgreen: <NEW_LINE> <INDENT> self.approved = False <NEW_LINE> self.save(update_fields=['approved']) <NEW_LINE> actions.unapprove_user(self) | Unapprove a Red/Green Member
Only applies to Red/Green | 625941b223849d37ff7b2e37 |
def external_login_email_post(): <NEW_LINE> <INDENT> form = ResendConfirmationForm(request.form) <NEW_LINE> session = get_session() <NEW_LINE> if not session.is_external_first_login: <NEW_LINE> <INDENT> raise HTTPError(http.UNAUTHORIZED) <NEW_LINE> <DEDENT> external_id_provider = session.data['auth_user_external_id_provider'] <NEW_LINE> external_id = session.data['auth_user_external_id'] <NEW_LINE> fullname = session.data['auth_user_fullname'] <NEW_LINE> service_url = session.data['service_url'] <NEW_LINE> destination = 'dashboard' <NEW_LINE> for campaign in campaigns.get_campaigns(): <NEW_LINE> <INDENT> if campaign != 'institution': <NEW_LINE> <INDENT> campaign_url = furl.furl(campaigns.campaign_url_for(campaign)).url <NEW_LINE> external_campaign_url = furl.furl(campaigns.external_campaign_url_for(campaign)).url <NEW_LINE> if campaigns.is_proxy_login(campaign): <NEW_LINE> <INDENT> if check_service_url_with_proxy_campaign(str(service_url), campaign_url, external_campaign_url): <NEW_LINE> <INDENT> destination = campaign <NEW_LINE> if campaign != 'osf-preprints': <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif service_url.startswith(campaign_url): <NEW_LINE> <INDENT> destination = campaign <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if form.validate(): <NEW_LINE> <INDENT> clean_email = form.email.data <NEW_LINE> user = get_user(email=clean_email) <NEW_LINE> external_identity = { external_id_provider: { external_id: None, }, } <NEW_LINE> try: <NEW_LINE> <INDENT> ensure_external_identity_uniqueness(external_id_provider, external_id, user) <NEW_LINE> <DEDENT> except ValidationError as e: <NEW_LINE> <INDENT> raise HTTPError(http.FORBIDDEN, e.message) <NEW_LINE> <DEDENT> if user: <NEW_LINE> <INDENT> external_identity[external_id_provider][external_id] = 'LINK' <NEW_LINE> if external_id_provider in user.external_identity: <NEW_LINE> <INDENT> user.external_identity[external_id_provider].update(external_identity[external_id_provider]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> user.external_identity.update(external_identity) <NEW_LINE> <DEDENT> user.add_unconfirmed_email(clean_email, external_identity=external_identity) <NEW_LINE> user.save() <NEW_LINE> send_confirm_email( user, clean_email, external_id_provider=external_id_provider, external_id=external_id, destination=destination ) <NEW_LINE> message = language.EXTERNAL_LOGIN_EMAIL_LINK_SUCCESS.format( external_id_provider=external_id_provider, email=user.username ) <NEW_LINE> kind = 'success' <NEW_LINE> remove_session(session) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> external_identity[external_id_provider][external_id] = 'CREATE' <NEW_LINE> user = OSFUser.create_unconfirmed( username=clean_email, password=None, fullname=fullname, external_identity=external_identity, campaign=None ) <NEW_LINE> user.save() <NEW_LINE> send_confirm_email( user, user.username, external_id_provider=external_id_provider, external_id=external_id, destination=destination ) <NEW_LINE> message = language.EXTERNAL_LOGIN_EMAIL_CREATE_SUCCESS.format( external_id_provider=external_id_provider, email=user.username ) <NEW_LINE> kind = 'success' <NEW_LINE> remove_session(session) <NEW_LINE> <DEDENT> status.push_status_message(message, kind=kind, trust=False) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> forms.push_errors_to_status(form.errors) <NEW_LINE> <DEDENT> return { 'form': form, 'external_id_provider': external_id_provider } | View to handle email submission for first-time oauth-login user.
HTTP Method: POST | 625941b215fb5d323cde08a6 |