Columns: code (string, 4-4.48k chars); docstring (string, 1-6.45k chars); _id (string, 24 chars)
def map_by_key(items, key='id'): <NEW_LINE> <INDENT> return DictObject(map(lambda item: (item.get(key), item), items))
Creates a dict from a list of dicts, keyed by each item's ``key`` field (default ``'id'``).
625941b59b70327d1c4e0bc2
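A quick usage sketch of map_by_key as flattened above; the DictObject wrapper is not part of this sample, so a plain dict stands in for illustration:

def map_by_key(items, key='id'):
    # Stand-in using dict() instead of the sample's DictObject wrapper.
    return dict(map(lambda item: (item.get(key), item), items))

users = [{'id': 1, 'name': 'ada'}, {'id': 2, 'name': 'bob'}]
by_id = map_by_key(users)
assert by_id[1]['name'] == 'ada'
assert by_id[2]['name'] == 'bob'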
def __init__(self, fileFormat = None, imageFormat = None, width = None, height = None, chromaSubsampling = None, colorBitDepth = None, compressionMode = None, report_errors=errors_report.STRICT): <NEW_LINE> <INDENT> super(Technical, self).__init__() <NEW_LINE> self._fileFormat = None <NEW_LINE> self._imageFormat = None <NEW_LINE> self._resolutionWidth = None <NEW_LINE> self._resolutionWidthUnit = None <NEW_LINE> self._resolutionHeightUnit = None <NEW_LINE> self._resolutionHeight = None <NEW_LINE> self._colorSpace = None <NEW_LINE> self._chromaSubsampling = None <NEW_LINE> self._colorDepth = None <NEW_LINE> self._compressionMode = None <NEW_LINE> self._colorDepthUnit = None <NEW_LINE> self._chromaSubsampling = None <NEW_LINE> self._additionalTechnicalNotes = None <NEW_LINE> self.report_errors = report_errors <NEW_LINE> self.fileFormat = fileFormat <NEW_LINE> self.imageFormat = imageFormat <NEW_LINE> if width: <NEW_LINE> <INDENT> if isinstance(width, int): <NEW_LINE> <INDENT> self.resolutionWidthUnit = "pixels" <NEW_LINE> self.resolutionWidth = width <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Expected int received " + str(type(width))) <NEW_LINE> <DEDENT> <DEDENT> if height: <NEW_LINE> <INDENT> if isinstance(height, int): <NEW_LINE> <INDENT> self.resolutionHeightUnit = "pixels" <NEW_LINE> self.resolutionHeight = height <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError("Expected int received " + str(type(height))) <NEW_LINE> <DEDENT> <DEDENT> if colorBitDepth: <NEW_LINE> <INDENT> self.colorDepth = colorBitDepth <NEW_LINE> self.colorDepthUnit = "bits" <NEW_LINE> <DEDENT> if compressionMode: <NEW_LINE> <INDENT> self.compressionMode = compressionMode <NEW_LINE> <DEDENT> if chromaSubsampling: <NEW_LINE> <INDENT> self.chromaSubsampling = chromaSubsampling
:param str fileFormat: Specifies the MIME type of the file. :param str imageFormat: Specifies the encoding of the digital file. For example: JPEG2000 :param int width: Specifies the x dimensions of the digital file. :param int height: Specifies the y dimensions of the digital file. :param str chromaSubsampling: Specifies the Chroma Subsampling as a ratio in the format integer:integer:integer. For example 4:2:2. In the absence of subsampling, the value of this element is 4:4:4. :param colorDepth: The bit depth per channel :param str compressionMode: Designates the type of compression. Please use only Lossless, Lossy, Uncompressed, or Unknown _fileFormat (str): Specifies the MIME type of the file. _imageFormat (str): Specifies the encoding of the digital file. For example: JPEG2000 _resolutionWidth Specifies the x dimensions of the digital file. _resolutionHeight Specifies the y dimensions of the digital file. _colorSpace (str): Specifies the type of color space used by the digital file. For example: YUV _chromaSubsampling (str): Specifies the Chroma Subsampling as a ratio in the format integer:integer:integer. For example 4:2:2. In the absence of subsampling, the value of this element is 4:4:4. _colorDepth The bit depth per channel _compressionMode Designates the type of compression. Please use only Lossless, Lossy, Uncompressed, or Unknown _additionalTechnicalNotes (str): Additional technical notes about the file that do not fit into other elements
625941b5711fe17d8254216a
def migrate_author(self, author_name): <NEW_LINE> <INDENT> action_text = "The author '%s' needs to be migrated to an User:\n" "1. Use an existing user ?\n" "2. Create a new user ?\n" "Please select a choice: " % author_name <NEW_LINE> while 42: <NEW_LINE> <INDENT> selection = raw_input(smart_str(action_text)) <NEW_LINE> if selection in '12': <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> if selection == '1': <NEW_LINE> <INDENT> users = User.objects.all() <NEW_LINE> usernames = [user.username for user in users] <NEW_LINE> while 42: <NEW_LINE> <INDENT> user_text = "1. Select your user, by typing " "one of theses usernames:\n" "[%s]\n" "Please select a choice: " % ', '.join(usernames) <NEW_LINE> user_selected = raw_input(user_text) <NEW_LINE> if user_selected in usernames: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> return users.get(username=user_selected) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> create_text = "2. Please type the email of the '%s' user: " % author_name <NEW_LINE> author_mail = raw_input(create_text) <NEW_LINE> try: <NEW_LINE> <INDENT> return User.objects.create_user(author_name, author_mail) <NEW_LINE> <DEDENT> except IntegrityError: <NEW_LINE> <INDENT> return User.objects.get(username=author_name)
Handle actions for migrating an author to a User.
625941b53617ad0b5ed67ced
def configure(self): <NEW_LINE> <INDENT> if os.getuid() != 0: <NEW_LINE> <INDENT> raise UserWarning("Must run as root") <NEW_LINE> <DEDENT> rc, out, err = utils.execCmd( ( '/usr/sbin/usermod', '-a', '-G', ','.join(self.SANLOCK_GROUPS), SANLOCK_USER ), raw=True, ) <NEW_LINE> sys.stdout.write(out) <NEW_LINE> sys.stderr.write(err) <NEW_LINE> if rc != 0: <NEW_LINE> <INDENT> raise RuntimeError("Failed to perform sanlock config.")
Configure sanlock process groups
625941b5f8510a7c17cf94f3
def number(): <NEW_LINE> <INDENT> no = Member.objects.aggregate(Max('member_number')) <NEW_LINE> value = no.values()[0] <NEW_LINE> if value is None: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return value + 1
Returns the next sequential number, to be used as the member_number field in the Member model.
625941b58c0ade5d55d3e7ae
def clear(self): <NEW_LINE> <INDENT> self.__head = None <NEW_LINE> self.__tail = None <NEW_LINE> self.__size = 0
Removes all of the current elements from the list
625941b5046cf37aa974cb39
def split_schema_func(schema, func): <NEW_LINE> <INDENT> (sch, fnc) = split_schema_obj(func, schema) <NEW_LINE> if sch != schema: <NEW_LINE> <INDENT> return (sch, fnc) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return fnc
Split a function related to an object from its schema :param schema: schema to which the main object belongs :param func: possibly qualified function name :returns: a schema, function tuple, or just the unqualified function name
625941b57c178a314d6ef246
def draw_quickmarker_menu(self, context): <NEW_LINE> <INDENT> layout = self.layout <NEW_LINE> if len(context.scene.vseqf.marker_presets) > 0: <NEW_LINE> <INDENT> layout.menu('VSEQF_MT_quickmarkers_menu', text="Quick Markers")
Draws the submenu for the QuickMarker presets, placed in the sequencer markers menu
625941b5de87d2750b85fb7c
def repair_active_generation( self, force_operational=False, wait_until_operational=True, ): <NEW_LINE> <INDENT> if self.active_is_fully_operational(): <NEW_LINE> <INDENT> logger.info("All active nodes are operational") <NEW_LINE> return [] <NEW_LINE> <DEDENT> nodes = self.get_inoperational_active_nodes() <NEW_LINE> made_operational = [] <NEW_LINE> for node in nodes: <NEW_LINE> <INDENT> if not node.aws_id: <NEW_LINE> <INDENT> logger.warning( "Node missing: %s- %s", node.aws_type, node.name, ) <NEW_LINE> continue <NEW_LINE> <DEDENT> if not node.is_operational: <NEW_LINE> <INDENT> if node.is_healthy or force_operational: <NEW_LINE> <INDENT> logger.info("Making node operational: %s" % node) <NEW_LINE> node.make_operational(force_operational=force_operational) <NEW_LINE> made_operational.append(node) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.warning("Node unhealthy: %s" % node.boto_instance) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.debug("Node operational: %s" % node) <NEW_LINE> <DEDENT> <DEDENT> fixed_nodes = copy(made_operational) <NEW_LINE> if len(fixed_nodes) == 0: <NEW_LINE> <INDENT> logger.info("No healthy non-operational nodes available") <NEW_LINE> return [] <NEW_LINE> <DEDENT> if not wait_until_operational: <NEW_LINE> <INDENT> return fixed_nodes <NEW_LINE> <DEDENT> logger.info("Waiting until all nodes are actually operational") <NEW_LINE> time_waited = 0 <NEW_LINE> while made_operational: <NEW_LINE> <INDENT> if time_waited > MAKE_OPERATIONAL_TIMEOUT: <NEW_LINE> <INDENT> raise WaitTimedOut( "Timed out waiting on nodes: %s" % made_operational) <NEW_LINE> <DEDENT> _made_operational = made_operational[:] <NEW_LINE> for node in _made_operational: <NEW_LINE> <INDENT> if node.is_operational: <NEW_LINE> <INDENT> made_operational.remove(node) <NEW_LINE> <DEDENT> <DEDENT> if made_operational: <NEW_LINE> <INDENT> logger.info( "%s node(s) still not operational. Waiting %ss", len(made_operational), WAIT_TIME, ) <NEW_LINE> time.sleep(WAIT_TIME) <NEW_LINE> time_waited += WAIT_TIME <NEW_LINE> <DEDENT> <DEDENT> return fixed_nodes
Ensure that all healthy active-generation nodes are operational. ``force_operational`` If True, even non-healthy active-generation nodes will be made operational. ``wait_until_operational`` If False, doesn't wait for the nodes to become fully operational. Returns any nodes that were made operational by this action.
625941b51b99ca400220a89f
def __init__(self, dispatch, name, default, index, before=None, after=None): <NEW_LINE> <INDENT> self.dispatch = dispatch <NEW_LINE> self.name = name <NEW_LINE> self.default = default <NEW_LINE> self.index = index <NEW_LINE> self._before = before <NEW_LINE> self._after = after
Register a custom predicate for a dispatch method. The function to be registered should have the same arguments as the dispatch method and return a value that is used when registering an implementation for the dispatch method. The predicates are ordered by their before and after arguments. :param dispatch: the dispatch method this predicate is for. You can use the :meth:`App.method` directive to add a dispatch method to an app. :param name: the name used to identify the predicate when registering the implementation of the dispatch method. :param default: the expected value of the predicate, to be used when registering an implementation if the expected value for the predicate is not given explicitly. :param index: the index to use. Typically :class:`reg.KeyIndex` or :class:`reg.ClassIndex`. :param before: predicate function this function wants to have priority over. :param after: predicate function we want to have priority over this one.
625941b532920d7e50b27fba
def add_tag(tag): <NEW_LINE> <INDENT> def decorator(func: Callable[[Any], str]): <NEW_LINE> <INDENT> @wraps(func) <NEW_LINE> def wrapper(*args, **kwargs): <NEW_LINE> <INDENT> return f'<{tag}>{func(*args, **kwargs)}</{tag}>' <NEW_LINE> <DEDENT> return wrapper <NEW_LINE> <DEDENT> return decorator
Add the specified tag around the string returned by the decorated function
625941b573bcbd0ca4b2be6c
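A self-contained usage sketch of the add_tag decorator above, with the imports the flattened sample relies on:

from functools import wraps
from typing import Any, Callable

def add_tag(tag):
    def decorator(func: Callable[[Any], str]):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Wrap the decorated function's string result in <tag>...</tag>.
            return f'<{tag}>{func(*args, **kwargs)}</{tag}>'
        return wrapper
    return decorator

@add_tag('b')
def greet(name):
    return f'Hello, {name}'

assert greet('world') == '<b>Hello, world</b>'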
def on_rate_limit(self, url, retry_count): <NEW_LINE> <INDENT> if self.max_retries != -1 and retry_count >= self.max_retries: <NEW_LINE> <INDENT> raise ApiError('Max retries exceeded')
A handler called when HTTP 429 is received. Raises an exception when :data:`~lichess.api.DefaultApiClient.max_retries` is exceeded.
625941b516aa5153ce362266
def declare(self, nowait=False): <NEW_LINE> <INDENT> return (self.name and self.exchange.declare(nowait), self.name and self.queue_declare(nowait, passive=False), self.name and self.queue_bind(nowait))
Declares the queue, the exchange and binds the queue to the exchange.
625941b55166f23b2e1a4f47
def content_of_highest_head( self, v ): <NEW_LINE> <INDENT> c = self.cell_of_highest_head(v) <NEW_LINE> return c[1]-c[0]
Return the diagonal of the highest head of the cells labeled ``v`` in the standard part of ``self``. Return the content of the cell of the head in the highest row of all ribbons labeled by ``v`` of the underlying standard tableau. If there is no cell with entry ``v`` then the value returned is the length of the first row. INPUT: - ``v`` -- an integer representing the label in the standard tableau OUTPUT: - an integer representing the content of the head of the highest ribbon with label ``v`` EXAMPLES:: sage: [StrongTableau([[-1,2,-3],[-2,3],[3]], 1).content_of_highest_head(v) for v in range(1,5)] [0, -1, -2, 3] TESTS:: sage: StrongTableau([], 4).content_of_highest_head(1) 0 sage: StrongTableau([[-1,-1]], 4).content_of_highest_head(3) 2
625941b57b180e01f3dc45f5
@profile_blu.route('/release2', methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> def news_release2(): <NEW_LINE> <INDENT> if request.method == "GET": <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> categories = Category.query.all() <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> current_app.logger.error(e) <NEW_LINE> return jsonify(errno=RET.DBERR, errmsg="failed to fetch categories") <NEW_LINE> <DEDENT> category_list = [] <NEW_LINE> for category in categories: <NEW_LINE> <INDENT> category_list.append(category.to_dict()) <NEW_LINE> <DEDENT> return render_template("news/user_news_release.html", categories=category_list) <NEW_LINE> <DEDENT> title = request.form.get("title") <NEW_LINE> category_id = request.form.get("category_id") <NEW_LINE> index_image = request.files.get("index_image") <NEW_LINE> digest = request.form.get("digest") <NEW_LINE> content = request.form.get("content") <NEW_LINE> if not all([title, category_id, index_image, digest, content]): <NEW_LINE> <INDENT> return jsonify(errno=RET.PARAMERR, errmsg="missing parameters") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> image_name = storage(index_image.read()) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> current_app.logger.error(e) <NEW_LINE> return jsonify(errno=RET.THIRDERR, errmsg="Qiniu cloud error") <NEW_LINE> <DEDENT> if not image_name: return jsonify(errno=RET.NODATA, errmsg="upload failed") <NEW_LINE> news = News() <NEW_LINE> news.title = title <NEW_LINE> news.source = g.user.nick_name <NEW_LINE> news.digest = digest <NEW_LINE> news.content = content <NEW_LINE> news.index_image_url = constants.QINIU_DOMIN_PREFIX + image_name <NEW_LINE> news.category_id = category_id <NEW_LINE> news.user_id = g.user.id <NEW_LINE> news.status = 1 <NEW_LINE> try: <NEW_LINE> <INDENT> db.session.add(news) <NEW_LINE> db.session.commit() <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> current_app.logger.error(e) <NEW_LINE> db.session.rollback() <NEW_LINE> return jsonify(errno=RET.DBERR, errmsg="failed to publish news") <NEW_LINE> <DEDENT> return jsonify(errno=RET.OK, errmsg="published successfully")
1. Check whether the request is a GET request 2. Get the parameters 3. Validate the parameters 4. Upload the image 5. Create the news object and set its attributes 6. Save the news to the database 7. Return the response :return:
625941b530dc7b766590175a
def SA(self, evaluations=100000, max_temp=None, cool_rate=0.5, benchmarks=(ComparativeBenchmarks.f1()), precision=None): <NEW_LINE> <INDENT> for i in range(len(benchmarks)): <NEW_LINE> <INDENT> benchmark = benchmarks[i] <NEW_LINE> sa = SimulatedAnnealing(max_temp=max_temp, cool_rate=cool_rate, eval_limit=evaluations, benchmark=benchmark) <NEW_LINE> results = [] <NEW_LINE> for j in range(30): <NEW_LINE> <INDENT> state, mins = sa.annealing(precision=precision, print_steps=False) <NEW_LINE> results += [mins] <NEW_LINE> sa.__init__(max_temp=max_temp, cool_rate=cool_rate, eval_limit=evaluations, benchmark=benchmark) <NEW_LINE> print(benchmark.name, j+1, state.value) <NEW_LINE> <DEDENT> self.csv(results, "SA_Results/sa_output_"+benchmark.name+".csv")
Run SA algorithm on list of benchmarks. 30 executions per benchmark. :param evaluations: :param max_temp: :param cool_rate: :param benchmarks: :param precision:
625941b5099cdd3c635f0a4b
def get_network_ids(self): <NEW_LINE> <INDENT> session = self.session() <NEW_LINE> query = session.query(WaveformChannel.network) <NEW_LINE> query = query.group_by(WaveformChannel.network) <NEW_LINE> results = query.all() <NEW_LINE> session.close() <NEW_LINE> return [r[0] for r in results if len(r) == 1]
Fetches all possible network IDs.
625941b5b7558d58953c4d0b
def test_get_top(self): <NEW_LINE> <INDENT> con = HnApi() <NEW_LINE> top = con.get_top() <NEW_LINE> self.assertTrue(len(top) > 100) <NEW_LINE> item_0 = con.get_item(top[0]) <NEW_LINE> self.assertTrue(con.is_api_item(item_0)) <NEW_LINE> item_100 = con.get_item(top[-1]) <NEW_LINE> self.assertTrue(con.is_api_item(item_100))
Test retrieval of first and last items from /top endpoint
625941b515fb5d323cde08f7
def approx_distance(self, slowness_turn_layer, p, is_p_wave): <NEW_LINE> <INDENT> if slowness_turn_layer >= self.get_num_layers(is_p_wave): <NEW_LINE> <INDENT> raise SlownessModelError( "Can't calculate a distance when get_num_layers() is smaller " "than the given slowness_turn_layer!") <NEW_LINE> <DEDENT> if p < 0: <NEW_LINE> <INDENT> raise SlownessModelError("Ray parameter must not be negative!") <NEW_LINE> <DEDENT> td = np.zeros(1, dtype=TimeDist) <NEW_LINE> td['p'] = p <NEW_LINE> layer_num = np.arange(0, slowness_turn_layer + 1) <NEW_LINE> if len(layer_num): <NEW_LINE> <INDENT> time, dist = self.layer_time_dist(p, layer_num, is_p_wave) <NEW_LINE> td['time'] = 2 * np.sum(time) <NEW_LINE> td['dist'] = 2 * np.sum(dist) <NEW_LINE> <DEDENT> return td
Approximate distance for ray turning at the bottom of a layer. Generates approximate distance, in radians, for a ray from a surface source that turns at the bottom of the given slowness layer. :param slowness_turn_layer: The number of the layer at which the ray should turn. :type slowness_turn_layer: int :param p: The slowness to calculate, in s/km. :type p: float :param is_p_wave: Whether to use the P (``True``) or S (``False``) wave. :type is_p_wave: bool :returns: The time (in s) and distance (in rad) the ray travels. :rtype: :class:`~numpy.ndarray` (dtype = :class:`obspy.taup.helper_classes.TimeDist`, shape = (``slowness_turn_layer``, ))
625941b5046cf37aa974cb3a
def normalize_features(features): <NEW_LINE> <INDENT> temp_feats = np.array([]) <NEW_LINE> for count, f in enumerate(features): <NEW_LINE> <INDENT> if f.shape[0] > 0: <NEW_LINE> <INDENT> if count == 0: <NEW_LINE> <INDENT> temp_feats = f <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> temp_feats = np.vstack((temp_feats, f)) <NEW_LINE> <DEDENT> count += 1 <NEW_LINE> <DEDENT> <DEDENT> mean = np.mean(temp_feats, axis=0) + 1e-14 <NEW_LINE> std = np.std(temp_feats, axis=0) + 1e-14 <NEW_LINE> features_norm = [] <NEW_LINE> for f in features: <NEW_LINE> <INDENT> ft = f.copy() <NEW_LINE> for n_samples in range(f.shape[0]): <NEW_LINE> <INDENT> ft[n_samples, :] = (ft[n_samples, :] - mean) / std <NEW_LINE> <DEDENT> features_norm.append(ft) <NEW_LINE> <DEDENT> return features_norm, mean, std
This function normalizes a feature set to 0-mean and 1-std. Used in most classifier training cases. ARGUMENTS: - features: list of feature matrices (each one of them is a np matrix) RETURNS: - features_norm: list of NORMALIZED feature matrices - mean: mean vector - std: std vector
625941b5ec188e330fd5a597
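A minimal sketch of calling normalize_features (the sample above, unflattened); only numpy is assumed:

import numpy as np

f1 = np.array([[1.0, 2.0], [3.0, 4.0]])
f2 = np.array([[5.0, 6.0]])

features_norm, mean, std = normalize_features([f1, f2])
# The stacked, normalized matrices have ~0 mean and ~1 std per column.
stacked = np.vstack(features_norm)
assert np.allclose(stacked.mean(axis=0), 0.0, atol=1e-6)
assert np.allclose(stacked.std(axis=0), 1.0, atol=1e-6)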
def claim_device(self, configFileUrl=None, configId=None, deviceClaimList=None, fileServiceId=None, imageId=None, imageUrl=None, populateInventory=None, projectId=None, workflowId=None, headers=None, payload=None, active_validation=True, **request_parameters): <NEW_LINE> <INDENT> check_type(headers, dict) <NEW_LINE> check_type(payload, dict) <NEW_LINE> if headers is not None: <NEW_LINE> <INDENT> if 'Content-Type' in headers: <NEW_LINE> <INDENT> check_type(headers.get('Content-Type'), basestring, may_be_none=False) <NEW_LINE> <DEDENT> if 'X-Auth-Token' in headers: <NEW_LINE> <INDENT> check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) <NEW_LINE> <DEDENT> <DEDENT> _params = { } <NEW_LINE> _params.update(request_parameters) <NEW_LINE> _params = dict_from_items_with_values(_params) <NEW_LINE> path_params = { } <NEW_LINE> _payload = { 'configFileUrl': configFileUrl, 'configId': configId, 'deviceClaimList': deviceClaimList, 'fileServiceId': fileServiceId, 'imageId': imageId, 'imageUrl': imageUrl, 'populateInventory': populateInventory, 'projectId': projectId, 'workflowId': workflowId, } <NEW_LINE> _payload.update(payload or {}) <NEW_LINE> _payload = dict_from_items_with_values(_payload) <NEW_LINE> if active_validation: <NEW_LINE> <INDENT> self._request_validator('jsd_d8a619974a8a8c48_v2_1_2') .validate(_payload) <NEW_LINE> <DEDENT> with_custom_headers = False <NEW_LINE> _headers = self._session.headers or {} <NEW_LINE> if headers: <NEW_LINE> <INDENT> _headers.update(dict_of_str(headers)) <NEW_LINE> with_custom_headers = True <NEW_LINE> <DEDENT> e_url = ('/dna/intent/api/v1/onboarding/pnp-device/claim') <NEW_LINE> endpoint_full_url = apply_path_params(e_url, path_params) <NEW_LINE> if with_custom_headers: <NEW_LINE> <INDENT> json_data = self._session.post(endpoint_full_url, params=_params, json=_payload, headers=_headers) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> json_data = self._session.post(endpoint_full_url, params=_params, json=_payload) <NEW_LINE> <DEDENT> return self._object_factory('bpm_d8a619974a8a8c48_v2_1_2', json_data)
Claims one of more devices with specified workflow. Args: configFileUrl(string): ClaimDeviceRequest's configFileUrl. configId(string): ClaimDeviceRequest's configId. deviceClaimList(list): ClaimDeviceRequest's deviceClaimList (list of objects). fileServiceId(string): ClaimDeviceRequest's fileServiceId. imageId(string): ClaimDeviceRequest's imageId. imageUrl(string): ClaimDeviceRequest's imageUrl. populateInventory(boolean): ClaimDeviceRequest's populateInventory. projectId(string): ClaimDeviceRequest's projectId. workflowId(string): ClaimDeviceRequest's workflowId. headers(dict): Dictionary of HTTP Headers to send with the Request . payload(dict): A JSON serializable Python object to send in the body of the Request. active_validation(bool): Enable/Disable payload validation. Defaults to True. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
625941b5091ae35668666d55
def __init__(self): <NEW_LINE> <INDENT> self.swaggerTypes = { 'UsedSize': 'long', 'TotalSize': 'long' } <NEW_LINE> self.attributeMap = { 'UsedSize': 'UsedSize','TotalSize': 'TotalSize'} <NEW_LINE> self.UsedSize = None <NEW_LINE> self.TotalSize = None
Attributes: swaggerTypes (dict): The key is attribute name and the value is attribute type. attributeMap (dict): The key is attribute name and the value is json key in definition.
625941b54a966d76dd550dfa
def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size): <NEW_LINE> <INDENT> num_preprocess_threads = 16 <NEW_LINE> images, label_batch = tf.train.shuffle_batch( [image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=min_queue_examples + 3 * batch_size, min_after_dequeue=min_queue_examples) <NEW_LINE> tf.image_summary('images', images) <NEW_LINE> return images, tf.reshape(label_batch, [batch_size])
Construct a queued batch of images and labels. Args: image: 3-D Tensor of [height, width, 1] of type.float32. label: 1-D Tensor of type.int32 min_queue_examples: int32, minimum number of samples to retain in the queue that provides of batches of examples. batch_size: Number of images per batch. Returns: images: Images. 4D tensor of [batch_size, height, width, 1] size. labels: Labels. 1D tensor of [batch_size] size.
625941b5097d151d1a222c4b
def isSymmetric(self, root: TreeNode) -> bool: <NEW_LINE> <INDENT> def sub(left, right): <NEW_LINE> <INDENT> if not left and not right: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> if not left or not right or left.val != right.val: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return sub(left.left, right.right) and sub(left.right, right.left) <NEW_LINE> <DEDENT> return sub(root.left, root.right) if root else True
Recursive approach.
625941b50c0af96317bb7fd8
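A runnable check of the recursive symmetry test above, recast as a free function with a minimal TreeNode (the LeetCode-style class is not included in the sample):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def is_symmetric(root):
    # Mirror-compare the left and right subtrees recursively.
    def sub(left, right):
        if not left and not right:
            return True
        if not left or not right or left.val != right.val:
            return False
        return sub(left.left, right.right) and sub(left.right, right.left)
    return sub(root.left, root.right) if root else True

assert is_symmetric(TreeNode(1, TreeNode(2), TreeNode(2)))
assert not is_symmetric(TreeNode(1, TreeNode(2), TreeNode(3)))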
def get_subscription_node(self, subscription): <NEW_LINE> <INDENT> if not self.subscription_connected(subscription): <NEW_LINE> <INDENT> self.connect_subscription_node(subscription) <NEW_LINE> <DEDENT> for node in self.connections: <NEW_LINE> <INDENT> if subscription["subscribed_node_id"] == node.node_id: <NEW_LINE> <INDENT> return node <NEW_LINE> <DEDENT> <DEDENT> return None
Check if the client is connected to the node it subscribed to and return that node; if not, attempt to connect to it and then return it.
625941b52ae34c7f2600cf21
def _create_content_code_element(self, root, value): <NEW_LINE> <INDENT> if value is not None and value != '': <NEW_LINE> <INDENT> value = str(value) <NEW_LINE> self._validate_content_code_value(value) <NEW_LINE> <DEDENT> content_element = ET.SubElement(root, "Consignment.Contentcode") <NEW_LINE> content_element.text = value
Append 'Consignment.Contentcode' element to given root object. For multiple parcels this element can accept an empty string :param root: root XML element object :param value: string of content code :return:
625941b5460517430c393f7f
def herald_message(self, herald_svc, message): <NEW_LINE> <INDENT> subject = 'herald/routing/reply/{}/'.format(self.is_router()) <NEW_LINE> herald_svc.reply(message, None, subject=subject)
A Herald hello message has been received
625941b52c8b7c6e89b355b3
def next_element(bst, n): <NEW_LINE> <INDENT> latest_left_parent = None <NEW_LINE> cur = bst <NEW_LINE> while cur.label != n: <NEW_LINE> <INDENT> if n > cur.label: <NEW_LINE> <INDENT> if cur.right is BST.empty: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> cur = cur.right <NEW_LINE> <DEDENT> elif n < cur.label: <NEW_LINE> <INDENT> if cur.left is BST.empty: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> latest_left_parent = cur <NEW_LINE> cur = cur.left <NEW_LINE> <DEDENT> <DEDENT> if cur.right is not BST.empty: <NEW_LINE> <INDENT> cur = cur.right <NEW_LINE> while cur.left is not BST.empty: <NEW_LINE> <INDENT> cur = cur.left <NEW_LINE> <DEDENT> return cur.label <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if latest_left_parent is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return latest_left_parent.label
This function takes in a BST and a number N and it returns the smallest element that is greater than N, or None if it has no such element. >>> t = BST(8, BST(3, BST(1), BST(6, BST(4), BST(7))), BST(10, BST.empty, BST(14, BST(13)))) >>> next_element(t, 1) 3 >>> next_element(t, 3) 4 >>> next_element(t, 5) 6 >>> next_element(t, 7) 8 >>> next_element(t, 10) 13 >>> next_element(t, 14) >>> result = [1] >>> a = next_element(t, 1) >>> while a: ... result += [a] ... a = next_element(t, a) >>> result [1, 3, 4, 6, 7, 8, 10, 13, 14]
625941b5fff4ab517eb2f228
def _postfetch(mapper, uowtransaction, table, state, dict_, result, params, value_params, bulk=False): <NEW_LINE> <INDENT> prefetch_cols = result.context.compiled.prefetch <NEW_LINE> postfetch_cols = result.context.compiled.postfetch <NEW_LINE> returning_cols = result.context.compiled.returning <NEW_LINE> if mapper.version_id_col is not None and mapper.version_id_col in mapper._cols_by_table[table]: <NEW_LINE> <INDENT> prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] <NEW_LINE> <DEDENT> refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) <NEW_LINE> if refresh_flush: <NEW_LINE> <INDENT> load_evt_attrs = [] <NEW_LINE> <DEDENT> if returning_cols: <NEW_LINE> <INDENT> row = result.context.returned_defaults <NEW_LINE> if row is not None: <NEW_LINE> <INDENT> for col in returning_cols: <NEW_LINE> <INDENT> if col.primary_key: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> dict_[mapper._columntoproperty[col].key] = row[col] <NEW_LINE> if refresh_flush: <NEW_LINE> <INDENT> load_evt_attrs.append(mapper._columntoproperty[col].key) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> for c in prefetch_cols: <NEW_LINE> <INDENT> if c.key in params and c in mapper._columntoproperty: <NEW_LINE> <INDENT> dict_[mapper._columntoproperty[c].key] = params[c.key] <NEW_LINE> if refresh_flush: <NEW_LINE> <INDENT> load_evt_attrs.append(mapper._columntoproperty[c].key) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if refresh_flush and load_evt_attrs: <NEW_LINE> <INDENT> mapper.class_manager.dispatch.refresh_flush( state, uowtransaction, load_evt_attrs) <NEW_LINE> <DEDENT> if postfetch_cols: <NEW_LINE> <INDENT> state._expire_attributes(state.dict, [mapper._columntoproperty[c].key for c in postfetch_cols if c in mapper._columntoproperty] ) <NEW_LINE> <DEDENT> for m, equated_pairs in mapper._table_to_equated[table]: <NEW_LINE> <INDENT> if state is None: <NEW_LINE> <INDENT> sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sync.populate(state, m, state, m, equated_pairs, uowtransaction, mapper.passive_updates)
Expire attributes in need of newly persisted database state, after an INSERT or UPDATE statement has proceeded for that state.
625941b5956e5f7376d70c6b
def interpolate_colors(t, color1, color2): <NEW_LINE> <INDENT> r1, g1, b1 = color1 <NEW_LINE> r2, g2, b2 = color2 <NEW_LINE> return (int((1-t) * r1 + t * r2), int((1-t) * g1 + t * g2), int((1-t) * b1 + t * b2))
Interpolate between color1 (for t == 0.0) and color2 (for t == 1.0).
625941b55166f23b2e1a4f48
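A short check of the linear blend: t picks the position between the two endpoint colors, and each channel is interpolated independently:

def interpolate_colors(t, color1, color2):
    r1, g1, b1 = color1
    r2, g2, b2 = color2
    return (int((1 - t) * r1 + t * r2),
            int((1 - t) * g1 + t * g2),
            int((1 - t) * b1 + t * b2))

assert interpolate_colors(0.0, (0, 0, 0), (255, 255, 255)) == (0, 0, 0)
assert interpolate_colors(0.5, (0, 0, 0), (255, 255, 255)) == (127, 127, 127)
assert interpolate_colors(1.0, (0, 0, 0), (255, 255, 255)) == (255, 255, 255)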
def state_invariants(self, state: Sequence[tf.Tensor]) -> List[TensorFluent]: <NEW_LINE> <INDENT> scope = self._scope.state_invariant(self.non_fluents, state) <NEW_LINE> invariants = [] <NEW_LINE> with self.graph.as_default(): <NEW_LINE> <INDENT> with tf.name_scope('state_invariants'): <NEW_LINE> <INDENT> for p in self.rddl.domain.invariants: <NEW_LINE> <INDENT> fluent = self._compile_expression(p, scope) <NEW_LINE> invariants.append(fluent) <NEW_LINE> <DEDENT> return invariants
Compiles the state invariants given the current `state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A list of :obj:`rddl2tf.core.fluent.TensorFluent`.
625941b53346ee7daa2b2b58
def keyword_access_check(method): <NEW_LINE> <INDENT> @wraps(method) <NEW_LINE> def wrapper(request, *args, **kwargs): <NEW_LINE> <INDENT> if request.user.is_staff: <NEW_LINE> <INDENT> return method(request, *args, **kwargs) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> keyword = Keyword.objects.get(keyword=kwargs['keyword']) <NEW_LINE> if keyword.is_locked: <NEW_LINE> <INDENT> if not keyword.can_user_access(request.user): <NEW_LINE> <INDENT> messages.warning(request, settings.NO_ACCESS_WARNING) <NEW_LINE> return redirect('/keyword/all/') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except Keyword.DoesNotExist: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return method(request, *args, **kwargs) <NEW_LINE> <DEDENT> return wrapper
Check a user can access a specific keyword.
625941b52eb69b55b151c699
def get_M(self, system='NED', style='n'): <NEW_LINE> <INDENT> return self._matrix_w_style_and_system(self._M, system, style)
Returns the moment tensor in matrix representation. Call with arguments to set output in another basis system or in fancy style (to be viewed with 'print')
625941b557b8e32f5248328f
def __lt__(self, other): <NEW_LINE> <INDENT> return self.__ne__(other) and self.pki.id == other.pki.id and self.nid in other.subtree()
Lower than. A node is said to be lower than another if it belongs to that other node's subtree.
625941b5a8370b7717052691
def write(self, line): <NEW_LINE> <INDENT> pass
Send line straight to the server. line may need to be encoded as bytes first. This function is to be implemented by the subclass.
625941b5097d151d1a222c4c
def indent(text, indent): <NEW_LINE> <INDENT> return "\n".join([" " * indent + x for x in text.split("\n")])
Return *text* indented by *indent* spaces.
625941b5dc8b845886cb5324
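A one-line check of the indent helper above:

def indent(text, indent):
    return "\n".join([" " * indent + x for x in text.split("\n")])

assert indent("a\nb", 2) == "  a\n  b"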
def clear_answer_id_terms(self): <NEW_LINE> <INDENT> raise errors.Unimplemented()
Clears all answer ``Id`` terms. *compliance: mandatory -- This method must be implemented.*
625941b57d43ff24873a2a93
def print_running_jobs(self): <NEW_LINE> <INDENT> print('Running jobs:') <NEW_LINE> now = time.time() <NEW_LINE> for job in self.running_jobs: <NEW_LINE> <INDENT> print(job, 'running for %i seconds' % (now - job.start_time))
When the user presses enter, print the jobs that are currently being executed
625941b5498bea3a759b98a2
def test_grasp_contact(self): <NEW_LINE> <INDENT> rospy.sleep(2) <NEW_LINE> hand = ShadowHand_ROS() <NEW_LINE> for j in hand.allJoints: <NEW_LINE> <INDENT> hand.sendupdate(j.name, 0.0) <NEW_LINE> <DEDENT> rospy.sleep(2) <NEW_LINE> goal = GraspGoal() <NEW_LINE> goal.grasp = self.mk_grasp({ 'LFJ3': 1.4, 'RFJ3': 1.4, 'MFJ3': 1.4, 'FFJ3': 1.4, 'LFJ0': 2.0, 'RFJ0': 2.0, 'MFJ0': 2.0, 'FFJ0': 2.0, }) <NEW_LINE> goal.pre_grasp = False <NEW_LINE> client = SimpleActionClient('grasp', GraspAction) <NEW_LINE> client.wait_for_server() <NEW_LINE> client.send_goal(goal) <NEW_LINE> client.wait_for_result(rospy.Duration.from_sec(20.0)) <NEW_LINE> self.assertEqual(client.get_state(), GoalStatus.SUCCEEDED, "Action did not return in SUCCEEDED state.") <NEW_LINE> rospy.sleep(2) <NEW_LINE> for j in hand.allJoints: <NEW_LINE> <INDENT> hand.sendupdate(j.name, 0.0) <NEW_LINE> <DEDENT> rospy.sleep(2)
Test sending a grasp with object in the way
625941b532920d7e50b27fbb
def load_class(module_name, class_name): <NEW_LINE> <INDENT> return getattr(importlib.import_module(module_name), class_name)
Loads a class from a module. :param module_name: The full name of the module. EX: abr.executors :type module_name: str :param class_name: The name of the class to load from the module. :type class_name: str :returns: The class requested. :rtype: class
625941b5090684286d50ead2
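A stdlib-only usage sketch of load_class, equivalent to `from collections import OrderedDict`:

import importlib

def load_class(module_name, class_name):
    return getattr(importlib.import_module(module_name), class_name)

OrderedDict = load_class('collections', 'OrderedDict')
od = OrderedDict(a=1)
assert list(od) == ['a']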
def form_invalid(self, form): <NEW_LINE> <INDENT> return super(TalkUpdateView, self).form_invalid(form)
Overrides the form_invalid method to return the context with the filled-in form
625941b515baa723493c3d62
def match(tgt, opts=None): <NEW_LINE> <INDENT> if not opts: <NEW_LINE> <INDENT> opts = __opts__ <NEW_LINE> <DEDENT> nodegroups = opts.get('nodegroups', {}) <NEW_LINE> matchers = salt.loader.matchers(opts) <NEW_LINE> minion_id = opts.get('minion_id', __opts__['id']) <NEW_LINE> if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): <NEW_LINE> <INDENT> log.error('Compound target received that is neither string, list nor tuple') <NEW_LINE> return False <NEW_LINE> <DEDENT> log.debug('compound_match: %s ? %s', minion_id, tgt) <NEW_LINE> ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'J': 'pillar_pcre', 'L': 'list', 'N': None, 'S': 'ipcidr', 'E': 'pcre'} <NEW_LINE> if HAS_RANGE: <NEW_LINE> <INDENT> ref['R'] = 'range' <NEW_LINE> <DEDENT> results = [] <NEW_LINE> opers = ['and', 'or', 'not', '(', ')'] <NEW_LINE> if isinstance(tgt, six.string_types): <NEW_LINE> <INDENT> words = tgt.split() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> words = tgt[:] <NEW_LINE> <DEDENT> while words: <NEW_LINE> <INDENT> word = words.pop(0) <NEW_LINE> target_info = salt.utils.minions.parse_target(word) <NEW_LINE> if word in opers: <NEW_LINE> <INDENT> if results: <NEW_LINE> <INDENT> if results[-1] == '(' and word in ('and', 'or'): <NEW_LINE> <INDENT> log.error('Invalid beginning operator after "(": %s', word) <NEW_LINE> return False <NEW_LINE> <DEDENT> if word == 'not': <NEW_LINE> <INDENT> if not results[-1] in ('and', 'or', '('): <NEW_LINE> <INDENT> results.append('and') <NEW_LINE> <DEDENT> <DEDENT> results.append(word) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if word not in ['(', 'not']: <NEW_LINE> <INDENT> log.error('Invalid beginning operator: %s', word) <NEW_LINE> return False <NEW_LINE> <DEDENT> results.append(word) <NEW_LINE> <DEDENT> <DEDENT> elif target_info and target_info['engine']: <NEW_LINE> <INDENT> if 'N' == target_info['engine']: <NEW_LINE> <INDENT> decomposed = salt.utils.minions.nodegroup_comp(target_info['pattern'], nodegroups) <NEW_LINE> if decomposed: <NEW_LINE> <INDENT> words = decomposed + words <NEW_LINE> <DEDENT> continue <NEW_LINE> <DEDENT> engine = ref.get(target_info['engine']) <NEW_LINE> if not engine: <NEW_LINE> <INDENT> log.error( 'Unrecognized target engine "%s" for target ' 'expression "%s"', target_info['engine'], word ) <NEW_LINE> return False <NEW_LINE> <DEDENT> engine_args = [target_info['pattern']] <NEW_LINE> engine_kwargs = {} <NEW_LINE> if target_info['delimiter']: <NEW_LINE> <INDENT> engine_kwargs['delimiter'] = target_info['delimiter'] <NEW_LINE> <DEDENT> results.append( six.text_type(matchers['{0}_match.match'.format(engine)](*engine_args, **engine_kwargs)) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> results.append(six.text_type(matchers['glob_match.match'](word))) <NEW_LINE> <DEDENT> <DEDENT> results = ' '.join(results) <NEW_LINE> log.debug('compound_match %s ? "%s" => "%s"', minion_id, tgt, results) <NEW_LINE> try: <NEW_LINE> <INDENT> return eval(results) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> log.error( 'Invalid compound target: %s for results: %s', tgt, results) <NEW_LINE> return False <NEW_LINE> <DEDENT> return False
Runs the compound target check
625941b5656771135c3eb662
def setup(self, sctx): <NEW_LINE> <INDENT> dof_numbers, self.dof_X = self.get_dof_method() <NEW_LINE> self.dof_numbers = dof_numbers[:, tuple(self.dims)] <NEW_LINE> if self.get_link_dof_method: <NEW_LINE> <INDENT> link_dofs, linked_dof_X = self.get_link_dof_method() <NEW_LINE> if len(self.link_dims) == 0: <NEW_LINE> <INDENT> self.link_dims = self.dims <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if len(self.dims) != len(self.link_dims): <NEW_LINE> <INDENT> raise IndexError('incompatible dim specification (%d != %d' % (len(self.dims), len(self.link_dims))) <NEW_LINE> <DEDENT> <DEDENT> link_dofs_arr = link_dofs[:, tuple(self.link_dims)] <NEW_LINE> self.bcdof_list = [BCDof(var=self.var, dof=dof, value=self.value, link_dofs=[ldof], link_coeffs=self.link_coeffs, time_function=self.time_function) for dof, ldof in zip(self.dof_numbers.flatten(), link_dofs_arr.flatten())] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.bcdof_list = [BCDof(var=self.var, dof=dof, value=self.value, link_dofs=self.link_dofs, link_coeffs=self.link_coeffs, time_function=self.time_function) for dof in self.dof_numbers.flatten()] <NEW_LINE> <DEDENT> return
Locate the spatial context.
625941b52eb69b55b151c69a
def shape_from_args(self): <NEW_LINE> <INDENT> return ku.shape(self.key, self._orig_key, self.args[0].shape)
Returns the shape of the index expression.
625941b530c21e258bdfa28c
def setSaveSettings(self, state): <NEW_LINE> <INDENT> self._save_settings=state
Used when I don't want my column settings being saved
625941b54c3428357757c11b
def process_s3_event(event, context): <NEW_LINE> <INDENT> logger.debug("event:%s" % event) <NEW_LINE> logger.debug("context:%s" % context) <NEW_LINE> bucket = event['Records'][0]['s3']['bucket']['name'] <NEW_LINE> key = event['Records'][0]['s3']['object']['key'] <NEW_LINE> if key.split('.')[-1].lower() not in config.ALLOWED_EXTENTIONS: <NEW_LINE> <INDENT> raise NotImplementedError('Unsupported file extention in %s:%s' % (bucket, key)) <NEW_LINE> <DEDENT> s3_client = boto3.client('s3') <NEW_LINE> logger.info("loading file %s:%s to /tmp/%s" % (bucket, key, key)) <NEW_LINE> s3_client.download_file(bucket, key, '/tmp/' + key) <NEW_LINE> original_image_data = Image.open('/tmp/' + key) <NEW_LINE> for prefix, width, height, ext in config.IMAGE_CONVERSIONS: <NEW_LINE> <INDENT> resize_data = resize_image(original_image_data, width, height, ext) <NEW_LINE> new_file_key = ".".join(key.split('.')[:-1]) + ".%s" % ext <NEW_LINE> save_to_s3('%s/%s' % (prefix, new_file_key), resize_data) <NEW_LINE> <DEDENT> pass
Process the new s3 event
625941b56aa9bd52df036b92
def bitwiseComplement(self, N): <NEW_LINE> <INDENT> return 2 ** (len(bin(N)) - 2) - N - 1
:type N: int :rtype: int
625941b58da39b475bd64d66
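Why the one-liner works: len(bin(N)) - 2 is N's bit length, so 2 ** bits - 1 is an all-ones mask of that width, and subtracting N flips every bit. A sketch recast as a free function:

def bitwise_complement(N):
    return 2 ** (len(bin(N)) - 2) - N - 1

# bin(5) == '0b101' -> 3 significant bits -> mask 0b111 == 7
# 7 - 5 == 2, i.e. 0b101 with every bit flipped is 0b010
assert bitwise_complement(5) == 2
assert bitwise_complement(7) == 0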
def __init__(self, profile=None, addons=None, addon_manifests=None, apps=None, preferences=None, locations=None, proxy=None, restore=True): <NEW_LINE> <INDENT> self._addons = addons <NEW_LINE> self._addon_manifests = addon_manifests <NEW_LINE> self._apps = apps <NEW_LINE> self._locations = locations <NEW_LINE> self._proxy = proxy <NEW_LINE> if preferences: <NEW_LINE> <INDENT> if isinstance(preferences, dict): <NEW_LINE> <INDENT> preferences = preferences.items() <NEW_LINE> <DEDENT> assert not [i for i in preferences if len(i) != 2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> preferences = [] <NEW_LINE> <DEDENT> self._preferences = preferences <NEW_LINE> self.create_new = not profile <NEW_LINE> if profile: <NEW_LINE> <INDENT> self.profile = os.path.abspath(os.path.expanduser(profile)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.profile = tempfile.mkdtemp(suffix='.mozrunner') <NEW_LINE> <DEDENT> self.restore = restore <NEW_LINE> self._internal_init()
:param profile: Path to the profile :param addons: String of one or list of addons to install :param addon_manifests: Manifest for addons (see http://bit.ly/17jQ7i6) :param apps: Dictionary or class of webapps to install :param preferences: Dictionary or class of preferences :param locations: ServerLocations object :param proxy: Setup a proxy :param restore: Flag for removing all custom settings during cleanup
625941b5a05bb46b383ec61e
def get( self, resource_group_name, cross_connection_name, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2019-08-01" <NEW_LINE> accept = "application/json" <NEW_LINE> url = self.get.metadata['url'] <NEW_LINE> path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> header_parameters = {} <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) <NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized
Gets details about the specified ExpressRouteCrossConnection. :param resource_group_name: The name of the resource group (peering location of the circuit). :type resource_group_name: str :param cross_connection_name: The name of the ExpressRouteCrossConnection (service key of the circuit). :type cross_connection_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ExpressRouteCrossConnection, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.ExpressRouteCrossConnection :raises: ~azure.core.exceptions.HttpResponseError
625941b5be7bc26dc91cd3f6
def test_comment_can_be_deleted(self): <NEW_LINE> <INDENT> create_test_account(self) <NEW_LINE> login_test_account(self) <NEW_LINE> char_id = upload_test_paste(self) <NEW_LINE> response = self.client.post(reverse("comments:add_comment"), {"char_id": char_id, "text": "This is a test comment"}) <NEW_LINE> response = json.loads(response.content) <NEW_LINE> self.assertEqual(response["status"], "success") <NEW_LINE> comment_id = response["data"]["comments"][0]["id"] <NEW_LINE> response = self.client.post(reverse("comments:delete_comment"), {"char_id": char_id, "id": comment_id, "page": 0}) <NEW_LINE> response = json.loads(response.content) <NEW_LINE> self.assertEqual(response["status"], "success") <NEW_LINE> response = self.client.post(reverse("comments:get_comments"), {"char_id": char_id, "page": 0}) <NEW_LINE> response = json.loads(response.content) <NEW_LINE> self.assertEqual(response["status"], "success") <NEW_LINE> self.assertEqual(len(response["data"]["comments"]), 0)
Post a comment and then delete it
625941b59f2886367277a681
def objective_func(run_model, decision_vars): <NEW_LINE> <INDENT> total_deaths = sum(run_model.derived_outputs["infection_deaths"]) <NEW_LINE> max_hospital = max(run_model.derived_outputs["hospital_occupancy"]) <NEW_LINE> closing_gap = decision_vars[-1] <NEW_LINE> return [total_deaths, max_hospital, closing_gap]
Calculates multiple objectives :return: list of objectives
625941b556b00c62f0f1444c
def filter_by_perms(solrq, user): <NEW_LINE> <INDENT> cmodels = searchable_cmodels(user) <NEW_LINE> if cmodels: <NEW_LINE> <INDENT> solrq = solrq.filter(reduce(operator.or_, [solrq.Q(content_model=cm) for cm in cmodels])) <NEW_LINE> <DEDENT> if not user.has_perm('audio.view_audio') and user.has_perm('audio.view_researcher_audio'): <NEW_LINE> <INDENT> cm_query = solrq.Q(solrq.Q(content_model=AudioObject.AUDIO_CONTENT_MODEL) | solrq.Q(content_model=Video.VIDEO_CONTENT_MODEL)) <NEW_LINE> solrq = solrq.filter(cm_query).filter(has_access_copy=True, researcher_access=True) <NEW_LINE> <DEDENT> return solrq
Filter a solr query to return only those content models the specified user has permission to view. :param solrq: sunburnt solr query object :param user: instance of :class:`~django.contrib.auth.models.User` to use for checking permissions :returns: filtered solr query
625941b57b25080760e3924b
def pop_feature(self): <NEW_LINE> <INDENT> if self.__feature_size <= 0: <NEW_LINE> <INDENT> return defines.ReturnCode.FAIL <NEW_LINE> <DEDENT> self.__feature_size -= 1 <NEW_LINE> del self.__feature_id_dict[self.__feature_list[self.__feature_size].id] <NEW_LINE> del self.__feature_list[self.__feature_size] <NEW_LINE> return defines.ReturnCode.SUCC
Delete a feature from the tail
625941b563d6d428bbe442df
def generate_psf(self): <NEW_LINE> <INDENT> self.parameters["sigma_pixel"] = self.parameters["psf_size"] / self.parameters["pixel_size"] <NEW_LINE> kernel_size_pixel = int(self.parameters["sigma_pixel"] * 10) <NEW_LINE> gaussian_kernel_1d = signal.gaussian(kernel_size_pixel, std=self.parameters["sigma_pixel"]) <NEW_LINE> gaussian_kernel_1d = gaussian_kernel_1d.reshape(kernel_size_pixel, 1) <NEW_LINE> self.psf = np.outer(gaussian_kernel_1d, gaussian_kernel_1d)
Generate a PSF from a Gaussian.
625941b5d99f1b3c44c6738e
def factorial(n): <NEW_LINE> <INDENT> if n == 0: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return n * factorial(n-1)
Return n!. By definition n! = n x (n−1)!: the factorial of any number is that number times the factorial of (that number minus 1)
625941b54527f215b584c24b
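A quick check of the base case and the recursion:

def factorial(n):
    return 1 if n == 0 else n * factorial(n - 1)

assert factorial(0) == 1
assert factorial(5) == 120  # 5 * 4 * 3 * 2 * 1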
def __init__(self, ai_settings, screen): <NEW_LINE> <INDENT> self.screen = screen <NEW_LINE> self.ai_settings = ai_settings <NEW_LINE> self.image = pygame.image.load("images/ship.bmp") <NEW_LINE> self.rect = self.image.get_rect() <NEW_LINE> self.screen_rect = screen.get_rect() <NEW_LINE> self.rect.centerx = self.screen_rect.centerx <NEW_LINE> self.rect.bottom = self.screen_rect.bottom <NEW_LINE> self.center = float(self.rect.centerx) <NEW_LINE> self.moving_right = False <NEW_LINE> self.moving_left = False
Initialize the ship and set its initial position.
625941b530c21e258bdfa28d
def SetTopLevel(self,*args): <NEW_LINE> <INDENT> pass
SetTopLevel(self: Control,value: bool) Sets the control as the top-level control. value: true to set the control as the top-level control; otherwise,false.
625941b545492302aab5e0af
def check_subset(a1, a2): <NEW_LINE> <INDENT> return set(a2).issubset(a1)
O(n) method that converts a2 to a set and checks for a subset. How could you do better than O(n)? You have to iterate over all the elements of a2 at the very least
625941b5e5267d203edcda91
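A short check of the argument order: check_subset(a1, a2) asks whether a2 is a subset of a1:

def check_subset(a1, a2):
    return set(a2).issubset(a1)

assert check_subset([1, 2, 3, 4], [2, 4])   # every element of a2 is in a1
assert not check_subset([1, 2, 3], [2, 5])  # 5 is missing from a1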
def _walkDir(path, recurse, funcArgs, kw): <NEW_LINE> <INDENT> for f in os.listdir(path): <NEW_LINE> <INDENT> pathname = '%s/%s' % (path, f) <NEW_LINE> mode = os.stat(pathname)[ST_MODE] <NEW_LINE> if S_ISDIR(mode): <NEW_LINE> <INDENT> if recurse: <NEW_LINE> <INDENT> recurse -= 1 <NEW_LINE> if not dirFunc: <NEW_LINE> <INDENT> _walkDir(pathname, recurse, funcArgs, kw) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dirFunc(pathname, lambda *args, **kw: _walkDir(pathname, recurse, args, kw), *funcArgs, **kw) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif S_ISREG(mode): <NEW_LINE> <INDENT> if fileFunc: <NEW_LINE> <INDENT> fileFunc(pathname, f, *funcArgs, **kw) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception('unexpected file type: %s' % pathname)
Recursively descend the directory rooted at path
625941b599fddb7c1c9de183
def replace_species(self, species_mapping: Dict[str, str]): <NEW_LINE> <INDENT> species_mapping = {get_el_sp(k): v for k, v in species_mapping.items()} <NEW_LINE> sp_to_replace = set(species_mapping.keys()) <NEW_LINE> sp_in_structure = set(self.composition.keys()) <NEW_LINE> if not sp_in_structure.issuperset(sp_to_replace): <NEW_LINE> <INDENT> warnings.warn( "Some species to be substituted are not present in " "structure. Pls check your input. Species to be " "substituted = %s; Species in structure = %s" % (sp_to_replace, sp_in_structure)) <NEW_LINE> <DEDENT> for site in self.sites: <NEW_LINE> <INDENT> if sp_to_replace.intersection(site.species): <NEW_LINE> <INDENT> c = Composition() <NEW_LINE> for sp, amt in site.species.items(): <NEW_LINE> <INDENT> new_sp = species_mapping.get(sp, sp) <NEW_LINE> try: <NEW_LINE> <INDENT> c += Composition(new_sp) * amt <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> c += {new_sp: amt} <NEW_LINE> <DEDENT> <DEDENT> site.species = c
Swap species. Args: species_mapping (dict): dict of species to swap. Species can be elements too. E.g., {Element("Li"): Element("Na")} performs a Li for Na substitution. The second species can be a sp_and_occu dict. For example, a site with 0.5 Si that is passed the mapping {Element('Si): {Element('Ge'):0.75, Element('C'):0.25} } will have .375 Ge and .125 C.
625941b56e29344779a62406
def get_form_kwargs(self): <NEW_LINE> <INDENT> kwargs = super(SubmitProblemView, self).get_form_kwargs() <NEW_LINE> kwargs['request'] = self.request <NEW_LINE> return kwargs
This is the most convoluted way to go about this...
625941b50383005118ecf3d5
def directConnectPath(*args, **kwargs): <NEW_LINE> <INDENT> pass
Derived from mel command `maya.cmds.directConnectPath`
625941b555399d3f055884a4
def start(self, loop): <NEW_LINE> <INDENT> self.server = loop.run_until_complete( asyncio.streams.start_server(self._accept_client, '127.0.0.1', 12345, loop=loop))
Starts the TCP server, so that it listens on port 12345. For each client that connects, the accept_client method gets called. This method runs the loop until the server sockets are ready to accept connections.
625941b51f5feb6acb0c4946
def get_cluster_internal(cluster_id, session=None, **kwargs): <NEW_LINE> <INDENT> return _get_cluster(cluster_id, session=session, **kwargs)
Helper function to get cluster. Should be only used by other files under db/api.
625941b57d847024c06be0b0
def translate(self, digits: str, css: str) -> Tuple[str, str]: <NEW_LINE> <INDENT> def discard_digits(digits: str) -> str: <NEW_LINE> <INDENT> dot_pos = self.pattern.find('.') <NEW_LINE> if dot_pos == -1: <NEW_LINE> <INDENT> raise TranslationError(f'discard PreDot requires "." in pattern: {self}') <NEW_LINE> <DEDENT> return digits[dot_pos:] <NEW_LINE> <DEDENT> def str_to_set_list(digits: str) -> List[Set[str]]: <NEW_LINE> <INDENT> digits = iter(digits) <NEW_LINE> result = [] <NEW_LINE> for digit in digits: <NEW_LINE> <INDENT> if digit == '[': <NEW_LINE> <INDENT> dset = set() <NEW_LINE> p_digit = None <NEW_LINE> digit = next(digits) <NEW_LINE> while digit != ']': <NEW_LINE> <INDENT> if digit == '-': <NEW_LINE> <INDENT> digit = next(digits) <NEW_LINE> for o in range(ord(p_digit), ord(digit) + 1): <NEW_LINE> <INDENT> dset.add(chr(o)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> dset.add(digit) <NEW_LINE> p_digit = digit <NEW_LINE> <DEDENT> digit = next(digits) <NEW_LINE> <DEDENT> result.append(dset) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result.append({digit}) <NEW_LINE> <DEDENT> <DEDENT> return result <NEW_LINE> <DEDENT> def set_list_to_str(dset_list: List[Set[str]]) -> str: <NEW_LINE> <INDENT> result = '' <NEW_LINE> for dset in dset_list: <NEW_LINE> <INDENT> if not dset: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if len(dset) == 1: <NEW_LINE> <INDENT> append = next(iter(dset)) <NEW_LINE> if not append: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> append = ''.join(sorted(dset)) <NEW_LINE> if append == '0123456789': <NEW_LINE> <INDENT> append = 'X' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> append = f'[{append}]' <NEW_LINE> <DEDENT> <DEDENT> result = f'{result}{append}' <NEW_LINE> <DEDENT> return result <NEW_LINE> <DEDENT> new_digits = str_to_set_list(digits) <NEW_LINE> if self.use_originators_calling_search_space: <NEW_LINE> <INDENT> new_css = css <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_css = ':'.join(self.css) <NEW_LINE> <DEDENT> if self.discard_digits: <NEW_LINE> <INDENT> new_digits = discard_digits(new_digits) <NEW_LINE> <DEDENT> if self.called_party_prefix_digits: <NEW_LINE> <INDENT> new_digits = [{d} for d in self.called_party_prefix_digits] + new_digits <NEW_LINE> <DEDENT> if self.route_next_hop_by_calling_party_number: <NEW_LINE> <INDENT> raise NotImplementedError <NEW_LINE> <DEDENT> if self.called_party_mask: <NEW_LINE> <INDENT> masked = list(d if m == 'X' else {m} for m, d in zip_longest(self.called_party_mask[::-1], new_digits[::-1], fillvalue='')) <NEW_LINE> new_digits = masked[::-1] <NEW_LINE> <DEDENT> new_digits = set_list_to_str(new_digits) <NEW_LINE> log.debug(f'{self} translate {digits}->{new_digits}') <NEW_LINE> return new_digits, new_css
Apply translation to given digit string :param digits: digit string :param css: activating css :return: Tuple[digit string, css for secondary lookup]
625941b5be8e80087fb20a3f
def schema(schema_id): <NEW_LINE> <INDENT> def decorator(handler): <NEW_LINE> <INDENT> handler.__schema_id__ = schema_id <NEW_LINE> return handler <NEW_LINE> <DEDENT> return decorator
Decorator to assign a schema name to an endpoint handler.
625941b5293b9510aa2c3089
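A self-contained usage sketch: the decorator only attaches the id as an attribute, which a framework can read later:

def schema(schema_id):
    def decorator(handler):
        handler.__schema_id__ = schema_id
        return handler
    return decorator

@schema('user.create')
def create_user(request):
    return {'ok': True}

assert create_user.__schema_id__ == 'user.create'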
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10): <NEW_LINE> <INDENT> s = hex_to_RGB(start_hex) <NEW_LINE> f = hex_to_RGB(finish_hex) <NEW_LINE> RGB_list = [s] <NEW_LINE> for t in range(1, n): <NEW_LINE> <INDENT> curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3) ] <NEW_LINE> RGB_list.append(curr_vector) <NEW_LINE> <DEDENT> return color_dict(RGB_list)
Returns a gradient list of (n) colors between two hex colors. start_hex and finish_hex should be the full six-digit color string, including the number sign ("#FFFFFF")
625941b585dfad0860c3ac49
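linear_gradient leans on two helpers not shown in the sample; the hex_to_RGB and color_dict definitions below are plausible assumptions, included only so the sketch runs end to end:

def hex_to_RGB(hex_str):
    # Assumed helper: '#FFFFFF' -> [255, 255, 255].
    return [int(hex_str[i:i + 2], 16) for i in (1, 3, 5)]

def color_dict(gradient):
    # Assumed helper: parallel lists of hex strings and r/g/b channels.
    return {'hex': ['#%02x%02x%02x' % tuple(rgb) for rgb in gradient],
            'r': [rgb[0] for rgb in gradient],
            'g': [rgb[1] for rgb in gradient],
            'b': [rgb[2] for rgb in gradient]}

def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
    s, f = hex_to_RGB(start_hex), hex_to_RGB(finish_hex)
    RGB_list = [s]
    for t in range(1, n):
        RGB_list.append([int(s[j] + (float(t) / (n - 1)) * (f[j] - s[j]))
                         for j in range(3)])
    return color_dict(RGB_list)

assert linear_gradient('#000000', '#FFFFFF', n=3)['hex'] == ['#000000', '#7f7f7f', '#ffffff']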
def get_matches(to_search_for, search_in): <NEW_LINE> <INDENT> groups = Counter(search_in) <NEW_LINE> matches = Counter() <NEW_LINE> for row in sorted(to_search_for): <NEW_LINE> <INDENT> for key in surrounding(row): <NEW_LINE> <INDENT> if groups[key]: <NEW_LINE> <INDENT> matches[row] += 1 <NEW_LINE> groups[key] -= 1 <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return matches
Return dictionary of each row and its successful matches.
625941b563f4b57ef0000f14
def find_app_dojo_dir_and_url(app_name): <NEW_LINE> <INDENT> ret = [] <NEW_LINE> media_dir = find_app_dojo_dir(app_name) <NEW_LINE> if not media_dir: return None <NEW_LINE> for d in listdir(media_dir): <NEW_LINE> <INDENT> if path.isdir(safe_join(media_dir, d)): <NEW_LINE> <INDENT> if d not in ("src", "release") and not d.startswith("."): <NEW_LINE> <INDENT> ret.append(tuple([safe_join(media_dir, d), "%(module)s" % { 'module': d }])) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return tuple(ret)
Returns a list of tuples of dojo modules within an apps 'dojo-media' directory. Each tuple contains the abspath to the module directory and the module name.
625941b5f7d966606f6a9df9
def ieq_cons(z,x,R,s_,Vf,Para): <NEW_LINE> <INDENT> S = Para.P.shape[0] <NEW_LINE> c1 = z[0:S] <NEW_LINE> c2 = z[S:2*S] <NEW_LINE> Rprime,_ = ComputeR(c1,c2,0,0,Para) <NEW_LINE> l1,_,l2,_ = Computel(c1,0,c2,0,Rprime,0,Para) <NEW_LINE> xprime,_ = ComputeXprime(c1,0,c2,0,Rprime,0,l1,0,l2,0,x,s_,Para) <NEW_LINE> return np.hstack((xprime-Para.xmin,Para.xmax-xprime,Rprime-Para.Rmin,Para.Rmax-Rprime))
Computes the inequality constraints associated with the constrained optimization. Namely those associated with R and xprime
625941b5e64d504609d74631
def test_replace_clash(self): <NEW_LINE> <INDENT> m3 = Merge3( [b"aaa", b"000", b"bbb"], [b"aaa", b"111", b"bbb"], [b"aaa", b"222", b"bbb"] ) <NEW_LINE> self.assertEqual(m3.find_unconflicted(), [(0, 1), (2, 3)]) <NEW_LINE> self.assertEqual( list(m3.find_sync_regions()), [(0, 1, 0, 1, 0, 1), (2, 3, 2, 3, 2, 3), (3, 3, 3, 3, 3, 3)], )
Both sides replace the same line with different content.
625941b5a79ad161976cbf36
def run(self): <NEW_LINE> <INDENT> base_infos = self.scan_base() <NEW_LINE> index_infos = self.read_index() <NEW_LINE> remote_infos = self.get_fileinfomap() <NEW_LINE> local_infos = {} <NEW_LINE> all_files = set().union(base_infos, index_infos, remote_infos) <NEW_LINE> for file_name in all_files: <NEW_LINE> <INDENT> index_info = index_infos.get(file_name, [0, []]) <NEW_LINE> base_info = base_infos.get(file_name, [0, []]) <NEW_LINE> remote_info = remote_infos.get(file_name, [0, []]) <NEW_LINE> if remote_info == index_info and remote_info[1] and not base_info[1]: <NEW_LINE> <INDENT> self.delete(file_name, index_info[0] + 1) <NEW_LINE> local_infos[file_name] = [index_info[0] + 1, []] <NEW_LINE> <DEDENT> elif remote_info == index_info and base_info[1] != index_info[1]: <NEW_LINE> <INDENT> base_info[0] = index_info[0] + 1 <NEW_LINE> assert self.upload(file_name, base_info) <NEW_LINE> local_infos[file_name] = base_info <NEW_LINE> <DEDENT> elif remote_info != index_info: <NEW_LINE> <INDENT> if not remote_info[1] and base_info[1]: <NEW_LINE> <INDENT> os.remove(os.path.join(self.base_dir, file_name)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.download(file_name) <NEW_LINE> <DEDENT> local_infos[file_name] = remote_info <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> local_infos[file_name] = index_info <NEW_LINE> <DEDENT> <DEDENT> self.update_index(local_infos) <NEW_LINE> return
Assumes the server and the base directory do not change while the sync is running.
625941b571ff763f4b54947f
def check_ast_errors(error): <NEW_LINE> <INDENT> if len(error) > 0: <NEW_LINE> <INDENT> for err in error: <NEW_LINE> <INDENT> msg = '%s: %s' % (err.get_error()) <NEW_LINE> return msg <NEW_LINE> <DEDENT> <DEDENT> return None
Check for errors derived from asteval and return the first error message, or None if there are none.
625941b566656f66f7cbbf9a
def cri_from_knx(self, cri): <NEW_LINE> <INDENT> if len(cri) < ConnectRequest.CRI_LENGTH: <NEW_LINE> <INDENT> raise CouldNotParseKNXIP("CRI data has wrong length") <NEW_LINE> <DEDENT> if cri[0] != ConnectRequest.CRI_LENGTH: <NEW_LINE> <INDENT> raise CouldNotParseKNXIP("CRI has wrong length") <NEW_LINE> <DEDENT> self.request_type = ConnectRequestType(cri[1]) <NEW_LINE> self.flags = cri[2] <NEW_LINE> return 4
Parse CRI (Connection Request Information).
625941b5d164cc6175782b3e
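A hedged illustration of the byte layout the parser expects; the concrete enum values live in the surrounding KNXnet/IP library, so treat the constants below as assumptions:

# CRI layout: [length (4), connection type, flags/KNX layer, reserved]
cri = bytes([0x04, 0x04, 0x02, 0x00])  # 0x04: tunnel connection, 0x02: link layer
# cri_from_knx(cri) would set request_type from cri[1], flags from cri[2],
# and return 4, the number of bytes consumed.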
def create_payload(self): <NEW_LINE> <INDENT> payload = super(HostCollection, self).create_payload() <NEW_LINE> if 'system_ids' in payload: <NEW_LINE> <INDENT> payload['system_uuids'] = payload.pop('system_ids') <NEW_LINE> <DEDENT> return payload
Rename ``system_ids`` to ``system_uuids``.
625941b516aa5153ce362268
def posWeights(alg, seqw=1, lbda=0, freq0 = np.array([.073, .025, .050, .061, .042, .072, .023, .053, .064, .089,.023, .043, .052, .040, .052, .073, .056, .063, .013, .033])): <NEW_LINE> <INDENT> N_seq, N_pos = alg.shape; N_aa = 20 <NEW_LINE> if type(seqw) == int and seqw == 1: seqw = np.ones((1,N_seq)) <NEW_LINE> freq1, freq2, freq0 = freq(alg, Naa=20, seqw=seqw, lbda=lbda, freq0=freq0) <NEW_LINE> theta = 1 - freq1.sum()/N_pos <NEW_LINE> freqg0 = (1-theta)*freq0 <NEW_LINE> freq0v = np.tile(freq0,N_pos) <NEW_LINE> iok = [i for i in range(N_pos*N_aa) if (freq1[i]>0 and freq1[i]<1)] <NEW_LINE> Wia = np.zeros(N_pos*N_aa) <NEW_LINE> Wia[iok] = abs(np.log((freq1[iok]*(1-freq0v[iok]))/((1-freq1[iok])*freq0v[iok]))) <NEW_LINE> Dia = np.zeros(N_pos*N_aa) <NEW_LINE> Dia[iok] = freq1[iok]*np.log(freq1[iok]/freq0v[iok]) + (1-freq1[iok])*np.log((1-freq1[iok])/(1-freq0v[iok])) <NEW_LINE> Di = np.zeros(N_pos) <NEW_LINE> for i in range(N_pos): <NEW_LINE> <INDENT> freq1i = freq1[N_aa*i: N_aa*(i+1)] <NEW_LINE> aok = [a for a in range(N_aa) if (freq1i[a]>0 and freq1i[a]<1)] <NEW_LINE> flogf = freq1i[aok]*np.log(freq1i[aok]/freqg0[aok]) <NEW_LINE> Di[i] = flogf.sum() <NEW_LINE> freqgi = 1 - freq1i.sum() <NEW_LINE> if freqgi > 0: Di[i] += freqgi*np.log(freqgi/theta) <NEW_LINE> <DEDENT> return Wia, Dia, Di
Compute single-site measures of conservation, and the sca position weights, :math:`\frac {\partial {D_i^a}}{\partial {f_i^a}}` **Arguments:** - `alg` = MSA, dimensions MxL, converted to numerical representation with lett2num_ - `seqw` = a vector of M sequence weights (default is uniform weighting) - `lbda` = pseudo-counting frequencies, default is no pseudocounts - `freq0` = background amino acid frequencies :math:`q_i^a` **Returns:** - `Wia` = positional weights from the derivation of a relative entropy, :math:`\frac {\partial {D_i^a}}{\partial {f_i^a}}` (Lx20) - `Dia` = the relative entropy per position and amino acid (Lx20) - `Di` = the relative entropy per position (L) :Example: >>> Wia, Dia, Di = posWeights(alg, seqw=1,freq0)
625941b55166f23b2e1a4f49
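The positional weight Wia is the magnitude of the log-odds of the observed frequency against the background; a quick numeric check of that formula in isolation:

import numpy as np

f, q = 0.4, 0.073  # observed and background frequency for one (position, aa) pair
Wia = abs(np.log((f * (1 - q)) / ((1 - f) * q)))
print(round(float(Wia), 2))  # ~2.14: f well above background gives a large weight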
def getOperationalLimitValue(self): <NEW_LINE> <INDENT> return self._OperationalLimitValue
Values of equipment limits.
625941b591af0d3eaac9b804
def test_var_initialization(self): <NEW_LINE> <INDENT> model = State() <NEW_LINE> self.assertIsInstance(model.created_at, datetime)
Check that created_at is initialized to a datetime by default.
625941b5c432627299f04a35
def computeArea2(self, ax1, ay1, ax2, ay2, bx1, by1, bx2, by2): <NEW_LINE> <INDENT> area1 = (ax2 - ax1) * (ay2 - ay1) <NEW_LINE> area2 = (bx2 - bx1) * (by2 - by1) <NEW_LINE> ix1 = max(ax1, bx1) <NEW_LINE> iy1 = max(ay1, by1) <NEW_LINE> ix2 = min(ax2, bx2) <NEW_LINE> iy2 = min(ay2, by2) <NEW_LINE> iarea = max(0, ix2 - ix1) * max(0, iy2 - iy1) <NEW_LINE> return area1 + area2 - iarea
:type ax1: int :type ay1: int :type ax2: int :type ay2: int :type bx1: int :type by1: int :type bx2: int :type by2: int :rtype: int
625941b5ec188e330fd5a598
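A worked example (the classic LeetCode 223 case), assuming the method sits on the usual Solution wrapper class:

sol = Solution()
# area1 = 6*4 = 24, area2 = 9*3 = 27, overlap = 3*2 = 6 -> 24 + 27 - 6 = 45
assert sol.computeArea2(-3, 0, 3, 4, 0, -1, 9, 2) == 45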
def testGetSetters1(self): <NEW_LINE> <INDENT> self.genericAggregateTest(['x', 'y'])
Test for nonexistent aggregate position == (x, y)
625941b55510c4643540f1ea
def get_examples(ds_data, network, parents, verbose=1, **params): <NEW_LINE> <INDENT> classes = params.setdefault('classes', [-1,0,1]) <NEW_LINE> target = params.setdefault('target', int(1.2e6)) <NEW_LINE> slice_len = params.setdefault('slice_len', 330) <NEW_LINE> assert not target % len(classes) <NEW_LINE> G = np.mean(ds_data, axis=0) <NEW_LINE> examples = np.zeros((target, 5, slice_len, 1)) <NEW_LINE> labels = np.zeros((target, len(classes))) <NEW_LINE> count = 0 <NEW_LINE> if verbose > 0: <NEW_LINE> <INDENT> print('Generating {} training examples'.format(target)) <NEW_LINE> bar = pb.ProgressBar(max_value=target, widgets=[pb.Percentage(), ' - ', pb.Bar(), ' - ', pb.ETA()]) <NEW_LINE> <DEDENT> for c in classes: <NEW_LINE> <INDENT> pairs = np.argwhere(network == c) <NEW_LINE> reps = int(target/len(classes)/pairs.shape[0]) + 1 <NEW_LINE> pair_idx = np.repeat(np.arange(pairs.shape[0]), reps) <NEW_LINE> pair_idx = np.random.permutation(pair_idx)[:target//len(classes)] <NEW_LINE> start_idx = np.random.randint( 0, ds_data.shape[1]-slice_len, size=target//len(classes)) <NEW_LINE> for i in range(pair_idx.size): <NEW_LINE> <INDENT> n1 = pairs[pair_idx[i]][0] <NEW_LINE> n2 = pairs[pair_idx[i]][1] <NEW_LINE> assert(network[n1,n2] == c) <NEW_LINE> start = start_idx[i] <NEW_LINE> end = start + slice_len <NEW_LINE> p1 = np.mean(ds_data[parents[n1], start:end], axis=0) <NEW_LINE> p2 = np.mean(ds_data[parents[n2], start:end], axis=0) <NEW_LINE> examples[count,:,:,0] = np.vstack(( p1, ds_data[n1][start:end], G[start:end], ds_data[n2][start:end], p2 )) <NEW_LINE> labels[count,:] = np.equal(classes, c, dtype=np.int32) <NEW_LINE> if verbose > 0: <NEW_LINE> <INDENT> bar.update(count) <NEW_LINE> <DEDENT> count +=1 <NEW_LINE> <DEDENT> <DEDENT> if verbose > 0: <NEW_LINE> <INDENT> bar.finish() <NEW_LINE> print( 'Generated examples of shape:', examples.shape, '\nGenerated labels of shape:', labels.shape, '\nThere are {} classes: {}'.format(len(classes), classes) ) <NEW_LINE> <DEDENT> assert not np.isnan(examples).any() <NEW_LINE> return examples, labels
Generates a balanced set of training examples from a single dataset. Args: ds_data: Downsampled spike or fluorescence data in shape (neurons, timesteps). network: Adjacency matrix representing the true connections of the neurons in the dataset. Shape (neurons, neurons). parents: Dict of indices indicating the strongest drivers of each neuron as estimated by GTE. verbose: Control what gets printed to the console. **classes: List of connection class labels, as integers. Default is [-1, 0, 1] for inhibitory, none, and excitatory connection respectively. **target: Total number of examples to generate from this dataset. **slice_len: Length of time series slice used to generate examples. Returns: examples: Array of training examples, shape (target, 5, slice_len, 1). labels: Array of training labels, shape (target, # of classes).
625941b58a43f66fc4b53e5b
def Equals(self,obj): <NEW_LINE> <INDENT> pass
Equals(self: GuidEnum, obj: object) -> bool Compares two Guid-based enum objects based on their concrete class and GUID value.
625941b5ac7a0e7691ed3ecb
def generate_password(self): <NEW_LINE> <INDENT> import_path, args, kwargs = settings.TICKETOFFICE_PASSWORD_GENERATOR <NEW_LINE> generator = import_member(import_path) <NEW_LINE> clear_password = generator(*args, **kwargs) <NEW_LINE> self.set_password(clear_password) <NEW_LINE> return clear_password
Generate password, set :py:attr:`password` and return clear value. Uses ``settings.TICKETOFFICE_PASSWORD_GENERATOR``. Does not save the instance.
625941b5d53ae8145f87a068
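A hedged illustration of the expected setting shape, an (import path, args, kwargs) triple; the generator shown is one plausible choice:

# settings.py (illustrative)
TICKETOFFICE_PASSWORD_GENERATOR = (
    'django.utils.crypto.get_random_string',  # dotted import path
    [12],                                     # positional args: length
    {},                                       # keyword args
)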
def length_of_longest_substring(s): <NEW_LINE> <INDENT> idx = 0 <NEW_LINE> cache ={} <NEW_LINE> maxlength = 0 <NEW_LINE> current_length = 0 <NEW_LINE> start_pos = 0 <NEW_LINE> while idx < len(s): <NEW_LINE> <INDENT> if s[idx] not in cache: <NEW_LINE> <INDENT> cache[s[idx]] = idx <NEW_LINE> current_length += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> maxlength = max(current_length, maxlength) <NEW_LINE> next_start = cache[s[idx]]+1 <NEW_LINE> for i in range(start_pos, cache[s[idx]]+1): <NEW_LINE> <INDENT> del cache[s[i]] <NEW_LINE> current_length -= 1 <NEW_LINE> <DEDENT> start_pos = next_start <NEW_LINE> cache[s[idx]] = idx <NEW_LINE> current_length += 1 <NEW_LINE> <DEDENT> idx += 1 <NEW_LINE> <DEDENT> maxlength = max(current_length, maxlength) <NEW_LINE> return maxlength
Given a string, find the length of the longest contiguous substring without repeating characters. :type s: str :rtype: int
625941b52ae34c7f2600cf22
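Standard checks for the sliding-window approach:

assert length_of_longest_substring("abcabcbb") == 3  # "abc"
assert length_of_longest_substring("bbbbb") == 1     # "b"
assert length_of_longest_substring("pwwkew") == 3    # "wke"
assert length_of_longest_substring("") == 0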
def isDifferent(self, vec2): <NEW_LINE> <INDENT> checkVector(self, vec2) <NEW_LINE> futures = self.client.map(_call_isDifferent, self.vecDask, vec2.vecDask, pure=False) <NEW_LINE> results = self.client.gather(futures) <NEW_LINE> return any(results)
Function to check whether two distributed vectors differ; returns True if any chunk differs.
625941b5b830903b967e9709
def svm_loss_naive(W, X, y, reg): <NEW_LINE> <INDENT> dW = np.zeros(W.shape) <NEW_LINE> num_classes = W.shape[1] <NEW_LINE> num_train = X.shape[0] <NEW_LINE> loss = 0.0 <NEW_LINE> for i in range(num_train): <NEW_LINE> <INDENT> scores = X[i].dot(W) <NEW_LINE> correct_class_score = scores[y[i]] <NEW_LINE> for j in range(num_classes): <NEW_LINE> <INDENT> if j == y[i]: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> margin = scores[j] - correct_class_score + 1 <NEW_LINE> if margin > 0: <NEW_LINE> <INDENT> loss += margin <NEW_LINE> dW[:, j] += X[i, :].T <NEW_LINE> dW[:, y[i]] += -X[i, :].T <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> loss /= num_train <NEW_LINE> dW /= num_train <NEW_LINE> loss += reg * np.sum(W * W) <NEW_LINE> dW += 2 * reg * W <NEW_LINE> return loss, dW
Structured SVM loss function, naive implementation (with loops). Inputs have dimension D, there are C classes, and we operate on minibatches of N examples. Inputs: - W: A numpy array of shape (D, C) containing weights. - X: A numpy array of shape (N, D) containing a minibatch of data. - y: A numpy array of shape (N,) containing training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - reg: (float) regularization strength Returns a tuple of: - loss as single float - gradient with respect to weights W; an array of same shape as W
625941b596565a6dacc8f4c6
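A quick smoke test on tiny random data (shapes per the docstring); with near-zero weights the expected loss is close to C - 1, because every wrong class contributes a margin of about 1:

import numpy as np

np.random.seed(0)
D, C, N = 4, 3, 5
W = np.random.randn(D, C) * 0.0001
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)

loss, dW = svm_loss_naive(W, X, y, reg=0.0)
assert dW.shape == W.shape   # gradient matches the weight shape
print(float(loss))           # ~2.0 here, i.e. C - 1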
def add_node(self, node, parent_id, depth): <NEW_LINE> <INDENT> node_id = hex(id(node)) <NEW_LINE> self.logger.debug('adding node : %s [%s]' % (node_id, depth)) <NEW_LINE> self.nodes[node_id] = { 'raw': node, 'parent_id': parent_id, 'depth': depth }
Add a node to the AST node collection :param node: The AST node to add :param parent_id: The ID of the node's parent :param depth: The depth of the node :return: -
625941b5d58c6744b4257a52
def __init__(self, name: str, path: pathlib.Path) -> None: <NEW_LINE> <INDENT> super().__init__(name, path) <NEW_LINE> self._hg = findhg()
Initialize.
625941b5dc8b845886cb5325
def set_mode(self, mode): <NEW_LINE> <INDENT> self._data['mode'] = mode
Moves the rotary to change which input (if any) is visible. :param mode: can be CubeState.CLOSED, CubeState.TELEGRAPH or CubeState.TRELLIS
625941b550812a4eaa59c117
def TearDownSourcesWithTask(self) -> ObjPath: <NEW_LINE> <INDENT> return TaskContainer.to_object_path( self.implementation.tear_down_sources_with_task() )
Tear down installation sources.
625941b591af0d3eaac9b805
def AwayPart(w, w_grad, listParam, device = 'cpu'): <NEW_LINE> <INDENT> hat_w, alphas = awayStep(w, w_grad, listParam[0], device = device) <NEW_LINE> auxMatrix = hat_w - w <NEW_LINE> aCapTilde = torch.sum(w_grad * auxMatrix, dim = 1) * listParam[1] <NEW_LINE> betas = torch.max(torch.min(aCapTilde, alphas), listParam[3]) <NEW_LINE> w -= (auxMatrix.t() * betas).t()
For the Away Method: listParam[0]: delta listParam[1]: vecDivC listParam[3]: zeroesVec
625941b5d7e4931a7ee9dd0d
def sample_action(self): <NEW_LINE> <INDENT> return random.choice(list(self.actions.keys()))
:return: A random sample from the action space
625941b5cc0a2c11143dcc8a
def test_connection_handler_no_databases(self): <NEW_LINE> <INDENT> DATABASES = {} <NEW_LINE> conns = ConnectionHandler(DATABASES) <NEW_LINE> self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'], 'django.db.backends.dummy') <NEW_LINE> msg = ( 'settings.DATABASES is improperly configured. Please supply the ' 'ENGINE value. Check settings documentation for more details.' ) <NEW_LINE> with self.assertRaisesMessage(ImproperlyConfigured, msg): <NEW_LINE> <INDENT> conns[DEFAULT_DB_ALIAS].ensure_connection()
Empty DATABASES setting defaults to the dummy backend.
625941b57d43ff24873a2a95
def test_no_project_configuration_basic(self): <NEW_LINE> <INDENT> update_translated_resources_without_config( self.db_project, self.vcs_project, self.translated_locale, ) <NEW_LINE> assert TranslatedResource.objects.filter( resource=self.main_db_resource, locale=self.translated_locale ).exists() <NEW_LINE> assert TranslatedResource.objects.filter( resource=self.other_db_resource, locale=self.translated_locale ).exists() <NEW_LINE> assert not TranslatedResource.objects.filter( resource=self.missing_db_resource, locale=self.translated_locale ).exists()
Create/update the TranslatedResource object on all resources available in the current locale.
625941b53eb6a72ae02ec2cc
def parse_kegg(kegg_raw): <NEW_LINE> <INDENT> kegg_info = dict() <NEW_LINE> if kegg_raw.strip() == "": <NEW_LINE> <INDENT> return kegg_info <NEW_LINE> <DEDENT> for line in kegg_raw.split("\n"): <NEW_LINE> <INDENT> fields = re.split(" +", line) <NEW_LINE> if fields[0] != "": <NEW_LINE> <INDENT> field = fields[0] <NEW_LINE> <DEDENT> kegg_info.setdefault(field, list()) <NEW_LINE> kegg_info[field].append(" ".join(fields[1:])) <NEW_LINE> <DEDENT> return kegg_info
Parse raw KEGG flat-file output into a dict mapping field names to lists of values.
625941b594891a1f4081b899
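A small example with KEGG flat-file style input; continuation lines start with spaces (an empty first token after the split), so they inherit the previous field:

kegg_raw = (
    "ENTRY       C00031  Compound\n"
    "NAME        D-Glucose;\n"
    "            Grape sugar"
)
info = parse_kegg(kegg_raw)
# {'ENTRY': ['C00031 Compound'], 'NAME': ['D-Glucose;', 'Grape sugar']}
assert info['NAME'] == ['D-Glucose;', 'Grape sugar']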
def services(self, **query): <NEW_LINE> <INDENT> return self._list(_service.Service, **query)
Retrieve a generator of services :param kwargs query: Optional query parameters to be sent to limit the resources being returned. :returns: A generator of service instances. :rtype: :class:`~openstack.identity.v3.service.Service`
625941b57c178a314d6ef249
def semantic_matching(I_tr, T_tr, I_te, T_te, I_truth, T_truth): <NEW_LINE> <INDENT> I_tr_lr, I_te_lr = _do_classifier(I_tr, I_te, I_truth) <NEW_LINE> T_tr_lr, T_te_lr = _do_classifier(T_tr, T_te, T_truth) <NEW_LINE> return I_tr_lr, T_tr_lr, I_te_lr, T_te_lr
Learns semantic matching (SM) over I_tr, respectively T_tr, and applies it to I_tr and I_te, and, respectively, T_tr, T_te Parameters ---------- I_tr: np.ndarray [shape=(n_tr, d_I)] image data matrix for training T_tr: np.ndarray [shape=(n_tr, d_T)] text data matrix for training I_te: np.ndarray [shape=(n_te, d_I)] image data matrix for testing T_te: np.ndarray [shape=(n_te, d_T)] text data matrix for testing I_truth: np.ndarray [shape=(n_tr,)] ground-truth labels for the image training data T_truth: np.ndarray [shape=(n_tr,)] ground-truth labels for the text training data Returns ------- I_tr_lr : np.ndarray [shape=(n_tr, n_classes)] image data matrix represented in semantic space T_tr_lr : np.ndarray [shape=(n_tr, n_classes)] text data matrix represented in semantic space I_te_lr : np.ndarray [shape=(n_te, n_classes)] image data matrix represented in semantic space T_te_lr : np.ndarray [shape=(n_te, n_classes)] text data matrix represented in semantic space
625941b530c21e258bdfa28e
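_do_classifier is not shown; a plausible sketch, assuming it fits a multinomial logistic regression on the training view and uses class-posterior vectors as the semantic representation:

from sklearn.linear_model import LogisticRegression

def _do_classifier(X_tr, X_te, y_tr):
    # one posterior probability per class; each row becomes a point in semantic space
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    return clf.predict_proba(X_tr), clf.predict_proba(X_te)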
def hasNext(self): <NEW_LINE> <INDENT> return self.nextVal is not None
:rtype: bool
625941b594891a1f4081b89a
def __init__(self, ifproc, bank, nchan, bandwidth, data, tsys=None): <NEW_LINE> <INDENT> LineDataHeader.__init__(self, ifproc, bank, nchan, bandwidth) <NEW_LINE> self.data = data <NEW_LINE> self.yarray = data.copy() <NEW_LINE> if type(tsys) == np.ndarray: <NEW_LINE> <INDENT> self.tarray = tsys.copy() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.tarray = None
Constructor for LineData class. Args: ifproc (object): ifproc object with ifproc information bank (int): selects which bank of the spectrometer is relevant nchan (int): number of channels in the spectrum bandwidth (float): bandwidth of the spectrum (MHz) data (array): the spectral line data tsys (array): [optional] tsys array Returns: none
625941b5dd821e528d63af9d
def extract_spoes(text): <NEW_LINE> <INDENT> tokens = tokenizer.tokenize(text, maxlen=maxlen) <NEW_LINE> mapping = tokenizer.rematch(text, tokens) <NEW_LINE> token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) <NEW_LINE> token_ids, segment_ids = to_array([token_ids], [segment_ids]) <NEW_LINE> subject_preds = subject_model.predict([token_ids, segment_ids]) <NEW_LINE> subject_preds[:, [0, -1]] *= 0 <NEW_LINE> start = np.where(subject_preds[0, :, 0] > 0.6)[0] <NEW_LINE> end = np.where(subject_preds[0, :, 1] > 0.5)[0] <NEW_LINE> subjects = [] <NEW_LINE> for i in start: <NEW_LINE> <INDENT> j = end[end >= i] <NEW_LINE> if len(j) > 0: <NEW_LINE> <INDENT> j = j[0] <NEW_LINE> subjects.append((i, j)) <NEW_LINE> <DEDENT> <DEDENT> if subjects: <NEW_LINE> <INDENT> spoes = [] <NEW_LINE> token_ids = np.repeat(token_ids, len(subjects), 0) <NEW_LINE> segment_ids = np.repeat(segment_ids, len(subjects), 0) <NEW_LINE> subjects = np.array(subjects) <NEW_LINE> object_preds = object_model.predict([token_ids, segment_ids, subjects]) <NEW_LINE> object_preds[:, [0, -1]] *= 0 <NEW_LINE> for subject, object_pred in zip(subjects, object_preds): <NEW_LINE> <INDENT> start = np.where(object_pred[:, :, 0] > 0.6) <NEW_LINE> end = np.where(object_pred[:, :, 1] > 0.5) <NEW_LINE> for _start, predicate1 in zip(*start): <NEW_LINE> <INDENT> for _end, predicate2 in zip(*end): <NEW_LINE> <INDENT> if _start <= _end and predicate1 == predicate2: <NEW_LINE> <INDENT> spoes.append( ((mapping[subject[0]][0], mapping[subject[1]][-1]), predicate1, (mapping[_start][0], mapping[_end][-1])) ) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return [(text[s[0]:s[1] + 1], id2predicate[p], text[o[0]:o[1] + 1]) for s, p, o, in spoes] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return []
Extract the (subject, predicate, object) triples contained in the input text.
625941b576d4e153a657e922