text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(self):
    METHOD_NAME = []
    METHOD_NAME += self.with_or_without("mpi")
    METHOD_NAME += self.with_or_without("fortran")
    if "+mpi" in self.spec:
        mpi_link_flags = self.spec["mpi:cxx"].libs.link_flags
        METHOD_NAME.append("LIBS={0}".format(mpi_link_flags))
        METHOD_NAME.append("CC={0}".format(self.spec["mpi"].mpicc))
        METHOD_NAME.append("CXX={0}".format(self.spec["mpi"].mpicxx))
        if "+fortran" in self.spec:
            METHOD_NAME.append("FC={0}".format(self.spec["mpi"].mpifc))
    return METHOD_NAME
[ 111, 335 ]
def METHOD_NAME():
    """ Service jobs in an infinite loop """
    get_engine().dispose()
    t = create_prometheus_server(job_process_registry, 8001)
    try:
        while True:
            # if no job was found, sleep for a second, otherwise query for another job straight away
            if not process_job():
                sleep(1)
    finally:
        logger.info(f"Closing prometheus server")
        t.server_close()
[ 549, 494 ]
def METHOD_NAME(self):
    with ReaderFactory("sol") as reader:
        if reader is None:
            raise IOError("Reader 'sol' is not registered")
        soln = reader(join(currdir, "infeasible1.sol"))
        self.assertEqual(
            soln.solver.termination_condition, TerminationCondition.infeasible
        )
        self.assertEqual(soln.solution.status, SolutionStatus.infeasible)
        self.assertEqual(soln.solver.status, SolverStatus.warning)
        self.assertFalse(check_optimal_termination(soln))
        with self.assertRaises(RuntimeError):
            assert_optimal_termination(soln)
[ 9, -1 ]
def METHOD_NAME(cls):
    """Register all related modules and rewriters for mmrazor."""
    from mmrazor.utils import METHOD_NAME
    METHOD_NAME(True)
[ 372, 75, 468 ]
def METHOD_NAME(x, p):
    """A regularized power-law that gets rid of singularities, abs(x)**p*sign(x)

    :param x: x
    :param p: p
    :return:"""
    return np.abs(x) ** p * np.sign(x)
[ 3617 ]
def METHOD_NAME(mesh, plc, data=None, label='', out=None, showMesh=0):
    ax = None
    if data is not None:
        ax, _ = pg.show(mesh, data, hold=1, colorBar=1, pad=0.55, label=label)
    if showMesh:
        ax, _ = pg.show(mesh, axes=ax, hold=1)
    if out:
        ax, _ = pg.show(plc, axes=ax)
        adjustAxes(ax)
        plt.pause(0.01)
        ax.figure.METHOD_NAME(out + '.pdf', bbox_inches='tight')
        try:
            print("trying pdf2pdfS ... ")
            os.system('pdf2pdfBB ' + out + '.pdf')
            os.system('pdf2pdfS ' + out + '.pdf')
        except:
            pass
    else:
        ax, _ = pg.show(plc, axes=ax)
    return ax
[ 15345 ]
def METHOD_NAME(self):
    # Just checking the new file case.
    # Assume the existing file timestamp case will work if this does.
    with TemporaryDirectory() as base_dir:
        new_file_path = Path(base_dir) / "new_file"
        assert not new_file_path.exists()
        util.touch(new_file_path)
        assert new_file_path.exists()
[ 9, 3236 ]
def METHOD_NAME(self):
[ 9, 2986 ]
def METHOD_NAME(self, destination_directory):
    raise Exception("unsupported operation")
[ 73, 551 ]
def METHOD_NAME(request, file_with_test_name):
    def run(conv, problemType, solution=Solutions.defaultSolution(), problemFunc=None, problemLevel=-1, dataType='s'):
        if problemFunc == None:
            problemFunc = YamlBuilder.ProblemSizes
        if problemLevel==-1:
            problemLevel = request.config.getoption("--problem-level")
        config = YamlBuilder.ConvolutionContraction(conv, problemType, solution, dataType, problemFunc, True, problemLevel)
        configFile = file_with_test_name(".contraction.yaml")
        print("Generate_YAML output:", configFile)
        config.write(configFile)
        return configFile
    return run
[ 22, 567, 406 ]
def METHOD_NAME(self, pickle_str_ag_with_universe_f):
    newu, newag = pickle.loads(pickle_str_ag_with_universe_f)
    assert newag.universe is newu, (
        "AtomGroup is not unpickled to the bound Universe"
        "when Universe is pickled first"
    )
[ 9, 7922, 12277, 41, 4850, 474 ]
def METHOD_NAME(self):
    """
    Retrieves the description of the component

    Returns:
        A string containing the description of the component
    """
    return COMPONENT_LIST[self.index][1]
[ 19, 1067 ]
def METHOD_NAME(self):
[ 9, 567, 700, 599, 130, 259 ]
def METHOD_NAME(self, generator): generator(self)
[ -1 ]
def METHOD_NAME(self, row: Dict[str, Any]) -> Union[TableKey, None]:
    """
    Table key consists of schema and table name
    :param row:
    :return:
    """
    if row:
        return TableKey(schema=row['schema'], table_name=row['name'])
    return None
[ 19, 410, 59 ]
def METHOD_NAME(self):
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()

    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)

    raw_speech = floats_list((3, 1000))

    input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np")
    input_processor = processor(audio=raw_speech, return_tensors="np")

    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
[ 9, 964, 2761 ]
def METHOD_NAME(self, x, y):
[ 3495, 4245 ]
def METHOD_NAME( labels: List[Label], add_closed_domain_filter: bool = False, add_meta_filters: Optional[Union[str, list]] = None, drop_negative_labels: bool = False, drop_no_answers: bool = False, ) -> List[MultiLabel]: """ Aggregates Labels into MultiLabel objects (e.g. for evaluation with `Pipeline.eval()`). Labels are always aggregated by question and filters defined in the Label objects. Beyond that you have options to drop certain labels or to dynamically add filters to control the aggregation process. Closed domain aggregation: If the questions are being asked only on the document defined within the Label (i.e. SQuAD style), set `add_closed_domain_filter=True` to aggregate by question, filters and document. Note that Labels' filters are enriched with the document_id of the Label's document. Note that you don't need that step - if your labels already contain the document_id in their filters - if you're using `Pipeline.eval()`'s `add_isolated_node_eval` feature Dynamic metadata aggregation: If the questions are being asked on a subslice of your document set, that is not defined with the Label's filters but with an additional meta field, populate `add_meta_filters` with the names of Label meta fields to aggregate by question, filters and your custom meta fields. Note that Labels' filters are enriched with the specified meta fields defined in the Label. Remarks: `add_meta_filters` is only intended for dynamic metadata aggregation (e.g. separate evaluations per document type). For standard questions use-cases, where a question is always asked on multiple files individually, consider setting the Label's filters instead. For example, if you want to ask a couple of standard questions for each of your products, set filters for "product_id" to your Labels. Thus you specify that each Label is always only valid for documents with the respective product_id. :param labels: List of Labels to aggregate. :param add_closed_domain_filter: When True, adds a filter for the document ID specified in the label. Thus, labels are aggregated in a closed domain fashion based on the question text, filters, and also the id of the document that the label is tied to. See "closed domain aggregation" section for more details. :param add_meta_filters: The names of the Label meta fields by which to aggregate in addition to question and filters. For example: ["product_id"]. Note that Labels' filters are enriched with the specified meta fields defined in the Label. :param drop_negative_labels: When True, labels with incorrect answers and documents are dropped. :param drop_no_answers: When True, labels with no answers are dropped. :return: A list of MultiLabel objects. 
""" if add_meta_filters: if type(add_meta_filters) == str: add_meta_filters = [add_meta_filters] else: add_meta_filters = [] # drop no_answers in order to not create empty MultiLabels if drop_no_answers: labels = [label for label in labels if label.no_answer is False] # add filters for closed domain and dynamic metadata aggregation for l in labels: label_filters_to_add = {} if add_closed_domain_filter: label_filters_to_add["_id"] = l.document.id for meta_key in add_meta_filters: meta = l.meta or {} curr_meta = meta.get(meta_key, None) if curr_meta: curr_meta = curr_meta if isinstance(curr_meta, list) else [curr_meta] label_filters_to_add[meta_key] = curr_meta if label_filters_to_add: if l.filters is None: l.filters = label_filters_to_add else: l.filters.update(label_filters_to_add) # Filters define the scope a label is valid for the query, so we group the labels by query and filters. grouped_labels: Dict[Tuple, List[Label]] = defaultdict(list) for l in labels: label_filter_keys = [f"{k}={v}" for k, v in l.filters.items()] if l.filters else [] group_keys: list = [l.query] + label_filter_keys group_key = tuple(group_keys) grouped_labels[group_key].append(l) aggregated_labels = [ MultiLabel(labels=ls, drop_negative_labels=drop_negative_labels, drop_no_answers=drop_no_answers) for ls in grouped_labels.values() ] return aggregated_labels
[ 3428, 415 ]
def METHOD_NAME():
    """
    Test status.netdev for AIX
    :return:
    """
    # Output from netstat -i -n -I <en0|en1|lo0> -f inet
    netstat_inet4_en0 = """Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
[ 9, 16771 ]
def METHOD_NAME(self, cognite_client: CogniteClient) -> None:
    for space in cognite_client.data_modeling.spaces:
        assert isinstance(space, Space)
[ 9, 3972, 3217, 1041 ]
def METHOD_NAME(B, x):
    res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
    res.shape = (2, x.shape[-1])
    return res
[ 2962, 6063 ]
def METHOD_NAME(self, action: int) -> float: """ An example reward function. This is the one function that users will likely wish to inject their own creativity into. Warning! This is function is a showcase of functionality designed to show as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark, it is *not* for live production. :param action: int = The action made by the agent for the current candle. :return: float = the reward to give to the agent for current step (used for optimization of weights in NN) """ # first, penalize if the action is not valid if not self._is_valid(action): self.tensorboard_log("invalid", category="actions") return -2 pnl = self.get_unrealized_profit() factor = 100. # reward agent for entering trades if (action == Actions.Long_enter.value and self._position == Positions.Neutral): return 25 if (action == Actions.Short_enter.value and self._position == Positions.Neutral): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: factor *= 1.5 elif trade_duration > max_trade_duration: factor *= 0.5 # discourage sitting in position if (self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value): return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) return float(pnl * factor) return 0.
[ 1593, 4181 ]
def METHOD_NAME(self, units, overlap):
    radius = 4
    depth = ImageDepth(radius, nsigma=5.0, napers=100, niters=2,
                       overlap=overlap, seed=123, zeropoint=23.9,
                       progress_bar=False)
    if overlap:
        exp_limits = (66.60324687694235, 19.341261496655026)
    else:
        exp_limits = (67.65345151009167, 19.324275104703975)

    data = self.data
    fluxlim = exp_limits[0]
    if units:
        data = self.data * u.Jy
        fluxlim *= u.Jy
    limits = depth(data, self.mask)

    assert_allclose(limits[1], exp_limits[1])
    if not units:
        assert_allclose(limits[0], fluxlim)
    else:
        assert_quantity_allclose(limits[0], fluxlim)
[ 9, 660, 3144 ]
def METHOD_NAME(self, dataDict):
    """ Set values from key-value dictionary """
    errKeys = []
    for key in dataDict:
        if key not in self.fieldsList:
            errKeys.append(key)
    if errKeys:
        return S_ERROR(f"Key(s) {', '.join(errKeys)} are not valid")
    for key in dataDict:
        self.setValueByKey(key, dataDict[key])
    return S_OK()
[ 0, 199, 280, 553 ]
def METHOD_NAME(self) -> Sequence[str]:
    """
    Route Target List.The expected formats are ASN(plain):NN >> example 4294967294:50,
    ASN.ASN:NN >> example 65533.65333:40, IP-address:NN >> example 10.10.10.10:65535.
    The possible values of ASN,NN are in range of 0-65535, ASN(plain) is in range of 0-4294967295.
    """
    return pulumi.get(self, "route_targets")
[ 2476, 465 ]
def METHOD_NAME(attribute_bounds):
    if not isinstance(attribute_bounds, dict):
        raise TypeError('attribute_bounds must be a dict mapping template to bounds; '
                        'got {}'.format(attribute_bounds))
    if len(attribute_bounds) != 1:
        raise NotImplementedError('Currently, only searches with exactly one template '
                                  'to bounds mapping are supported; got {}'
                                  .format(attribute_bounds))
    return {
        'attribute_bounds': {
            get_object_id(templ): bounds.as_dict()
            for templ, bounds in attribute_bounds.items()
        }
    }
[ 19, 309, 634, 1070, 2829 ]
def METHOD_NAME(self, chunk, fields):
    """Not implemented yet."""
    # This reads the data from a single chunk without doing any selection,
    # and is only used for caching data that might be used by multiple
    # different selectors later. For instance, this can speed up ghost zone
    # computation.
    # it should be used by _read_fluid_selection instead of _read_data
    raise NotImplementedError
[ 203, 464, 365 ]
def METHOD_NAME(self):
    obj = self.df.copy(deep=True)
    start_cols = obj.columns
    out = obj.pyrochem.parse_chem()
    self.assertTrue(len(out.columns) == len(start_cols))
    self.assertTrue(
        all([a == b for (a, b) in zip(out.columns, start_cols) if "/" not in a])
    )
[ 9, -1, 214, 15841 ]
def METHOD_NAME(self) -> None: ...
[ 1160 ]
def METHOD_NAME(module_names, module_contents, template=None): """ Parameters ---------- template : str or dict or plotly.graph_objects.layout.Template instance The figure template name or definition. Returns ------- fig : graph_objects.Figure containing the displayed image A `Figure` object. This figure demonstrates the color scales and sequences in this module, as polar bar charts. """ import plotly.graph_objects as go from plotly.subplots import make_subplots from plotly.express._core import apply_default_cascade args = dict(template=template) apply_default_cascade(args) rows = 2 cols = 4 scales = [ (k, v) for k, v in module_contents.items() if not (k.startswith("_") or k.startswith("swatches") or k.endswith("_r")) ] names = [name for name, colors in scales] fig = make_subplots( rows=rows, cols=cols, subplot_titles=names, specs=[[{"type": "polar"}] * cols] * rows, ) for i, (name, scale) in enumerate(scales): fig.add_trace( go.Barpolar( r=[1] * int(360 / 5), theta=list(range(0, 360, 5)), marker_color=list(range(0, 360, 5)), marker_cmin=0, marker_cmax=360, marker_colorscale=name, name=name, ), row=int(i / cols) + 1, col=i % cols + 1, ) fig.update_traces(width=5.2, marker_line_width=0, base=0.5, showlegend=False) fig.update_polars(angularaxis_visible=False, radialaxis_visible=False) fig.update_layout( title="plotly.colors." + module_names.split(".")[-1], template=args["template"] ) return fig
[ 7355, 7356 ]
def METHOD_NAME(self):
    """Content of the ``BodyPart`` in unicode."""
    return self.content.decode(self.encoding)
[ 526 ]
def METHOD_NAME(
    self, pyramid_request, value, family, class_, application_instance
):
    pyramid_request.content_type = "application/json"
    pyramid_request.json = {"lms": {"product": value}}

    product = get_product_from_request(pyramid_request)

    class_.from_request.assert_called_once_with(
        pyramid_request, application_instance.settings
    )
    assert product == class_.from_request.return_value
    assert product.family == family
[ 9, 280, 377, 58 ]
def METHOD_NAME():
    from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
    from ddtrace.internal.schema import schematize_cache_operation
    from ddtrace.internal.schema import schematize_cloud_api_operation
    from ddtrace.internal.schema import schematize_database_operation
    from ddtrace.internal.schema import schematize_service_name
    from ddtrace.internal.schema import schematize_url_operation
    from ddtrace.internal.schema.span_attribute_schema import cache_operation_v0
    from ddtrace.internal.schema.span_attribute_schema import cloud_api_operation_v0
    from ddtrace.internal.schema.span_attribute_schema import database_operation_v0
    from ddtrace.internal.schema.span_attribute_schema import service_name_v0
    from ddtrace.internal.schema.span_attribute_schema import url_operation_v0

    assert DEFAULT_SPAN_SERVICE_NAME is None
    assert schematize_service_name == service_name_v0
    assert schematize_database_operation == database_operation_v0
    assert schematize_cache_operation == cache_operation_v0
    assert schematize_cloud_api_operation == cloud_api_operation_v0
    assert schematize_url_operation == url_operation_v0
[ 9, 549, 83, 512, 235 ]
def METHOD_NAME(attr_name: str, value: str, expected_value: Any) -> None:
    env_name = f"GPFLOW_{attr_name.upper()}"
    with mock.patch.dict("os.environ", {env_name: value}):
        assert os.environ[env_name] == value
        config = gpflow.config.Config()
        assert getattr(config, attr_name) == expected_value
[ 9, 485, 2045 ]
def METHOD_NAME():
    input = """
    query {
        projects {
            name
            slug
        }
    }
    """
    (query,) = parse_graphql_query(input)
    info = MockResolveInfo(query)
    assert get_fields(info) == ["projects", "projects.name", "projects.slug"]
[ 9, 19, 342, 1401 ]
def METHOD_NAME():
    return test_median(
        lambda a: _median3(a[0], a[1], a[2], 0, 1, 2), 3, 3
    ) and test_median(_median5, 1, 5)
[ 9, 14345 ]
def METHOD_NAME(cls) -> onnx.TypeProto:
    if cls.shape is None:
        shape = ()  # "FLOAT" is treated as a scalar
    elif cls.shape is Ellipsis:
        shape = None  # "FLOAT[...]" is a tensor of unknown rank
    elif isinstance(cls.shape, tuple):
        shape = cls.shape  # example: "FLOAT[10,20]"
    else:
        shape = [cls.shape]  # example: "FLOAT[10]"
    return onnx.helper.make_tensor_type_proto(cls.dtype, shape)
[ 24, 44, 2640 ]
def METHOD_NAME(self, app_vertex):
    super().METHOD_NAME(app_vertex)
    if not isinstance(app_vertex, DelayExtensionVertex):
        raise PacmanConfigurationException(
            f"The vertex {app_vertex} cannot be supported by the "
            "SplitterDelayVertexSlice as the only vertex supported by "
            "this splitter is a DelayExtensionVertex. Please use the "
            "correct splitter for your vertex and try again.")
[ 0, 16762, 991, 3063 ]
def METHOD_NAME(input_dir, output_zipfile, delete=False):
    """Zips subdirectories of input_dir to output_zipfile (without compression).

    Travels into subdirectories, but not sub-subdirectories.
    Skips any other files in directory.

    :param delete: If True, delete original directories
    """
    with zipfile.ZipFile(output_zipfile, mode='w') as zp:
        for dirn in os.listdir(input_dir):
            full_dirn = os.path.join(input_dir, dirn)
            if not osp.isdir(full_dirn):
                continue
            for fn in os.listdir(full_dirn):
                zp.write(os.path.join(full_dirn, fn),
                         arcname=os.path.join(dirn, fn))
            if delete:
                shutil.rmtree(full_dirn)
[ 1426, 1190 ]
def METHOD_NAME(testapp, analysis_step_atac_encode4_alignment, software_version):
    item = {
        'analysis_step': analysis_step_atac_encode4_alignment['@id'],
        'minor_version': 0,
        'software_versions': [
            software_version['@id'],
        ],
    }
    return testapp.post_json('/analysis_step_version', item).json['@graph'][0]
[ 689, 367, 281, -1, -1, 5508 ]
def METHOD_NAME(self, m):
    mtype = m.get_type()
    if mtype == 'HIGH_LATENCY2':
        mode_map = mavutil.mode_mapping_bynumber(m.type)
        if mode_map and m.custom_mode in mode_map:
            self.master.flightmode = mode_map[m.custom_mode]
[ 6433, 5788 ]
def METHOD_NAME():
    sel = alt.selection_point(empty=False)
    c = (
        alt.Chart()
        .mark_point()
        .encode(size=alt.condition(sel, alt.value(100), alt.value(10)))
        .add_params(sel)
    )
    dct = c.to_dict()
    param_name = sel.param.name

    cond = dct["encoding"]["size"]["condition"]
    assert cond["value"] == 100
    assert cond["param"] == param_name

    # The else condition
    assert dct["encoding"]["size"]["value"] == 10
[ 9, 3115, 405 ]
def METHOD_NAME(self): valid_steiner_trees = [ [ [ (1, 2, {"weight": 10}), (2, 3, {"weight": 10}), (2, 7, {"weight": 1}), (3, 4, {"weight": 10}), (5, 7, {"weight": 1}), ], [ (1, 2, {"weight": 10}), (2, 7, {"weight": 1}), (3, 4, {"weight": 10}), (4, 5, {"weight": 10}), (5, 7, {"weight": 1}), ], [ (1, 2, {"weight": 10}), (2, 3, {"weight": 10}), (2, 7, {"weight": 1}), (4, 5, {"weight": 10}), (5, 7, {"weight": 1}), ], ], [ [ (0, 5, {"weight": 6}), (1, 2, {"weight": 2}), (1, 5, {"weight": 3}), (3, 5, {"weight": 5}), ], [ (0, 5, {"weight": 6}), (4, 2, {"weight": 4}), (4, 5, {"weight": 1}), (3, 5, {"weight": 5}), ], ], [ [ (1, 10, {"weight": 2}), (3, 10, {"weight": 2}), (3, 11, {"weight": 1}), (5, 12, {"weight": 1}), (6, 13, {"weight": 1}), (8, 9, {"weight": 2}), (9, 14, {"weight": 1}), (10, 14, {"weight": 1}), (11, 12, {"weight": 1}), (12, 15, {"weight": 1}), (13, 15, {"weight": 1}), ] ], ] for method in self.methods: for G, term_nodes, valid_trees in zip( [self.G1, self.G2, self.G3], [self.G1_term_nodes, self.G2_term_nodes, self.G3_term_nodes], valid_steiner_trees, ): S = steiner_tree(G, term_nodes, method=method) assert any( edges_equal(list(S.edges(data=True)), valid_tree) for valid_tree in valid_trees )
[ 9, 13289, 151 ]
def METHOD_NAME(filename):
    return parse_file(filename, use_cpp=True,
                      cpp_args=[
                          r'-I{}'.format(os.path.dirname(filename)),
                          r'-I{}'.format(FAKE_LIBC_INCLUDE_DIR),
                          r'-D_DOXYGEN_ONLY_'])
[ 214, 572 ]
def METHOD_NAME(self, l, move=None):
    t = self.thickness
    tw, th = l+2, t+8

    if self.move(tw, th, move, True):
        return

    self.moveTo(1, t)
    self.edges["f"](l)
    poly = [0, 90, 6, -60, 0, (120, 2*3**-.5), 0, 30, 2, 90, 5, (-180, .5), 5, 90]
    self.polyline(*(poly+[l-2*3]+list(reversed(poly))))

    self.move(tw, th, move)
[ 13724, 15599 ]
def METHOD_NAME(self):
    """
    Returns a dict containing an id and name identifying the
    brand. Useful when displaying logos next to accounts in
    templates.

    For most providers, these are identical to the provider. For
    OpenID however, the brand can derived from the OpenID identity
    url.
    """
    provider = self.account.get_provider()
    return dict(id=provider.id, name=provider.name)
[ 19, 13105 ]
def METHOD_NAME():
    try:
        raise ComponentNotAvailable(type)
    except Exception as exc:
        log.reraise_exception("{0} in {1}", exc, session)
[ 339 ]
def METHOD_NAME(event):
    """ let us know that the user was logged in here successfully """
    request = getRequest()
    if request is None:
        return
    if not ICastleLayer.providedBy(request):
        return

    request.environ[LOGGED_IN_MARKER_KEY] = 'yes'

    # do not even allow user to login if the account has been
    # disabled
    try:
        if api.user.get_roles(user=event.object) == ['Authenticated']:
            site = getSite()
            raise Redirect('%s/@@disabled-user' % site.absolute_url())
    except api.exc.UserNotFoundError:
        log.warn('could not find logged in user {}'.format(repr(event.object)))
[ 69, 21, 1099, 623 ]
def METHOD_NAME(self): self.runsafe(live_neuron_voltage)
[ 9, 1824, 8476, 268 ]
def METHOD_NAME(self, method, url, body, headers):
    body = self.fixtures.load("r_orders_order_88833465_api_ivan_net_nz_vps.json")
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
[ 3264, 3868, 852, 13907, 58, 13908, 819 ]
def METHOD_NAME(block: bytes, nb_chunks: int) -> List[bytes]:
    payload_size = len(block) + 4  # encode block len as a uint32
    chunk_len = payload_size // nb_chunks
    if nb_chunks * chunk_len < payload_size:
        chunk_len += 1
    padding_len = chunk_len * nb_chunks - payload_size
    payload = struct.pack("!I", len(block)) + block + b"\x00" * padding_len
    return [payload[chunk_len * i : chunk_len * (i + 1)] for i in range(nb_chunks)]
[ 265, 573, 623, 831 ]
def METHOD_NAME(self, pred_results: torch.Tensor, data_sample: TextDetDataSample, **kwargs) -> TextDetDataSample: """ Args: pred_result (torch.Tensor): Prediction results of an image which is a tensor of shape :math:`(N, H, W)`. data_sample (TextDetDataSample): Datasample of an image. Returns: TextDetDataSample: A new DataSample with predictions filled in. Polygons and results are saved in ``TextDetDataSample.pred_instances.polygons``. The confidence scores are saved in ``TextDetDataSample.pred_instances.scores``. """ assert pred_results.dim() == 3 pred_results = torch.sigmoid(pred_results) # text confidence masks = pred_results > self.min_kernel_confidence text_mask = masks[0, :, :] kernel_masks = masks[0:, :, :] * text_mask kernel_masks = kernel_masks.data.cpu().numpy().astype(np.uint8) score = pred_results[0, :, :] score = score.data.cpu().numpy().astype(np.float32) region_num, labels = cv2.connectedComponents( kernel_masks[-1], connectivity=4) labels = contour_expand(kernel_masks, labels, self.min_kernel_area, region_num) labels = np.array(labels) label_num = np.max(labels) polygons = [] scores = [] for i in range(1, label_num + 1): points = np.array(np.where(labels == i)).transpose((1, 0))[:, ::-1] area = points.shape[0] score_instance = np.mean(score[labels == i]) if not (area >= self.min_text_area or score_instance > self.score_threshold): continue polygon = self._points2boundary(points) if polygon: polygons.append(polygon) scores.append(score_instance) pred_instances = InstanceData() pred_instances.polygons = polygons pred_instances.scores = torch.FloatTensor(scores) data_sample.pred_instances = pred_instances scale_factor = data_sample.scale_factor scale_factor = tuple(factor * self.downsample_ratio for factor in scale_factor) data_sample.set_metainfo(dict(scale_factor=scale_factor)) return data_sample
[ 19, 526, 2553 ]
def METHOD_NAME(self, *args, **kwargs): pass
[ 74, 5996 ]
def METHOD_NAME() -> TargetHost:
    return TargetHost(
        ip=TARGET_IP,
        operating_system=OperatingSystem.WINDOWS,
        ports_status=OPEN_SSH_PORTS,
    )
[ 1030, 1806 ]
def METHOD_NAME(topology_st): topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n") maxbersizestr = getMaxBerSizeFromDseLdif(topology_st) maxbersize = int(maxbersizestr) isdefault = True defaultvalue = 2097152 if maxbersize < 0: topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") elif maxbersize == 0: topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) else: isdefault = False topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) try: entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE, "(cn=*)", ['nsslapd-maxbersize']) if entry: searchedsize = entry[0].getValue('nsslapd-maxbersize') topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) else: topology_st.standalone.log.fatal('ERROR: cn=config is not found?') assert False except ldap.LDAPError as e: topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) assert False if isdefault: topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) assert int(searchedsize) == defaultvalue
[ 250, 232, 3122, 1318 ]
def METHOD_NAME(self): self.ThreatIntelligenceIndicatorMetricsList(ctx=self.ctx)()
[ 750, 710 ]
def METHOD_NAME(test):
    @functools.wraps(test)
    def wrapper(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=category)
            return test(self, *args, **kwargs)
    return wrapper
[ 972 ]
def METHOD_NAME(self, stepCount):
    """Scrolls the indicated number of steps forward. If stepCount is
    negative, scrolls backward."""
    self['value'] = self.guiItem.getValue() + self.guiItem.getScrollSize() * stepCount
[ 3476, 367 ]
def METHOD_NAME(self, params):
[ 0, 390, 171, 3336 ]
def METHOD_NAME(self, raw_content, bcs_variables):
    t = jinja2.Template(raw_content)
    return t.render(bcs_variables)
[ 338, 41, 2045 ]
def METHOD_NAME(url, slug, model, filepath): # noqa: C901 # if url.endswith(".m3u8"): # url = url.split(".m3u8")[0] + "_mid/index.m3u8" trans_model = Model(model) rec = KaldiRecognizer(trans_model, __SAMPLE_RATE__) rec.SetWords(True) last_caption = None thread_id = threading.get_ident() while LIVE_CELERY_TRANSCRIPTION or thread_id not in threads_to_stop: start = time.time() command = [ "ffmpeg", "-y", "-loglevel", "quiet", "-i", url, "-ss", "00:00:00.005", "-t", "00:00:05", "-acodec", "pcm_s16le", "-ac", "1", "-ar", str(__SAMPLE_RATE__), "-f", "s16le", "-", ] with subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as process: results = [] data = process.stdout.read(4000) while True: if len(data) == 0: break else: data = process.stdout.read(4000) if rec.AcceptWaveform(data): results.append(rec.Result()) results.append(rec.FinalResult()) vtt = WebVTT() caption_text = "" for _, res in enumerate(results): words = json.loads(res).get("result") if not words: continue # start = timestring(words[0]["start"]) # end = timestring(words[-1]["end"]) content = " ".join([w["word"] for w in words]) caption_text += content + " " if last_caption: last_caption_text, current_caption_text = ( last_caption.text.strip(), caption_text.strip(), ) last_caption_words, current_caption_words = last_caption_text.split( " " ), current_caption_text.split(" ") current_caption_words1 = current_caption_words[ 1 : len(current_caption_words) ] for i in range(len(last_caption_words) - 1, 0, -1): if ( last_caption_words[-i:] == current_caption_words[:i] or last_caption_words[-i:] == current_caption_words1[:i] ): caption_text = " ".join(current_caption_words[i:]) break if last_caption_text in caption_text: caption_text = caption_text.replace(last_caption_text, "").strip() if caption_text in last_caption_text: caption_text = caption_text.replace(caption_text, "").strip() current_start = timestring(0) current_end = timestring(86400) if caption_text != "": caption = Caption(current_start, current_end, caption_text) last_caption = caption # print(caption_text) vtt.captions.append(caption) # save or return webvtt vtt.save(filepath) now = time.time() - start if now < 5: time.sleep(5 - now) # print("stopped transcription") threads_to_stop.remove(thread_id) vtt = WebVTT() vtt.save(filepath)
[ 5688 ]
def METHOD_NAME(self): ...
[ 19, 13902, 156 ]
def METHOD_NAME(*args, **kwargs):
    cmd_line = [sys.executable, '-E']
    cmd_line.extend(args)
    return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            **kwargs)
[ 597, 440 ]
def METHOD_NAME(self):
    # Arrange
    for _i in range(100):
        self.obv.update_raw(1.00000, 1.00010, 10000)

    # Act, Assert
    assert self.obv.initialized is True
[ 9, 924, 41, 984, 1461, 610, 2019 ]
def METHOD_NAME(arg):
    """Transform an argument."""
    arg.update(convert_flags_to_boolean_dict(arg.pop('flags', [])))
    set_if_not_none_or_empty(arg, 'arguments',
                             [METHOD_NAME(x) for x in arg.pop('arguments', [])])
    return arg
[ 197, 1545 ]
def METHOD_NAME(self):
[ -1 ]
def METHOD_NAME(argv): """ Creates the GetOpt table + read and validate the options given when calling the script. :param argv: command-line arguments :type argv: list :return: the path to the conp-dataset directory :rtype: str """ conp_dataset_dir_path = None description = ( "\nThis tool facilitates the creation of statistics per data providers for reporting purposes." " It will read DATS files and print out a summary per data providers based on the following list" "of DATS fields present in the DATS. json of every dataset present in the conp-dataset/projects" "directory.\n Queried fields: <distribution->access->landingPage>; " "<distributions->access->authorizations>; " "<distributions->size>; <extraProperties->files>; <keywords>\n" ) usage = ( "\n" "usage : python " + __file__ + " -d <conp-dataset directory path>\n\n" "options: \n" "\t-d: path to the conp-dataset directory to parse\n" ) try: opts, args = getopt.getopt(argv, "hd:") except getopt.GetoptError: sys.exit() for opt, arg in opts: if opt == "-h": print(description + usage) sys.exit() elif opt == "-d": conp_dataset_dir_path = arg if not conp_dataset_dir_path: print( "a path to the conp-dataset needs to be given as an argument to the script by using the option `-d`", ) print(description + usage) sys.exit() if not os.path.exists(conp_dataset_dir_path + "/projects"): print( conp_dataset_dir_path + "does not appear to be a valid path and does not include a `projects` directory", ) print(description + usage) sys.exit() return conp_dataset_dir_path
[ 214, 362 ]
def METHOD_NAME( circuit: 'cirq.Circuit', qubit_order: Sequence['cirq.Qid'], acquaintance_size: Optional[int] = 0, swap_gate: 'cirq.Gate' = ops.SWAP, ) -> bool: """Replace every rectified moment with acquaintance gates with a generalized swap network. The generalized swap network has a partition given by the acquaintance gates in that moment (and singletons for the free qubits). Accounts for reversing effect of swap networks. Args: circuit: The acquaintance strategy. qubit_order: The qubits, in order, on which the replacing swap network gate acts on. acquaintance_size: The acquaintance size of the new swap network gate. swap_gate: The gate used to swap logical indices. Returns: Whether or not the overall effect of the inserted swap network gates is to reverse the order of the qubits, i.e. the parity of the number of swap network gates inserted. Raises: TypeError: circuit is not an acquaintance strategy. """ rectify_acquaintance_strategy(circuit) reflected = False reverse_map = {q: r for q, r in zip(qubit_order, reversed(qubit_order))} for moment_index, moment in enumerate(circuit): if reflected: moment = moment.transform_qubits(reverse_map.__getitem__) if all(isinstance(op.gate, AcquaintanceOpportunityGate) for op in moment.operations): swap_network_gate = SwapNetworkGate.from_operations( qubit_order, moment.operations, acquaintance_size, swap_gate ) swap_network_op = swap_network_gate(*qubit_order) moment = circuits.Moment([swap_network_op]) reflected = not reflected circuit._moments[moment_index] = moment return reflected
[ 369, 12559, 41, 3007, 1228 ]
def METHOD_NAME(self): for source_file_info, source_content_type in [ (None, None), ({ 'a': 'b' }, None), (None, 'text/plain'), ]: with self.subTest( source_file_info=source_file_info, source_content_type=source_content_type ): with self.assertRaises( SSECKeyIdMismatchInCopy, 'attempting to copy file using MetadataDirectiveMode.COPY without providing source_file_info ' 'and source_content_type for differing sse_c_key_ids: source="some-id-2", destination="some-id"' ): CopyManager.establish_sse_c_file_metadata( MetadataDirectiveMode.COPY, destination_file_info=None, destination_content_type=None, destination_server_side_encryption=SSE_C_AES, source_server_side_encryption=SSE_C_AES_2, source_file_info=source_file_info, source_content_type=source_content_type, )
[ 9, 3502, 1381, 2629, 215, 505, 46 ]
def METHOD_NAME(self) -> None:
    # as there is a separate stack push & pop operation management, if stack is empty,
    # it should be the scenario that user call `set_pipeline_settings` out of `pipeline` decorator,
    # then directly raise user error.
    if len(self._stack) == 0:
        error_message = "Please call `set_pipeline_settings` inside a `pipeline` decorated function."
        raise UserErrorException(
            message=error_message,
            no_personal_data_message=error_message,
        )
[ 250, 1501 ]
def METHOD_NAME(device_mesh, rank):
    # tensor to comm
    tensor_to_comm = torch.ones(2, 2).cuda() * rank
    tensor_to_check = torch.ones(2, 2).cuda() * rank

    dim_partition_dict = {}
    # DistSpec:
    #     shard_sequence: R,R
    #     device_mesh_shape: (2, 2)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_comm.shape, dim_partition_dict=dim_partition_dict)

    comm_spec = CommSpec(CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                         sharding_spec,
                         logical_process_axis=0)
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)

    assert tensor_to_comm.equal(tensor_to_check)
[ 250, 75, 332, 5299 ]
def METHOD_NAME(key: str, default_return: None = None) -> Optional[str]: return SYSTEM_CONFIG_GETTERS.get(key, lambda: default_return)()
[ 19, 112, 200, 99 ]
def METHOD_NAME(self):
    'Spin 1 z-component'
    return self['spin1z'][:]
[ -1, 49 ]
def METHOD_NAME(mat, cols): return decode_matrix_fbs(encode_matrix_fbs(mat, col_idx=cols)).to_numpy()
[ 197 ]
def METHOD_NAME(self, j_stripping):
    """
    A private function to obtain the standard variables which
    can be derived from the lithum stripping interfacial reaction current

    Parameters
    ----------
    j_stripping : :class:`pybamm.Symbol`
        The net lithium stripping interfacial reaction current.

    Returns
    -------
    variables : dict
        The variables which can be derived from the plated lithium thickness.
    """
    # Set scales to one for the "no plating" model so that they are not required
    # by parameter values in general
    j_stripping_av = pybamm.x_average(j_stripping)
    variables = {
        "Lithium plating interfacial current density [A.m-2]": j_stripping,
        "X-averaged lithium plating "
        "interfacial current density [A.m-2]": j_stripping_av,
    }
    return variables
[ 19, 2356, 115, 2045 ]
def METHOD_NAME(self, engine, model=None, filename=None):
    image = FlashBuilder.create_model_image(model, filename)
    self.models[engine] = image
[ 238, 578 ]
def METHOD_NAME():
    return [
        "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance",
        "Documentary", "Short", "Mystery", "History", "Family", "Adventure",
        "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music",
        "Musical", "Animation", "Biography", "Film-Noir",
    ]
[ 19, -1, 415 ]
def METHOD_NAME(self, sample_rate):
    """
    Set the new sampling rate

    Args:
        sample_rate: the new rate
    """
    self._sd.METHOD_NAME(sample_rate)
[ 0, 734, 1585 ]
def METHOD_NAME(): return ArgumentParser(add_help=False)
[ 1319 ]
def METHOD_NAME(self): self.model_files = []
[ 0, 1 ]
def METHOD_NAME(s):
[ 1660, 4621, 200 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    Tenant ID.
    """
    return pulumi.get(self, "tenant_id")
[ 4154, 147 ]
def METHOD_NAME(self, names):
    """Multiple pip-installed modules can be collected at once."""
    custom_modules = _DeployableEntity._custom_modules_as_artifact(names)
    for name in names:
        self.assert_in_custom_modules(custom_modules, name)
[ 9, 107, 468 ]
def METHOD_NAME(
    target_directory: str | None,
) -> tuple[bool, str, FileDataContext | None]:
    is_config_ok: bool = True
    upgrade_message: str = ""
    context: FileDataContext | None = None

    try:
        # Without this check, FileDataContext will possibly scaffold a project structure.
        # As we want CLI users to follow the `init` workflow, we should exit early if we can't find a context YAML.
        if not FileDataContext._find_context_yml_file(target_directory):
            raise gx_exceptions.ConfigNotFoundError()

        context = FileDataContext(context_root_dir=target_directory)
        ge_config_version: int = context.get_config().config_version  # type: ignore[union-attr] # could be dict, str

        if int(ge_config_version) < CURRENT_GX_CONFIG_VERSION:
            is_config_ok = False
            upgrade_message = f"""The config_version of your great_expectations.yml -- {float(ge_config_version)} -- is outdated.
[ 74, 200, 250 ]
def METHOD_NAME(args):
    options = parser.parse_args(args)

    with codecs.open(options.spec, "r", encoding="utf-8") as spec_file:
        spec_yaml = yaml.safe_load(spec_file)
        if not isinstance(spec_yaml, list):
            raise ValueError("expected a list of micro-kernels in the spec")

        tests = """\
[ 57 ]
def METHOD_NAME():
    dataset: HfDataset = MsDataset.load(
        'wyj123456/instinwild', subset_name='subset',
        split='train').to_hf_dataset()
    return _processing_alpaca(dataset)
[ 19, -1, 4334, 126 ]
def METHOD_NAME(self, index):
    """
    Descend into call tree for this node.
    """
    expanding_item = self._model.itemFromIndex(index)
    if not expanding_item.populated:
        dbg = self.instance.debugger_mgr.debugger
        if dbg.am_none:
            return
        called = dbg.get_called_functions(expanding_item.event)
        for func_or_addr, event in called:
            expanding_item.appendRow(CallTreeItem(func_or_addr, event))
        expanding_item.expandable = len(called) > 0
        expanding_item.populated = True
[ 69, 1024, 2928 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    For optimistic concurrency control.
    """
    return pulumi.get(self, "e_tag")
[ 1178, 82 ]
def METHOD_NAME(args=(), params=None, out=sys.stdout): """ For use in PHENIX GUI only, fetches pdb filesand/or reflection data from the PDB. Parameters ---------- args : list of str, optional params : libtbx.phil.scope_extract, optional out : file, optional Returns ------- output_files : list of str errors : list of str """ assert (params is not None) output_files = [] errors = [] mirror = "--mirror=%s" % params.fetch_pdb.site for id in params.fetch_pdb.pdb_ids : args = [mirror, id] if (params.fetch_pdb.action in ["all_data","all_plus_mtz"]): args.insert(0, "--all") if (params.fetch_pdb.action == "all_plus_mtz"): args.insert(1, "--mtz") try : data_files = run2(args=args, log=out) print("\n".join(data_files), file=out) output_files.extend(data_files) except Exception as e: errors.append(str(e)) else : pdb_file = run2(args=[mirror,id], log=out) print(pdb_file, file=out) output_files.append(pdb_file) return output_files, errors
[ 22 ]
def METHOD_NAME(self) -> None: """Start application mode.""" loop = asyncio.get_running_loop() def do_exit() -> None: """Callback to force exit.""" asyncio.run_coroutine_threadsafe( self._app._post_message(messages.ExitApp()), loop=loop ) if not WINDOWS: for _signal in (signal.SIGINT, signal.SIGTERM): loop.add_signal_handler(_signal, do_exit) self._write(b"__GANGLION__\n") self.write("\x1b[?1049h") # Alt screen self._enable_mouse_support() self.write("\x1b[?25l") # Hide cursor self.write("\033[?1003h\n") size = Size(80, 24) if self._size is None else Size(*self._size) event = events.Resize(size, size) asyncio.run_coroutine_threadsafe( self._app._post_message(event), loop=loop, ) self._request_terminal_sync_mode_support() self._enable_bracketed_paste() self.flush() self._key_thread.start()
[ 447, 88, 854 ]
def METHOD_NAME(op, data, where, aggcontext=None, **kwargs):
    return aggcontext.agg(
        (
            data.obj.loc[where].groupby(data.grouping.grouper)
            if where is not None
            else data
        ),
        np.array,
    )
[ 750, 877, 1444, 2834 ]
def METHOD_NAME(self, dataset, dataset_info):
    """Update dataset attributes."""
    dataset.attrs.update(self[dataset_info['nc_key']].attrs)
    dataset.attrs.update(dataset_info)
    dataset.attrs['sensor'] = 'merged'
    dataset.attrs['composite_period'] = self.composite_period
    # remove attributes from original file which don't apply anymore
    dataset.attrs.pop("nc_key")
[ 86, 1685 ]
def METHOD_NAME(self, identifier=None, rtype=None, name=None, content=None):
    if identifier is not None:
        rtype, name, content = self._parse_identifier(identifier)

    request = {"action": "DELETE", "name": self.domain}
    if rtype is not None:
        request["type"] = rtype
    if name is not None:
        request["name"] = self._full_name(name)
    if content is not None:
        request["value"] = content

    payload = self._get("/dns/dyndns.jsp", request)

    if payload.find("is_ok").text != "OK:":
        raise Exception(f"An error occurred: {payload.find('is_ok').text}")

    LOGGER.debug("delete_record: %s", True)
    return True
[ 34, 148 ]
def METHOD_NAME(ctx, grad_output: torch.Tensor) -> tuple: return (None, ) * 5
[ 2955 ]
def METHOD_NAME(self):
    self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder)
    cmake = self._configure_cmake()
    cmake.install()
    tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
    tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
[ 360 ]
def METHOD_NAME(parallelEnv): comm = MPI.COMM_WORLD myGlobalRank = comm.rank # Create an Albany problem: n_params = 2 filename = "thermal_steady.yaml" parameter = Utils.createParameterList( filename, parallelEnv ) problem = Utils.createAlbanyProblem(parameter, parallelEnv) # ---------------------------------------------- # # 1. Evaluation of the theta star # # ---------------------------------------------- l_min = 8. l_max = 20. n_l = 5 p = 1. l = l_min + np.power(np.linspace(0.0, 1.0, n_l), p) * (l_max-l_min) theta_star, I_star, F_star, P_star = ee.evaluateThetaStar(l, problem, n_params) np.savetxt('theta_star_steady.txt', theta_star) np.savetxt('I_star_steady.txt', I_star) np.savetxt('P_star_steady.txt', P_star) np.savetxt('F_star_steady.txt', F_star) # ---------------------------------------------- # # 2. Evaluation of the prefactor using IS # # ---------------------------------------------- N_samples = 10 mean = np.array([0., 0.]) cov = np.array([[1., 0.], [0., 1.]]) samples = np.random.multivariate_normal(mean, cov, N_samples) angle_1 = 0.49999*np.pi angle_2 = np.pi - angle_1 P_IS = ee.importanceSamplingEstimator(mean, cov, theta_star, F_star, P_star, samples, problem) P_mixed = ee.mixedImportanceSamplingEstimator(mean, cov, theta_star, F_star, P_star, samples, problem, angle_1, angle_2) P_SO = ee.secondOrderEstimator(mean, cov, l, theta_star, I_star, F_star, P_star, problem) np.savetxt('P_steady_IS.txt', P_IS) np.savetxt('P_steady_mixed.txt', P_mixed) np.savetxt('P_steady_SO.txt', P_SO) problem.reportTimers() # ---------------------------------------------- # # 3. Plots # # ---------------------------------------------- if n_params == 2: X = np.arange(0, 6, 0.2) Y = np.arange(0, 6, 0.25) Z1, Z2 = evaluate_responses(X, Y, problem, True) X, Y = np.meshgrid(X, Y) if myGlobalRank == 0: if printPlot: plt.figure() plt.semilogy(F_star, P_star, 'k*-') plt.semilogy(F_star, P_IS, 'b*-') plt.semilogy(F_star, P_mixed, 'r*--') plt.semilogy(F_star, P_SO, 'g*-') plt.savefig('extreme_steady.jpeg', dpi=800) plt.close() if n_params == 2: plt.figure() plt.plot(theta_star[:, 0], theta_star[:, 1], '*-') #plt.contour(X, Y, Z1, levels=I_star, colors='g') #plt.contour(X, Y, Z2, levels=F_star, colors='r') plt.savefig('theta_star.jpeg', dpi=800) plt.close() fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(X, Y, Z1) plt.savefig('Z1.jpeg', dpi=800) plt.close() fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(X, Y, Z2) plt.savefig('Z2.jpeg', dpi=800) plt.close()
[ 57 ]
def METHOD_NAME(self, video): title = video['title'] vimeo_id = self._search_regex( r'https?://player\.vimeo\.com/external/(\d+)', video['vimeoVideoURL'], 'vimeo id') uploader_id = video.get('hostID') return { '_type': 'url_transparent', 'id': vimeo_id, 'title': title, 'description': video.get('description'), 'url': smuggle_url( 'https://player.vimeo.com/video/' + vimeo_id, { 'http_headers': { 'Referer': 'https://storyfire.com/', } }), 'thumbnail': video.get('storyImage'), 'view_count': int_or_none(video.get('views')), 'like_count': int_or_none(video.get('likesCount')), 'comment_count': int_or_none(video.get('commentsCount')), 'duration': int_or_none(video.get('videoDuration')), 'timestamp': int_or_none(video.get('publishDate')), 'uploader': video.get('username'), 'uploader_id': uploader_id, 'uploader_url': format_field(uploader_id, None, 'https://storyfire.com/user/%s/video'), 'episode_number': int_or_none(video.get('episodeNumber') or video.get('episode_number')), }
[ 214, 1781 ]
def METHOD_NAME(self, raw, upload_type): # Check that the search matches some products ids = set(UPC_SET_REGEX.findall(raw)) # switch for included or excluded products if upload_type == RangeProductFileUpload.EXCLUDED_PRODUCTS_TYPE: products = self.product_range.excluded_products.all() action = _("excluded from this range") else: products = self.product_range.all_products() action = _("added to this range") existing_skus = set( products.values_list("stockrecords__partner_sku", flat=True) ) existing_upcs = set(products.values_list("upc", flat=True)) existing_ids = existing_skus.union(existing_upcs) new_ids = ids - existing_ids if len(new_ids) == 0: self.add_error( "query", _( "The products with SKUs or UPCs matching %(skus)s have " "already been %(action)s" ) % {"skus": ", ".join(ids), "action": action}, ) else: self.products = Product._default_manager.filter( Q(stockrecords__partner_sku__in=new_ids) | Q(upc__in=new_ids) ) if len(self.products) == 0: self.add_error( "query", _("No products exist with a SKU or UPC matching %s") % ", ".join(ids), ) found_skus = set( self.products.values_list("stockrecords__partner_sku", flat=True) ) found_upcs = set(self.products.values_list("upc", flat=True)) found_ids = found_skus.union(found_upcs) self.missing_skus = new_ids - found_ids self.duplicate_skus = existing_ids.intersection(ids)
[ 1356, 539, 41, 172, 44 ]
async def METHOD_NAME(self, field: str, value: str, limit: int = 100) -> StorageRecords:
    service = self._get_storage_service()
    return await service.METHOD_NAME(field, value, limit, sort=None)
[ 557, 604 ]
def METHOD_NAME(parser_class):
    from google.cloud.bigquery.magics.line_arg_parser import ParseError
    from google.cloud.bigquery.magics.line_arg_parser import TokenType
    from google.cloud.bigquery.magics.line_arg_parser.lexer import Token

    # A simple iterable of Tokens is sufficient.
    fake_lexer = [
        Token(TokenType.UNKNOWN, lexeme="@!#", pos=2),
        Token(TokenType.EOL, lexeme="", pos=5),
    ]
    parser = parser_class(fake_lexer)

    with pytest.raises(ParseError, match=r"Unknown.*position 2.*@!#.*"):
        parser.destination_var()
[ 9, 3836, 486, 5453, 362 ]