text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
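Each row below pairs one "text" value (a Python function with its name masked as METHOD_NAME) with one "ids" value (a short list of integers). As a purely illustrative sketch — the file name, storage layout, and field names here are assumptions, since the dump itself does not document them — rows of this shape could be iterated like this:

import json

def iter_rows(path):
    # Assumed layout: JSON Lines, one object per line with "text" and "ids" keys (hypothetical).
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield row["text"], row["ids"]

# Hypothetical usage; "rows.jsonl" is a placeholder file name.
for text, ids in iter_rows("rows.jsonl"):
    assert isinstance(text, str) and isinstance(ids, list)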
def METHOD_NAME(request, room_number):
    sections = Section.objects.filter(room=room_number).order_by("period")
    if not sections.exists():
        raise Http404
    context = {"room_number": room_number, "classes": sections}
    return render(request, "users/class_room.html", context)
[ 1446, 604, 6703, 1179 ]
def METHOD_NAME(self, track):
    assert self.__debugPrint("setTrack(track=%s)" % (track,))
    if self.track is not None:
        self.track.pause()
        self.track = None
    if track is not None:
        track.start(self.stateTime)
        self.track = track
[ 0, 3068 ]
def METHOD_NAME(arg): # -> list[int]: ...
[ 245, 47, 5841 ]
def METHOD_NAME(self): # GIVEN peak_allocations = [ MockAllocationRecord( tid=1, address=0x1000000, size=1024, allocator=AllocatorType.MALLOC, stack_id=1, n_allocations=1, _stack=[], ), ] # WHEN table = TableReporter.from_snapshot( peak_allocations, memory_records=[], native_traces=False ) # THEN assert table.data == [ { "tid": "0x1", "size": 1024, "allocator": "malloc", "n_allocations": 1, "stack_trace": "???", } ]
[ 9, 35, 1501, 2576 ]
def METHOD_NAME(self):
    super().METHOD_NAME()
    self._add_defaults_build_sub_commands()
[ 238, 1618 ]
def METHOD_NAME(self) -> str:
    """
    Resource name.
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self): """Test that a description must be at least 10 characters long If description is less than 10 characters, an error should be raised """ expected_minimum_length = 10 minimum_description = 'x' * expected_minimum_length project_obj = self.data.unsaved_object project_obj.description = minimum_description[:-1] with self.assertRaises(ValidationError): project_obj.clean_fields() project_obj.description = minimum_description project_obj.clean_fields() project_obj.save() retrieved_obj = Project.objects.get(pk=project_obj.pk) self.assertEqual(minimum_description, retrieved_obj.description)
[ 9, 1067, 3353 ]
def METHOD_NAME(pregenerated_session_num: str) -> tuple:
    base = ph.get_pregen_session_folder()
    pcqs = np.load(Path(base) / f"session_{pregenerated_session_num}_ephys_pcqs.npy")
    len_block = np.load(Path(base) / f"session_{pregenerated_session_num}_ephys_len_blocks.npy")
    pos = pcqs[:, 0].tolist()
    cont = pcqs[:, 1].tolist()
    quies = pcqs[:, 2].tolist()
    phase = pcqs[:, 3].tolist()
    len_blocks = len_block.tolist()
    # If phase patch file exists load that one
    stim_phase_path = Path(base).joinpath(f"session_{pregenerated_session_num}_stim_phase.npy")
    if stim_phase_path.exists():
        phase = np.load(stim_phase_path).tolist()
    assert len(pos) == len(cont) == len(quies) == len(phase) == sum(len_blocks)
    return pos, cont, quies, phase, len_blocks
[ 557, -1, 240, -1 ]
def METHOD_NAME(self) -> str:
    """
    The name of the resource.
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(): widget = widget_output.Output() # Try appending a Markdown object. widget.append_display_data(Markdown("# snakes!")) expected = ( { 'output_type': 'display_data', 'data': { 'text/plain': '<IPython.core.display.Markdown object>', 'text/markdown': '# snakes!' }, 'metadata': {} }, ) assert widget.outputs == expected, repr(widget.outputs) # Now try appending an Image. image_data = b"foobar" image_data_b64 = image_data if sys.version_info[0] < 3 else 'Zm9vYmFy\n' widget.append_display_data(Image(image_data, width=123, height=456)) expected += ( { 'output_type': 'display_data', 'data': { 'image/png': image_data_b64, 'text/plain': '<IPython.core.display.Image object>' }, 'metadata': { 'image/png': { 'width': 123, 'height': 456 } } }, ) assert widget.outputs == expected, repr(widget.outputs)
[ 9, 1459, 52, 365 ]
def METHOD_NAME(csa_str): """Read CSA header from string `csa_str` Parameters ---------- csa_str : str byte string containing CSA header information Returns ------- header : dict header information as dict, where `header` has fields (at least) ``type, n_tags, tags``. ``header['tags']`` is also a dictionary with one key, value pair for each tag in the header. """ csa_len = len(csa_str) csa_dict = {'tags': {}} hdr_id = csa_str[:4] up_str = Unpacker(csa_str, endian='<') if hdr_id == b'SV10': # CSA2 hdr_type = 2 up_str.ptr = 4 # omit the SV10 csa_dict['unused0'] = up_str.METHOD_NAME(4) else: # CSA1 hdr_type = 1 csa_dict['type'] = hdr_type csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: raise CSAReadError( f'Number of tags `t` should be 0 < t <= {MAX_CSA_ITEMS}. ' f'Instead found {csa_dict["n_tags"]} tags.' ) for tag_no in range(csa_dict['n_tags']): name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i') vr = nt_str(vr) name = nt_str(name) tag = { 'n_items': n_items, 'vm': vm, # value multiplicity 'vr': vr, # value representation 'syngodt': syngodt, 'last3': last3, 'tag_no': tag_no, } if vm == 0: n_values = n_items else: n_values = vm # data converter converter = _CONVERTERS.get(vr) # CSA1 specific length modifier if tag_no == 1: tag0_n_items = n_items if n_items > MAX_CSA_ITEMS: raise CSAReadError(f'Expected <= {MAX_CSA_ITEMS} tags, got {n_items}') items = [] for item_no in range(n_items): x0, x1, x2, x3 = up_str.unpack('4i') ptr = up_str.ptr if hdr_type == 1: # CSA1 - odd length calculation item_len = x0 - tag0_n_items if item_len < 0 or (ptr + item_len) > csa_len: if item_no < vm: items.append('') break else: # CSA2 item_len = x1 if (ptr + item_len) > csa_len: raise CSAReadError('Item is too long, aborting read') if item_no >= n_values: assert item_len == 0 continue item = nt_str(up_str.METHOD_NAME(item_len)) if converter: # we may have fewer real items than are given in # n_items, but we don't know how many - assume that # we've reached the end when we hit an empty item if item_len == 0: n_values = item_no continue item = converter(item) items.append(item) # go to 4 byte boundary plus4 = item_len % 4 if plus4 != 0: up_str.ptr += 4 - plus4 tag['items'] = items csa_dict['tags'][name] = tag return csa_dict
[ 203 ]
def METHOD_NAME(self): """Check that the topic and tutorial are already there and retrieve them.""" # check topic if not self.topic.exists(): raise Exception("The topic %s does not exists. It should be created" % self.topic.name) self.topic.init_from_metadata() # initiate the tutorial self.tuto = Tutorial(training=self, topic=self.topic) self.tuto.init_from_existing_tutorial(self.kwds["tutorial_name"]) if "datatypes" in self.kwds: self.tuto.datatype_fp = self.kwds["datatypes"] if "workflow" in self.kwds: self.tuto.init_wf_fp = self.kwds["workflow"] if "workflow_id" in self.kwds: self.tuto.init_wf_id = self.kwds["workflow_id"]
[ 250, 39, 176, -1 ]
def METHOD_NAME(accounts, tester):
    addr = accounts[1]
    value = ["blahblah", addr, ["yesyesyes", "0x1234"]]
    tester.setTuple(value)
    with brownie.multicall:
        # the value hasn't been fetched so ret_value is just the proxy
        # but if we access ret_val again it will update
        # so use getattr_static to see it has yet to update
        ret_val = tester.getTuple(addr)
        assert inspect.getattr_static(ret_val, "__wrapped__") != value
        assert isinstance(ret_val, Proxy)
        assert ret_val.__wrapped__ == value
[ 9, 127, 279, 137, 2475, 280, 1929 ]
def METHOD_NAME(self): """ Repair all ships. Pages: in: PORT_CHECK out: PORT_CHECK """ self.ui_click(PORT_GOTO_DOCK, appear_button=PORT_CHECK, check_button=PORT_DOCK_CHECK, skip_first_screenshot=True) skip_first_screenshot = True repaired = False while 1: if skip_first_screenshot: skip_first_screenshot = False else: self.device.screenshot() # End if self.info_bar_count(): break if repaired and self.appear(PORT_DOCK_CHECK, offset=(20, 20)): break # PORT_DOCK_CHECK is button to repair all. if self.appear_then_click(PORT_DOCK_CHECK, offset=(20, 20), interval=2): continue if self.handle_popup_confirm('DOCK_REPAIR'): repaired = True continue self.ui_back(appear_button=PORT_DOCK_CHECK, check_button=PORT_CHECK, skip_first_screenshot=True)
[ 237, 5134, 4894 ]
def METHOD_NAME(n_cascade_levels, n_models=1):
    """
    Generate skill starting at default_start_skill which decay exponentially with scale.
    """
    start_skill = np.resize(default_start_skill, n_models)
    powers = np.arange(1, n_cascade_levels + 1)
    return pow(start_skill[:, np.newaxis], powers)
[ 567, 3110, 7260 ]
def METHOD_NAME(compiler):
    """Do platform-specific customizations of compilers on unix platforms."""
    if compiler.compiler_type == "unix":
        (cc, cxx, cflags) = get_config_vars("CC", "CXX", "CFLAGS")
        if "CC" in os.environ:
            cc = os.environ["CC"]
        if "CXX" in os.environ:
            cxx = os.environ["CXX"]
        if "CFLAGS" in os.environ:
            cflags = cflags + " " + os.environ["CFLAGS"]
        cc_cmd = cc + " " + cflags
        # We update executables in compiler to take advantage of distutils arg splitting
        compiler.set_executables(compiler=cc_cmd, compiler_cxx=cxx)
[ 7822, 1436 ]
def METHOD_NAME(self) -> Dict[str, str]: return self._product_versions
[ 24, 553 ]
def METHOD_NAME(self, row, parent, align):
    """ Given a row of text, build table cells. """
    tr = etree.SubElement(parent, 'tr')
    tag = 'td'
    if parent.tag == 'thead':
        tag = 'th'
    cells = self._split_row(row)
    # We use align here rather than cells to ensure every row
    # contains the same number of columns.
    for i, a in enumerate(align):
        c = etree.SubElement(tr, tag)
        try:
            c.text = cells[i].strip()
        except IndexError:  # pragma: no cover
            c.text = ""
        if a:
            c.set('align', a)
[ 56, 843 ]
def METHOD_NAME():
    # Inject a fake object in the leader-election monitoring logic
    # don't forget to update the [testenv] in tox.ini with the 'kube' dependency
    with mock.patch(
        'datadog_checks.kube_scheduler.KubeSchedulerCheck._get_record',
        return_value=ElectionRecordAnnotation(
            "endpoints",
            '{"holderIdentity":"pod1","leaseDurationSeconds":15,"leaderTransitions":3,'
            + '"acquireTime":"2018-12-19T18:23:24Z","renewTime":"2019-01-02T16:30:07Z"}',
        ),
    ):
        yield
[ 248, 2919 ]
def METHOD_NAME(self): return re.sub(r"\[(.+?)\|(.+?)\]", r"\2", self.sentence).replace(" ", " ")
[ 13909, 3421 ]
def METHOD_NAME(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": avg_probs_i, } ] ) return hypos
[ 567 ]
def METHOD_NAME(self): """ Errors added through the L{PyUnitResultAdapter} have the same traceback information as if there were no adapter at all, even if the Failure that held the information has been cleaned. """ try: 1 / 0 except ZeroDivisionError: exc_info = sys.exc_info() f = Failure() f.cleanFailure() pyresult = pyunit.TestResult() result = PyUnitResultAdapter(pyresult) result.addError(self, f) tback = "".join(traceback.format_exception(*exc_info)) self.assertEqual( pyresult.errors[0][1].endswith("ZeroDivisionError: division by zero\n"), tback.endswith("ZeroDivisionError: division by zero\n"), )
[ 9, 8890, 280, 1356, 374 ]
def METHOD_NAME(self) -> Any: self.input_file = open(self.table_lineage_file_location, 'r') lineage_event = (json.loads(line) for line in self.input_file) table_lineage = (TableLineage(table_key=lineage['input'], downstream_deps=[lineage['output']]) for lineage in self._extract_dataset_info(lineage_event)) self._iter = table_lineage
[ 557, 7227, 417 ]
def METHOD_NAME(os_window_id: int, tab_id: int, colors: Sequence[int], wg: WindowGroup, borders: bool = False) -> None: geometry = wg.geometry if geometry is None: return pl, pt = wg.effective_padding('left'), wg.effective_padding('top') pr, pb = wg.effective_padding('right'), wg.effective_padding('bottom') left = geometry.left - pl top = geometry.top - pt lr = geometry.right right = lr + pr bt = geometry.bottom bottom = bt + pb if borders: width = wg.effective_border() bt = bottom lr = right left -= width top -= width right += width bottom += width pl = pr = pb = pt = width horizontal_edge(os_window_id, tab_id, colors[1], pt, left, right, top) horizontal_edge(os_window_id, tab_id, colors[3], pb, left, right, bt) vertical_edge(os_window_id, tab_id, colors[0], pl, top, bottom, left) vertical_edge(os_window_id, tab_id, colors[2], pr, top, bottom, lr)
[ 1100, 491 ]
def METHOD_NAME(self): subdir = "msys64" # top-level directoy in tarball return os.path.join(self.source_folder, subdir)
[ -1, 1190 ]
def METHOD_NAME(self, batch_size):
[ 9, 476, 477 ]
def METHOD_NAME(sagemaker_session):
    estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
    assert estimator.py_version == "py2"

    estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
    assert estimator.py_version == "py2"
[ 9, 3952, 281, 137, 130, 2497 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(self) -> str:
    """
    The url to get the next page of results, if any.
    """
    return pulumi.get(self, "next_link")
[ 243, 548 ]
def METHOD_NAME(self):
    request = self.request_factory.post("/")
    request.user = self.user
    data = {
        "name": "vfrgthj",
        "time_recurrences": [
            {
                "days": [1, 2, 3, 4, 5],
                "start": "00:00:00",
                "end": "16:00:00",
            }
        ],
    }
    serializer = TimeslotSerializer(
        data=data,
        context={"request": request},
    )
    self.assertTrue(serializer.is_valid(), serializer.errors)
[ 9, 10637, 1386, 137, 1205, 41, 668 ]
def METHOD_NAME(): return mock.MagicMock()
[ -1, 248 ]
def METHOD_NAME( type: str, name: str, crontab: str, owners: list[User], timezone: Optional[str] = None, sql: Optional[str] = None, description: Optional[str] = None, chart: Optional[Slice] = None, dashboard: Optional[Dashboard] = None, database: Optional[Database] = None, validator_type: Optional[str] = None, validator_config_json: Optional[str] = None, log_retention: Optional[int] = None, last_state: Optional[ReportState] = None, grace_period: Optional[int] = None, recipients: Optional[list[ReportRecipients]] = None, report_format: Optional[ReportDataFormat] = None, logs: Optional[list[ReportExecutionLog]] = None, extra: Optional[dict[Any, Any]] = None, force_screenshot: bool = False, ) -> ReportSchedule: owners = owners or [] recipients = recipients or [] logs = logs or [] last_state = last_state or ReportState.NOOP with override_user(owners[0]): report_schedule = ReportSchedule( type=type, name=name, crontab=crontab, timezone=timezone, sql=sql, description=description, chart=chart, dashboard=dashboard, database=database, owners=owners, validator_type=validator_type, validator_config_json=validator_config_json, log_retention=log_retention, grace_period=grace_period, recipients=recipients, logs=logs, last_state=last_state, report_format=report_format, extra=extra, force_screenshot=force_screenshot, ) db.session.add(report_schedule) db.session.commit() return report_schedule
[ 408, 339, 507 ]
def METHOD_NAME(function = None, section = "MANUAL"): """ Decorator for profiling the specific methods. It can be used in two ways: @profile def method(...): ... or @profile(section="SECTION"): def method(...): ... The second form is equivalent to the first with section = "MANUAL". Profiling section is a named set of methods which should be profiled. Supported values of section are listed in SverchokPreferences.profiling_sections. The @profile(section) decorator does profile the method only if all following conditions are met: * profiling for specified section is enabled in settings (profile_mode option), * profiling is currently active. """ def profiling_decorator(func): def wrapper(*args, **kwargs): if is_profiling_enabled(section): global _profile_nesting METHOD_NAME = get_global_profile() _profile_nesting += 1 if _profile_nesting == 1: METHOD_NAME.enable() result = func(*args, **kwargs) _profile_nesting -= 1 if _profile_nesting == 0: METHOD_NAME.disable() return result else: return func(*args, **kwargs) wrapper.__name__ = func.__name__ wrapper.__doc__ = func.__doc__ return wrapper if callable(function): return profiling_decorator(function) else: return profiling_decorator
[ 337 ]
def METHOD_NAME(self, *args, **kwargs): if "data" in kwargs: kwargs["data"]["consultation"] = PatientConsultation.objects.get( external_id=self.kwargs["consultation_external_id"] ).id return super().METHOD_NAME(*args, **kwargs)
[ 19, 1386 ]
def METHOD_NAME(population_1, population_2, window_size, critical_quantile): """ Computes the equivalence test to show that mean 2 - window_size/2 < mean 1 < mean 2 + window_size/2 Inputs: - population_1, np.array 1D: an array of 1 dimension with the observations of 1st group - population_2, np.array 1D: an array of 1 dimension with the observations of 2nd group - threshold, float: small value that determines the maximal difference that can be between means - quantile, float: the level of significance, is the 1-q quantile of the distribution Output, bool: True if the null is rejected and False if there is not enough evidence """ population_2_up = population_2 + window_size / 2.0 population_2_low = population_2 - window_size / 2.0 if np.array_equal(population_1, population_2): p_value_low, p_value_up = 0.0, 0.0 else: p_value_up = stats.wilcoxon(population_1, population_2_up, alternative="less")[1] p_value_low = stats.wilcoxon(population_1, population_2_low, alternative="greater")[1] test_up = p_value_up < critical_quantile test_low = p_value_low < critical_quantile if test_low and test_up: print("null hypothesis rejected at a level of significance", critical_quantile) return True, max(p_value_low, p_value_up) else: print("not enough evidence to reject null hypothesis") return False, max(p_value_low, p_value_up)
[ 516, 12051 ]
def METHOD_NAME(self):
    result = self.integration.details(self.existing_bug_url)

    self.assertEqual("Hello GitHub", result["title"])
    self.assertEqual(
        "This issue is used in automated tests that verify Kiwi TCMS - GitHub "
        "bug tracking integration!",
        result["description"],
    )
[ 9, 2051, 43, 1609, 274 ]
def METHOD_NAME( self, configuration: Optional[ExpectationConfiguration] ) -> None: """ Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that necessary configuration arguments have been provided for the validation of the expectation. Args: configuration (OPTIONAL[ExpectationConfiguration]): \ An optional Expectation Configuration entry that will be used to configure the expectation Returns: None. Raises InvalidExpectationConfigurationError if the config is not validated successfully """ super().METHOD_NAME(configuration) configuration = configuration or self.configuration # # Check other things in configuration.kwargs and raise Exceptions if needed # try: # assert ( # ... # ), "message" # assert ( # ... # ), "message" # except AssertionError as e: # raise InvalidExpectationConfigurationError(str(e)) return True
[ 187, 830 ]
def METHOD_NAME(df: pd.DataFrame, mappings: dict[str, dict[str, str]]):
    """
    Clean partition columns.

    The values in `partition_cols` may have characters that are illegal in
    filenames. Strip them out and return a dataframe we can write into a
    parquet file.
    """
    for col, val_map in mappings.items():
        df[col] = df[col].map(val_map)
    return df
[ 1356, 2312, 1959 ]
def METHOD_NAME(self, tx: BaseTransaction, partial: bool = True) -> None:
    """Update 'deps' and 'needed' sub-indexes, removing them when necessary (i.e. validation is complete).

    Note: this method is idempotent.
    """
    raise NotImplementedError
[ 238, 2543 ]
def METHOD_NAME(self, size=500): return self.compose(filters.METHOD_NAME(size))
[ 266 ]
def METHOD_NAME(event): """Show statusbar comments from menu selection.""" index = main_window.call(event.widget, "index", "active") if index == 0: statustext.set("More information about this program") elif index == 2: statustext.set("Terminate the program") else: statustext.set("This is the statusbar")
[ 0, 8443 ]
def METHOD_NAME():
    try:
        resp = requests.get(
            "https://api.github.com/repos/Dummerle/Rare/releases/latest",
            timeout=2,
        )
        tag = resp.json()["tag_name"]
        return tag
    except requests.exceptions.ConnectionError:
        return "0.0.0"
[ 19, 893, 281 ]
def METHOD_NAME( self, interface, pvc_factory, pod_factory, dc_pod_factory, bucket_factory, rgw_bucket_factory, ): """ Knip-678 Automated recovery from failed nodes Proactive case - IPI """ # Get OSD running nodes osd_running_nodes = get_osd_running_nodes() log.info(f"OSDs are running on nodes {osd_running_nodes}") # Label osd nodes with fedora app label_worker_node(osd_running_nodes, label_key="dc", label_value="fedora") # Create DC app pods log.info("Creating DC based app pods") interface = ( constants.CEPHBLOCKPOOL if interface == "rbd" else constants.CEPHFILESYSTEM ) dc_pod_obj = [] for i in range(2): dc_pod = dc_pod_factory(interface=interface, node_selector={"dc": "fedora"}) pod.run_io_in_bg(dc_pod, fedora_dc=True) dc_pod_obj.append(dc_pod) # Get app pods running nodes dc_pod_node_name = get_app_pod_running_nodes(dc_pod_obj) log.info(f"DC app pod running nodes are {dc_pod_node_name}") # Get both osd and app pod running node common_nodes = get_both_osd_and_app_pod_running_node( osd_running_nodes, dc_pod_node_name ) msg = "Common OSD and app running node(s) NOT found" assert len(common_nodes) > 0, msg log.info(f"Common OSD and app pod running nodes are {common_nodes}") # Get the machine name using the node name machine_name = machine.get_machine_from_node_name(common_nodes[0]) log.info(f"{common_nodes[0]} associated machine is {machine_name}") # Get the machineset name using machine name machineset_name = machine.get_machineset_from_machine_name(machine_name) log.info(f"{common_nodes[0]} associated machineset is {machineset_name}") # Add a new node and label it add_new_node_and_label_it(machineset_name) # Delete the machine machine.delete_machine(machine_name) log.info(f"Successfully deleted machine {machine_name}") # DC app pods on the failed node will get automatically created on # other running node. Waiting for all dc app pod to reach running # state pod.wait_for_dc_app_pods_to_reach_running_state(dc_pod_obj) log.info("All the dc pods reached running state") pod.wait_for_storage_pods() # Check basic cluster functionality by creating resources # (pools, storageclasses, PVCs, pods - both CephFS and RBD), # run IO and delete the resources self.sanity_helpers.create_resources( pvc_factory, pod_factory, bucket_factory, rgw_bucket_factory ) self.sanity_helpers.delete_resources() # Perform cluster and Ceph health checks self.sanity_helpers.health_check()
[ 9, 16661, 1300, 280, 1423, 480, 1213 ]
def METHOD_NAME(self, now): nowsecs = str(int(now))[:-1] now = self.now nonstandard_expectations = ( # These are standard but don't have predictable output ('%c', fixasctime(time.asctime(now)), 'near-asctime() format'), ('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), '%m/%d/%y %H:%M:%S'), ('%Z', '%s' % self.tz, 'time zone name'), # These are some platform specific extensions ('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'), ('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'), ('%h', calendar.month_abbr[now[1]], 'abbreviated month name'), ('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'), ('%n', '\n', 'newline character'), ('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm), '%I:%M:%S %p'), ('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'), ('%s', nowsecs, 'seconds since the Epoch in UCT'), ('%t', '\t', 'tab character'), ('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'), ('%3y', '%03d' % (now[0]%100), 'year without century rendered using fieldwidth'), ) for e in nonstandard_expectations: try: result = time.strftime(e[0], now) except ValueError as result: msg = "Error for nonstandard '%s' format (%s): %s" % \ (e[0], e[2], str(result)) if support.verbose: print(msg) continue if re.match(escapestr(e[1], self.ampm), result): if support.verbose: print("Supports nonstandard '%s' format (%s)" % (e[0], e[2])) elif not result or result[0] == '%': if support.verbose: print("Does not appear to support '%s' format (%s)" % \ (e[0], e[2])) else: if support.verbose: print("Conflict for nonstandard '%s' format (%s):" % \ (e[0], e[2])) print(" Expected %s, but got %s" % (e[1], result))
[ 16958 ]
def METHOD_NAME():
    p = angr.Project(os.path.join(test_location, "x86_64", "all"), auto_load_libs=False, load_debug_info=True)
    cfg = p.analyses.CFG(data_references=True, normalize=True)

    main_func = cfg.kb.functions["main"]

    # convert function blocks to AIL blocks
    clinic = p.analyses.Clinic(main_func)

    # recover regions
    ri = p.analyses.RegionIdentifier(main_func, graph=clinic.graph)

    # structure it
    rs = p.analyses.RecursiveStructurer(ri.region)

    # simplify it
    s = p.analyses.RegionSimplifier(main_func, rs.result)

    codegen = p.analyses.StructuredCodeGenerator(main_func, s.result, cfg=cfg, ail_graph=clinic.graph)

    print(codegen.text)
[ 9, 53 ]
def METHOD_NAME(spec, state):
    slashed_indices = _setup_process_slashings_test(spec, state)

    yield from run_process_slashings(spec, state)

    for i in slashed_indices:
        assert state.balances[i] == 0
[ 9, 232, 15320 ]
def METHOD_NAME(self) -> str:
    """
    Provisioning state of the resource.
    """
    return pulumi.get(self, "provisioning_state")
[ 1994, 551 ]
def METHOD_NAME(self, model: BaseModel, dataset: Dataset, meta: pd.DataFrame): features = model.meta.feature_names or dataset.columns.drop(dataset.target, errors="ignore") df_with_meta = dataset.df.join(meta, how="right") column_types = dataset.column_types.copy() column_types[self.LOSS_COLUMN_NAME] = "numeric" dataset_with_meta = Dataset( df_with_meta, target=dataset.target, column_types=column_types, validation=False, ) # For performance dataset_with_meta.load_metadata_from_instance(dataset.column_meta) # Find slices sf = SliceFinder(numerical_slicer=self._numerical_slicer_method) sliced = sf.run(dataset_with_meta, features, target=self.LOSS_COLUMN_NAME) slices = sum(sliced.values(), start=[]) # Keep only slices of size at least 5% of the dataset or 20 samples (whatever is larger) slices = [s for s in slices if max(0.05 * len(dataset), 20) <= len(dataset_with_meta.slice(s))] return slices
[ 416, 920 ]
def METHOD_NAME(self, library) -> None: print_d(f"Reading playlist directory {self.pl_dir} (library: {library})") try: fns = os.listdir(self.pl_dir) except FileNotFoundError as e: print_w(f"No playlist dir found in {self.pl_dir!r}, creating. ({e})") os.mkdir(self.pl_dir) fns = [] # Populate this library by relying on existing signal passing. # Weird, but allows keeping the logic in one place failed = [] for fn in fns: full_path = os.path.join(self.pl_dir, fn) if os.path.isdir(full_path): continue if HIDDEN_RE.match(fsn2text(fn)): print_d(f"Ignoring hidden file {fn!r}") continue try: XSPFBackedPlaylist(self.pl_dir, fn, songs_lib=library, pl_lib=self) except TypeError as e: # Don't add to library - it's temporary legacy = FileBackedPlaylist(self.pl_dir, fn, songs_lib=library, pl_lib=None) if not len(legacy): try: size = os.stat(legacy._last_fn).st_size if size >= _MIN_NON_EMPTY_PL_BYTES: data = {"filename": fn, "size": size / 1024} print_w(_("No library songs found in legacy playlist " "%(filename)r (of size %(size).1f kB).") % data + " " + _("Have you changed library root dir(s), " "but not this playlist?")) continue except OSError: print_e(f"Problem reading {legacy._last_fn!r}") continue finally: failed.append(fn) print_w(f"Converting {fn!r} to XSPF format ({e})") XSPFBackedPlaylist.from_playlist(legacy, songs_lib=library, pl_lib=self) except EnvironmentError: print_w(f"Invalid Playlist {fn!r}") failed.append(fn) if failed: total = len(failed) print_e(ngettext("%d playlist failed to convert", "%d playlists failed to convert", total) % len(failed))
[ 203, 9828 ]
def METHOD_NAME(schema_uri):
    l = logging.getLogger(__name__)
    l.debug('Loading schema from %s', schema_uri)
    url_scheme = six.moves.urllib.parse.urlparse(schema_uri).scheme
    l.debug('Parsed URL: %s', schema_uri)
    scheme_dispatch = {
        'file': load_json_schema_from_file,
        'http': load_json_schema_from_web,
        'https': load_json_schema_from_web
    }
    try:
        return scheme_dispatch[url_scheme](schema_uri)
    except KeyError:
        raise IrodsError('ERROR: Invalid schema url: {}'.format(schema_uri))
[ 557, 763, 135 ]
def METHOD_NAME(self, response: Dict[str, Any]) -> None:
    if not response.get('ok'):
        err = response['error']
        if response.get('tb'):
            err += '\n' + response['tb']
        self.print_on_fail = err
        self.quit_loop(1)
        return
    res = response.get('data')
    if res:
        self.cmd.bell()
[ 69, 13931, 1660, 17 ]
def METHOD_NAME():
    # create tables
    for db in (db1, db2, db3, db4):
        Base.metadata.create_all(db)
[ 102 ]
def METHOD_NAME(self, task, entry, config=None): """ Search for name from torrentleech. """ request_headers = {'User-Agent': 'curl/7.54.0'} rss_key = config['rss_key'] # build the form request: data = {'username': config['username'], 'password': config['password']} # POST the login form: try: login = task.requests.post( 'https://www.torrentleech.org/user/account/login/', data=data, headers=request_headers, allow_redirects=True, ) except RequestException as e: raise PluginError('Could not connect to torrentleech: %s' % str(e)) if login.url.endswith('/user/account/login/'): raise PluginError('Could not login to torrentleech, faulty credentials?') if not isinstance(config, dict): config = {} # sort = SORT.get(config.get('sort_by', 'seeds')) # if config.get('sort_reverse'): # sort += 1 categories = config.get('category', 'all') # Make sure categories is a list if not isinstance(categories, list): categories = [categories] # If there are any text categories, turn them into their id number categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories] filter_url = '/categories/{}'.format(','.join(str(c) for c in categories)) entries = set() for search_string in entry.get('search_strings', [entry['title']]): query = normalize_unicode(search_string).replace(":", "") # urllib.quote will crash if the unicode string has non ascii characters, # so encode in utf-8 beforehand url = ( 'https://www.torrentleech.org/torrents/browse/list/query/' + quote(query.encode('utf-8')) + filter_url ) logger.debug('Using {} as torrentleech search url', url) results = task.requests.get(url, headers=request_headers, cookies=login.cookies).json() for torrent in results['torrentList']: entry = Entry() entry['download_headers'] = request_headers entry['title'] = torrent['name'] # construct download URL torrent_url = 'https://www.torrentleech.org/rss/download/{}/{}/{}'.format( torrent['fid'], rss_key, torrent['filename'] ) logger.debug('RSS-ified download link: {}', torrent_url) entry['url'] = torrent_url # seeders/leechers entry['torrent_seeds'] = torrent['seeders'] entry['torrent_leeches'] = torrent['leechers'] entry['torrent_availability'] = torrent_availability( entry['torrent_seeds'], entry['torrent_leeches'] ) entry['content_size'] = parse_filesize(str(torrent['size']) + ' b') entries.add(entry) return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
[ 1070 ]
def METHOD_NAME(): from dateutil import rrule
[ 9, 512, 11086, 280 ]
def METHOD_NAME(self):
[ 9, 19, 1208, 210, 107, 6873 ]
def METHOD_NAME(): remote = {'url': 'https://s3.amazonaws.com/openneuro.org', 'uuid': '57894849-d0c8-4c62-8418-3627be18a196'} url = parse_rmet_line( remote, """1590213748.042921433s 57894849-d0c8-4c62-8418-3627be18a196:V +iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y#ds002778/dataset_description.json""") assert url == 'https://s3.amazonaws.com/openneuro.org/ds002778/dataset_description.json?versionId=iVcEk18e3J2WQys4zr_ANaTPfpUufW4Y'
[ 9, 214, 7673, 534, 3905 ]
def METHOD_NAME(self):
    # see also test_root_location_id_supplied
    location = SQLLocation.objects.get(domain=self.domain, name='Suffolk')
    location.is_archived = True
    location.save()
    exporter = LocationExporter(
        self.domain,
        is_archived=False,
        root_location_ids=[location.location_id],
    )
    self.assertEqual(exporter.base_query.count(), 1)
    self.assertEqual(exporter.base_query[0].name, 'Boston')

    # Reset to previous state
    location.is_archived = False
    location.save()
[ 9, 1563, 708, 147, 8374, 923, 246 ]
def METHOD_NAME(): run_fdtd_2d(dace.dtypes.DeviceType.CPU)
[ 9, 2265 ]
def METHOD_NAME(self, *args, **kwargs):
    body = self.request.body
    self.request.jsoncall = True
    if body is not None:
        decoded = body.decode()
        if decoded:
            self.request.body = json.loads(decoded)
    result = method(self, *args, **kwargs)
    if result is not None:
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.write(json.dumps(result, default=json_handler))
    else:
        self.set_header('Content-Type', 'text/plain; charset=UTF-8')
        self.set_status(204)
[ 291 ]
def METHOD_NAME(
    top_k: Optional[int] = None,
    top_p: Optional[float] = None,
    temperature: Optional[float] = None,
) -> LogitsProcessorList:
    processor_list = LogitsProcessorList()
    if temperature is not None and temperature != 1.0:
        processor_list.append(TemperatureLogitsWarper(temperature))
    if top_k is not None and top_k != 0:
        processor_list.append(TopKLogitsWarper(top_k))
    if top_p is not None and top_p < 1.0:
        processor_list.append(TopPLogitsWarper(top_p))
    return processor_list
[ 123, 9016, 2422 ]
def METHOD_NAME(): installed_rpms = [ RPM(name='sendmail', version='8.14.7', release='5.el7', epoch='0', packager='foo', arch='x86_64', pgpsig='bar'), RPM(name='vsftpd', version='3.0.2', release='25.el7', epoch='0', packager='foo', arch='x86_64', pgpsig='bar'), RPM(name='postfix', version='2.10.1', release='7.el7', epoch='0', packager='foo', arch='x86_64', pgpsig='bar')] installed_rpm_facts = InstalledRedHatSignedRPM(items=installed_rpms) res = vsftpdconfigread.is_processable(installed_rpm_facts) assert res is True
[ 9, 137, 14958, 9792, 1255 ]
def METHOD_NAME():
    # ### commands auto generated by Alembic - please adjust! ###
    # op.drop_constraint(None, 'vulnerability', type_='foreignkey')
    op.drop_column('vulnerability', 'cvssv2_id')
    op.drop_column('vulnerability', 'cvssv3_id')
    op.drop_table('cvss_v3')
    op.drop_table('cvss_v2')
    op.drop_table('cvss_base')
    op.execute('drop type cvss_attack_complexity')
    op.execute('drop type cvss_access_vector')
    op.execute('drop type cvss_access_complexity')
    op.execute('drop type cvss_attack_vector')
    op.execute('drop type cvss_authentication')
    op.execute('drop type cvss_privileges_required')
    op.execute('drop type cvss_scope')
    op.execute('drop type cvss_user_interaction')
    op.execute('drop type cvss_impact_types_v2')
    op.execute('drop type cvss_impact_types_v3')
    # # ### end Alembic commands ###
[ 1502 ]
def METHOD_NAME(dirname):
    # TODO check source/plugins.rst output and give proper string
    return dirname
[ -1 ]
def METHOD_NAME(self) -> Iterable[Path]: return self.data_dir.joinpath("parquet").glob("*.parquet")
[ 9, 1537 ]
def METHOD_NAME(self) -> tuple[int, int]:
    """Return the range (from,to) of this pattern."""
    return self.range_from, self.range_to
[ 19, 661 ]
def METHOD_NAME():
    m = Model.model_validate(Model(a=10.2))
    assert m.model_dump() == {'a': 10.2, 'b': 10}
[ 9, 578, 187, 6231 ]
def METHOD_NAME(hprinter: _win32typing.PyPrinterHANDLE, Form) -> None: ...
[ 238, 1029 ]
def METHOD_NAME(self):
    try:
        os.mkdir(test_support.TESTFN)
    except OSError:
        pass
    files = set()
    for name in self.files:
        name = os.path.join(test_support.TESTFN, self.norm(name))
        with open(name, 'w') as f:
            f.write((name+'\n').encode("utf-8"))
        os.stat(name)
        files.add(name)
    self.files = files
[ 0, 1 ]
def METHOD_NAME(cls, v: str, values: dict[str, Any]) -> Any:
    """Build Postgres connection from environment variables."""
    if isinstance(v, str):
        return v
    if not (user := values.get("FMTM_DB_USER")):
        raise ValueError("FMTM_DB_USER is not present in the environment")
    if not (password := values.get("FMTM_DB_PASSWORD")):
        raise ValueError("FMTM_DB_PASSWORD is not present in the environment")
    if not (host := values.get("FMTM_DB_HOST")):
        raise ValueError("FMTM_DB_HOST is not present in the environment")
    return PostgresDsn.build(
        scheme="postgresql",
        user=user,
        password=password,
        host=host,
        path=f"/{values.get('FMTM_DB_NAME') or ''}",
    )
[ 1893, 1267, 550 ]
def METHOD_NAME(self): return super().METHOD_NAME().filter(organisation=self.organisation)
[ 19, 2386 ]
async def METHOD_NAME( z: int, x: int, y: int, db: Connection = Depends(database.db), params: commons_params.Params = Depends(commons_params.params),
[ 4818, 13453 ]
def METHOD_NAME(model_path: str, in_path: str, out_path: str):
    Args = namedtuple("Args", ["sentencepiece_model"])
    args = Args(sentencepiece_model=model_path)
    tokenizer = SentencepieceBPE(args)
    with open(in_path) as f, open(out_path, "w") as f_o:
        for s in f:
            f_o.write(tokenizer.encode(s.strip()) + "\n")
[ 231, 3138 ]
def METHOD_NAME(env, configuration_name):
    flavor = GetProjetDirName(env, configuration_name)
    build_dir = env.Dir("build").Dir(flavor)
    return env.SConscript(
        "firmware.scons",
        variant_dir=build_dir,
        duplicate=0,
        exports={
            "ENV": env,
            "fw_build_meta": {
                "type": configuration_name,
                "flavor": flavor,
                "build_dir": build_dir,
            },
        },
    )
[ 129, 7412, 56, 465 ]
def METHOD_NAME(): data = "Rică nu știa să zică râu, rățușcă, rămurică." with tempfile.NamedTemporaryFile(mode="wb") as test_file: test_file.write(str.encode(data, "utf-16")) test_file.seek(0) with utils.open_guess_encoding(test_file.name) as fd: assert fd.read() == data
[ 9, 1452, 1363, 2300 ]
def METHOD_NAME(height, diameter): r""" Calculates volume and total surface of a hot water storage. .. calculate_storage_dimensions-equations: :math:`V = \pi \frac{d^2}{4} \cdot h` :math:`A = \pi d h + \pi \frac{d^2}{2}` Parameters ---------- height : numeric Height of the storage [m] diameter : numeric Diameter of the storage [m] Returns ------- volume : numeric Volume of storage surface : numeric Total surface of storage [m2] """ volume = 0.25 * np.pi * diameter ** 2 * height surface = np.pi * diameter * height + 0.5 * np.pi * diameter ** 2 return volume, surface
[ 1593, 948, 5164 ]
def METHOD_NAME(hamiltonian, eigenvalues, eigenstates):
    test_values, test_states = hamiltonian.eigenstates()
    eigenvalues = np.array(eigenvalues)
    eigenstates = np.array(eigenstates)
    test_order = np.argsort(test_values)
    test_vectors = [_canonicalise_eigenvector(test_states[i].full()) for i in test_order]
    expected_order = np.argsort(eigenvalues)
    expected_vectors = [_canonicalise_eigenvector(eigenstates[i]) for i in expected_order]
    np.testing.assert_allclose(test_values[test_order], eigenvalues[expected_order], atol=1e-10)
    for test, expected in zip(test_vectors, expected_vectors):
        np.testing.assert_allclose(test, expected, atol=1e-10)
[ 9, 3478, 15403 ]
def METHOD_NAME(self):
    bin_obj = self._bin_obj_generator()
    small_table = self.gen_data(10000, 50, 2)
    split_points = bin_obj.fit_split_points(small_table)
    iv_calculator = IvCalculator(adjustment_factor=0.5, role="guest", party_id=9999)
    ivs = iv_calculator.cal_local_iv(small_table, split_points)
    print(f"iv result: {ivs.summary()}")
[ 9, 2754, 8688 ]
def METHOD_NAME():
    # check the influence of the seed when computing the hashes
    raw_X = [
        ["foo", "bar", "baz", "foo".encode("ascii")],
        ["bar".encode("ascii"), "baz", "quux"],
    ]

    raw_X_ = (((f, 1) for f in x) for x in raw_X)
    indices, indptr, _ = _hashing_transform(raw_X_, 2**7, str, False)

    raw_X_ = (((f, 1) for f in x) for x in raw_X)
    indices_0, indptr_0, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=0)
    assert_array_equal(indices, indices_0)
    assert_array_equal(indptr, indptr_0)

    raw_X_ = (((f, 1) for f in x) for x in raw_X)
    indices_1, _, _ = _hashing_transform(raw_X_, 2**7, str, False, seed=1)
    with pytest.raises(AssertionError):
        assert_array_equal(indices, indices_1)
[ 9, 6476, 1053, 484 ]
def METHOD_NAME(shelve_type, g_s_b):
    """
    Test if all parameter combinations from gain (g), slope (s), and
    bandwidth (b) yield the correct values. Two out of the three parameters
    must be given.
    """
    # test with missing third parameter
    g_s_b_test = _shelving_cascade_slope_parameters(
        g_s_b[0], g_s_b[1], None, shelve_type)
    npt.assert_equal(g_s_b_test, g_s_b)

    # test with missing second parameter
    g_s_b_test = _shelving_cascade_slope_parameters(
        g_s_b[0], None, g_s_b[2], shelve_type)
    npt.assert_equal(g_s_b_test, g_s_b)

    # test with missing first parameter
    g_s_b_test = _shelving_cascade_slope_parameters(
        None, g_s_b[1], g_s_b[2], shelve_type)
    npt.assert_equal(g_s_b_test, g_s_b)
[ 9, 15533, 3285, 95, 386 ]
def METHOD_NAME(self, client, host):
    with client.renders_template("generic_form.html"):
        client.assert_url_ok(url_for("host.interface_create", host_id=host.id))
[ 9, 129, 1090, 19 ]
def METHOD_NAME(self, response: HTTPResponse, request: Request) -> Sequence[Cookie]: ...
[ 93, 880 ]
def METHOD_NAME(self): """ Remove all items from this OrderedSet. """ del self.items[:] self.map.METHOD_NAME()
[ 537 ]
def METHOD_NAME(self): "Test updating recipe through auth" changed = auth.create_analysis(project=self.project, json_text=self.recipe.json_text, template=self.recipe.template, uid=self.recipe.uid, update=True) self.assertEqual(changed.uid, self.recipe.uid)
[ 9, 3912, 86 ]
def METHOD_NAME(response): """ handler work_weixin response and errcode """ try: response = response.json() except ValueError: logger.error(response) return None errcode = response.get('errcode', None) if errcode != 0: logger.error(json.dumps(response)) return None return response
[ 1519, 3160, 498, 58, 17 ]
def METHOD_NAME(ds):
    feed_dict = {}
    feed_dict["batch_nodes"] = np.array(ds.nodes_idx, dtype="int64")
    feed_dict["labels"] = np.array(ds.labels, dtype="int64")

    def r():
        yield feed_dict

    return r
[ 324, 2277, -1 ]
def METHOD_NAME(): binary_tteesstt("thicken", centrosome.cpmorphology.thicken)
[ 9, 808, -1 ]
def METHOD_NAME(self): resize = Resize(spatial_size=(128, 64), mode="bilinear") set_track_meta(False) for p in TEST_NDARRAYS_ALL: im = p(self.imt[0]) result = resize(im) assert_allclose(im, result, type_test=False) set_track_meta(True)
[ 9, 16676 ]
def METHOD_NAME(): """Get kwargs for the analyzer. Returns: Info to connect to MISP. """ misp_url = current_app.config.get("MISP_URL") misp_api_key = current_app.config.get("MISP_API_KEY") if not misp_api_key or not misp_url: logger.error("MISP conf not found") return [] matcher_kwargs = [{"misp_url": misp_url, "misp_api_key": misp_api_key}] return matcher_kwargs
[ 19, 1475 ]
def METHOD_NAME(self):
    self.pre_operations()
    yield self.VirtualMachineScaleSetRollingUpgradesCancel(ctx=self.ctx)()
    self.post_operations()
[ 750, 710 ]
def METHOD_NAME(self, path, md5, size): self.records.append(ManifestRecordFile(path, md5, size))
[ 238, 171 ]
def METHOD_NAME(test, params, env): """ Test for adding controller for usb. """ # get the params from params vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) index = params.get("index", "1") index_conflict = "yes" == params.get("index_conflict", "no") index_multiple = index == "multiple" model = params.get("model", "nec-xhci") status_error = "yes" == params.get("status_error", "no") vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() controllers = vm_xml.get_devices(device_type="controller") devices = vm_xml.get_devices() for dev in controllers: if dev.type == "usb": devices.remove(dev) # Test add multiple controllers in disorder. if index_multiple: # Remove input devices dependent on usb controller. inputs = vm_xml.get_devices(device_type="input") for input_device in inputs: if input_device.type_name == "tablet": vm_xml.del_device(input_device) model = 'ich9-hci' # Initialize one usb controller list. controller_list = [('0', 'ich9-uhci2'), ('2', 'ich9-uhci2'), ('0', 'ich9-uhci3'), ('1', 'ich9-uhci1'), ('2', 'ich9-uhci3'), ('0', 'ich9-ehci1'), ('2', 'ich9-ehci1'), ('1', 'ich9-uhci3'), ('1', 'ich9-uhci2'), ('1', 'ich9-ehci1'), ('2', 'ich9-uhci1'), ('0', 'ich9-uhci1')] # Add multiple usb controllers in random order. for usb_tuple in controller_list: controller = Controller("controller") controller.type = "usb" controller.index = usb_tuple[0] controller.model = usb_tuple[1] devices.append(controller) else: controller = Controller("controller") controller.type = "usb" controller.index = index controller.model = model devices.append(controller) if index_conflict: controller_1 = Controller("controller") controller_1.type = "usb" controller_1.index = index devices.append(controller) vm_xml.set_devices(devices) try: try: vm_xml.sync() vm.start() # Validate multiple usb controllers result, disorder controllers will be organized by index group. if index_multiple: validate_multiple_controller(test, vm_name) if status_error: test.fail("Add controller successfully in negative case.") else: try: session = vm.wait_for_login() except (LoginError, ShellError) as e: error_msg = "Test failed in positive case.\n error: %s\n" % e test.fail(error_msg) cmd = "dmesg -c | grep %s" % model.split('-')[-1] stat_dmesg = session.cmd_status(cmd) if stat_dmesg != 0: test.cancel("Fail to run dmesg in guest") session.close() except (LibvirtXMLError, VMStartError) as e: if not status_error: test.fail("Add controller failed. Detail: %s" % e) finally: vm_xml_backup.sync()
[ 22 ]
def METHOD_NAME(): raise m.MyException("nested error")
[ 1471, -1 ]
def METHOD_NAME(cls):
    # import source parameters type, connection, query, path are always str
    return str
[ 19, 616, 1461, 119 ]
def METHOD_NAME(get_configuration):
    """
    Restart Authd.
    """
    truncate_file(LOG_FILE_PATH)
    control_service("restart", daemon=DAEMON_NAME)
[ 1141, 11575 ]
def METHOD_NAME(self, config):
    self.config = config
    self.config.readSubscriptionConf()
    if self.config.is_subscribe_done:
        try:
            from mmc.plugins.dashboard.manager import DashboardManager
            from mmc.plugins.base.panel import SupportPanel

            DM = DashboardManager()
            DM.register_panel(SupportPanel("support"))
        except ImportError:
            pass
[ 176 ]
async def METHOD_NAME(self) -> None: await self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(cls, criteria_cls_name) -> "ComparisonCriteria":
    """`criteria_cls_name` must be a valid criteria class name."""
    return locate(f"framework.stats.criteria.{criteria_cls_name}")
[ 19 ]
def METHOD_NAME(self): """Ensure that creating an instance of a deprecated class emits a warning.""" with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") _ = self.DeprecatedClass() # Instantiate the deprecated class self.assertEqual(len(w), 1)
[ 9, 2, 3527, 3437 ]
def METHOD_NAME(self): urls = { build_operation_url("https://app.stage.neptune.ai", "api/leaderboard/v1/attributes/download"), build_operation_url( "https://app.stage.neptune.ai", "/api/leaderboard/v1/attributes/download", ), build_operation_url( "https://app.stage.neptune.ai/", "api/leaderboard/v1/attributes/download", ), build_operation_url( "https://app.stage.neptune.ai/", "/api/leaderboard/v1/attributes/download", ), build_operation_url("app.stage.neptune.ai", "api/leaderboard/v1/attributes/download"), build_operation_url("app.stage.neptune.ai", "/api/leaderboard/v1/attributes/download"), build_operation_url("app.stage.neptune.ai/", "api/leaderboard/v1/attributes/download"), build_operation_url("app.stage.neptune.ai/", "/api/leaderboard/v1/attributes/download"), } self.assertEqual( {"https://app.stage.neptune.ai/api/leaderboard/v1/attributes/download"}, urls, )
[ 9, 4218, 2206, 274 ]
def METHOD_NAME(self, signal_name, callback):
    """
    Connect a responder to a specific signal.

    Args:
        signal_name (str): The name of the signal to respond to
        callback (callable): The callable that is called when the signal is queried
    """
    signal_responders = self.responders.setdefault(signal_name, [])
    if callback not in signal_responders:
        signal_responders.append(callback)
[ 238, 11677 ]