text — string, lengths 15 to 7.82k
ids — sequence, lengths 1 to 7
def METHOD_NAME(self, value):
    value = normalizers.normalizeTransformationMatrix(value)
    self._set_transformation(value)
[ 0, 414, 1224 ]
async def METHOD_NAME(self):
    request_context = RequestContext.test_context()
    request_context.message_receipt = MessageReceipt()
    request_context.connection_record = async_mock.MagicMock()

    with async_mock.patch.object(
        test_module, "CredentialManager", autospec=True
    ) as mock_cred_mgr:
        mock_cred_mgr.return_value.receive_credential = async_mock.CoroutineMock()
        request_context.message = CredentialIssue()
        request_context.connection_ready = False
        handler = test_module.CredentialIssueHandler()
        responder = MockResponder()
        with self.assertRaises(test_module.HandlerException) as err:
            await handler.handle(request_context, responder)
        assert err.exception.message == "Connection used for credential not ready"

    assert not responder.messages
[ 9, 259, 130, 1338 ]
def METHOD_NAME(self, *, stream):
    stream._finalized = True
    assert stream.group_by(Mock()) is stream
[ 9, 846, 604, 10148 ]
def METHOD_NAME(self):
[ 9, 434, 1499, 6638, 489, 724, 920 ]
def METHOD_NAME(self):
    optprobes_file = "/proc/sys/debug/kprobes-optimization"
    if not self.check_kernel_support():
        self.log.info("No support available for optprobes, skipping optprobes test")
        return
    if not os.path.exists(optprobes_file):
        self.log.info("optprobes control file %s missing, skipping optprobes test", optprobes_file)
        return
    cur_val = genio.read_one_line(optprobes_file)
    genio.write_one_line(optprobes_file, "0")
    self.log.info("================= Disabling optprobes ==================")
    if "0" not in genio.read_one_line(optprobes_file):
        self.fail("Not able to disable optprobes")
    self.execute_test()
    self.log.info("================= Restoring optprobes ==================")
    genio.write_one_line(optprobes_file, cur_val)
    if cur_val not in genio.read_one_line(optprobes_file):
        self.fail("Not able to restore optprobes to %s", cur_val)
[ -1, 193, 9 ]
def METHOD_NAME():
    matrix = [[[1, 2, 3], [7, 8, 9], [4, 5, 6]]]
    trans = linear_transforms(dims=['x'], values=matrix)
    assert trans.unit == 'one'
[ 9, 1783, 9652, 235, 805, 137, 12035 ]
def METHOD_NAME(self):
    """ Test 'force_publish' command with invalid course key """
    errstring = "Invalid course key."
    with self.assertRaisesRegex(CommandError, errstring):
        call_command('force_publish', 'TestX/TS01')
[ 9, 532, 1122, 59 ]
def METHOD_NAME():
    v1 = memref.Alloc.get(f32, shape=[2]).memref
    riscv.CustomAssemblyInstructionOp("do_stuff_with_alloc", (v1,), ())
[ 53, 5262 ]
def METHOD_NAME(self, format_json): return METHOD_NAME(format_json)
[ 19, 275, 2718 ]
def METHOD_NAME(params):
    buffer = self.buffer
    nonzero_buffer = False
    if len(params) > 1:
        offset = 0
        for p in params:
            sz = p.numel()
            if p.grad is not None:
                buffer[offset : offset + sz].copy_(p.grad.data.view(-1))
                nonzero_buffer = True
            else:
                buffer[offset : offset + sz].zero_()
            offset += sz
    else:
        # we only have a single grad to all-reduce
        p = params[0]
        if p.grad is not None:
            buffer = p.grad.data
            nonzero_buffer = True
        elif p.numel() <= self.buffer.numel():
            buffer = buffer[: p.numel()]
            buffer.zero_()
        else:
            buffer = torch.zeros_like(p)

    if nonzero_buffer:
        buffer.div_(self.world_size)

    utils.all_reduce(buffer, self.process_group)

    # copy all-reduced grads back into their original place
    offset = 0
    for p in params:
        sz = p.numel()
        if p.grad is not None:
            p.grad.data.copy_(buffer[offset : offset + sz].view_as(p))
        else:
            p.grad = buffer[offset : offset + sz].view_as(p).clone()
        offset += sz
[ 75, 332, 434 ]
def METHOD_NAME(): pass
[ 717 ]
def METHOD_NAME(self, data):
    mimetype = 'application/json'
    name = data.get('name')

    if name is None:
        raise ProcessorExecuteError('Cannot process without a name')

    message = data.get('message', '')
    value = f'Hello {name}! {message}'.strip()

    outputs = {
        'id': 'echo',
        'value': value
    }

    return mimetype, outputs
[ 750 ]
def METHOD_NAME(): return CommandHandler('time', declare_time)
[ 5995, 104, 475, 182 ]
def METHOD_NAME(self, input, errors="strict"): return self._map(codecs.METHOD_NAME, str, UnicodeDecodeError, input, errors)
[ 1268 ]
async def METHOD_NAME(app): app['bgtask_dummy_controls_msg'] = asyncio.create_task(dummy_controls_msg(app))
[ 447, 2272, 620 ]
def METHOD_NAME(ping): return ( numpy.array(ping.data, dtype=numpy.float64) .reshape((ping.samples, ping.channels)) .transpose() )
[ 1677, 24, 700 ]
def METHOD_NAME(self, event_type: str, fl_ctx: FLContext):
    if event_type == EventType.START_RUN:
        self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx)
    elif event_type == EventType.END_RUN:
        self.tenseal_context = None
[ 276, 417 ]
def METHOD_NAME(test_client):
    rv = test_client.get(f'/admin/re-do_analysis/{test_fw_a.uid}')
    assert (
        b'<input type="hidden" name="file_name" id="file_name" value="'
        + test_fw_a.file_name.encode()
        + b'">' in rv.data
    ), 'file name not set in re-do page'
[ 2647, 74, 689, 19 ]
def METHOD_NAME(self): return self._last_triggered_step
[ 679, 6819, 367 ]
def METHOD_NAME(self):
    resp = self.client.post(reverse("competitions:submission_delete", kwargs={"pk": self.submission_1.pk}))
    self.assertEqual(resp.status_code, 302)
[ 9, 34, 1179, 610, 1612, 217, 130 ]
def METHOD_NAME(self):
    scheme = self.scheme
    if scheme == 'https':
        url = 'https://%s:%s/xmlrpc/' % (self.interface(), self.PORT)
        proxy = ServerProxy(url, transport=HTTPSTransport())
    else:
        url = 'http://%s:%s/xmlrpc/' % (self.interface(), self.PORT)
        proxy = ServerProxy(url)

    # begin the tests ...
    self.getPage('/xmlrpc/foo')
    self.assertBody('Hello world!')

    self.assertEqual(proxy.return_single_item_list(), [42])
    self.assertNotEqual(proxy.return_single_item_list(), 'one bazillion')
    self.assertEqual(proxy.return_string(), 'here is a string')
    self.assertEqual(proxy.return_tuple(), list(('here', 'is', 1, 'tuple')))
    self.assertEqual(proxy.return_dict(), {'a': 1, 'c': 3, 'b': 2})
    self.assertEqual(proxy.return_composite(), [{'a': 1, 'z': 26}, 'hi', ['welcome', 'friend']])
    self.assertEqual(proxy.return_int(), 42)
    self.assertEqual(proxy.return_float(), 3.14)
    self.assertEqual(proxy.return_datetime(), DateTime((2003, 10, 7, 8, 1, 0, 1, 280, -1)))
    self.assertEqual(proxy.return_boolean(), True)
    self.assertEqual(proxy.test_argument_passing(22), 22 * 2)

    # Test an error in the page handler (should raise an xmlrpclib.Fault)
    try:
        proxy.test_argument_passing({})
    except Exception:
        x = sys.exc_info()[1]
        self.assertEqual(x.__class__, Fault)
        self.assertEqual(x.faultString, ('unsupported operand type(s) '
                                         "for *: 'dict' and 'int'"))
    else:
        self.fail('Expected xmlrpclib.Fault')

    # https://github.com/cherrypy/cherrypy/issues/533
    # if a method is not found, an xmlrpclib.Fault should be raised
    try:
        proxy.non_method()
    except Exception:
        x = sys.exc_info()[1]
        self.assertEqual(x.__class__, Fault)
        self.assertEqual(x.faultString, 'method "non_method" is not supported')
    else:
        self.fail('Expected xmlrpclib.Fault')

    # Test returning a Fault from the page handler.
    try:
        proxy.test_returning_Fault()
    except Exception:
        x = sys.exc_info()[1]
        self.assertEqual(x.__class__, Fault)
        self.assertEqual(x.faultString, ('custom Fault response'))
    else:
        self.fail('Expected xmlrpclib.Fault')
[ 9, 399, 1064 ]
def METHOD_NAME(
    self,
) -> ModbusPDUWriteMultipleHoldingRegistersResponse:
    modbus_pdu_write_multiple_holding_registers_response: ModbusPDUWriteMultipleHoldingRegistersResponse = ModbusPDUWriteMultipleHoldingRegistersResponse(
        self.starting_address, self.quantity
    )
    return modbus_pdu_write_multiple_holding_registers_response
[ 56 ]
def METHOD_NAME():
    mock_data = {
        "userID": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        "itemID": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        "rating": [2.0, 4.0, 1.0, 4.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0],
        "genre": [
            "Action|Comedy", "Drama", "Drama|Romance|War", "Drama|Sci-Fi", "Horror",
            "Action|Horror|Sci-Fi|Thriller", "Drama|Romance|War", "Western", "Comedy", "Horror",
        ],
        "occupation": [
            "engineer", "student", "retired", "administrator", "writer",
            "administrator", "student", "executive", "student", "other",
        ],
    }
    return pd.DataFrame(mock_data)
[ 2057 ]
def METHOD_NAME(self): return self._validation_support
[ 19, 437, 167 ]
def METHOD_NAME(video_prop, fps, score_thread, iou_thread, \
                nms_id=5, nms_thread=0.01, nms_delta=10, backgroundid=0):
    """process_video_classify"""
    prop_filter = []
    for item in video_prop:
        if item[2] == backgroundid:
            continue
        prop_filter.append(item)
    # prop_filter = sorted(prop_filter, key=lambda x: x[nms_id], reverse=True)
    prop_filter = base_nms(prop_filter, nms_thread, nms_delta, nms_id)
    prop_filter = sorted(prop_filter, key=lambda x: x[0])
    video_results = []
    for item in prop_filter:
        start_sec = item[0] / fps
        end_sec = item[1] / fps
        start_id_frame = item[0]
        end_id_frame = item[1]
        # start_time = "%02d:%02d:%02d" % ((start_id_frame / fps) / 3600, \
        #     ((start_id_frame / fps) % 3600) / 60, (start_id_frame / fps) % 60)
        # end_time = "%02d:%02d:%02d" % ((end_id_frame / fps) / 3600, \
        #     ((end_id_frame / fps) % 3600) / 60, (end_id_frame / fps) % 60)
        start_time = int(start_id_frame / fps)
        end_time = int(end_id_frame / fps)
        label_id = item[2]
        label_name = item[3]
        label_classify_score = item[4]
        label_iou_score = item[5]
        if label_classify_score > score_thread and label_iou_score > iou_thread:
            video_results.append({"start_time": start_time,
                                  "end_time": end_time,
                                  "label_id": label_id,
                                  "label_name": label_name,
                                  "classify_score": label_classify_score,
                                  "iou_score": label_iou_score})
    return video_results
[ 356, 1781, 6144 ]
def METHOD_NAME(country_match_scope, country_match_country):
    country_match = {"match": {"{}_country_code".format(country_match_scope): country_match_country}}
    return country_match
[ 56, 1078, 590 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_query_param(
            "api-version", "2016-03-01",
            required=True,
        ),
    }
    return parameters
[ 539, 386 ]
def METHOD_NAME(self):
    # Gets the schema.json file from the modeling rule folder
    schema_files = list(
        Path(self.file_path).parent.glob(
            "*_[sS][cC][hH][eE][mM][aA].[jJ][sS][oO][nN]"
        )
    )
    has_schema = len(schema_files) > 0
    if not has_schema:
        error_message, error_code = Errors.modeling_rule_missing_schema_file(
            self.file_path
        )
        if self.handle_error(error_message, error_code, file_path=self.file_path):
            self._is_valid = False
            return has_schema
    return has_schema
[ 137, 135, 171, 954 ]
def METHOD_NAME(self, other): return len(self.intersection(other)) == 0
[ 9390 ]
def METHOD_NAME(error):
    log.error(error.exception)
    return repr(error.exception)
[ 168, 1519, 4616 ]
def METHOD_NAME(self, message_payload: str) -> Any:
[ 2411, 277 ]
def METHOD_NAME(self, new_value): raise ShellClassError("Must implement set_value in {0}".format(self.__class__.__name__))
[ 0, 99 ]
def METHOD_NAME(self):
    package_ids = {}
    h = rhnSQL.prepare(self._query_get_packages)
    for channel_id in list(self._channels_hash.values()):
        h.execute(channel_id=channel_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            package_id = row['package_id']
            package_ids[package_id] = (row['path'], row['header_start'], row['header_end'])

    self._channel_packages = {}
    orphaned_packages = {}
    # Now, for each package, get the channels it's part of
    h = rhnSQL.prepare(self._query_get_channel_packages)
    for package_id in package_ids:
        h.execute(package_id=package_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            channel_label = row['label']
            if package_id in self._channel_packages:
                l = self._channel_packages[package_id]
            else:
                l = self._channel_packages[package_id] = []
            l.append(channel_label)
            if channel_label not in self._channels_hash:
                orphaned_packages[package_id] = None

    if orphaned_packages:
        print("Bailing out because of packages shared with other channels")
        for package_id in orphaned_packages:
            channels = self._channel_packages[package_id]
            print((package_id, channels))
        return None

    return package_ids
[ 19, 2975 ]
def METHOD_NAME() -> int: return getProcessLogonSessionId(winKernel.GetCurrentProcess())
[ 19, 1056, 356, 15702, 240, 147 ]
def METHOD_NAME(path):
    import stat
    st = os.stat(path)
    os.chmod(path, st.st_mode | stat.S_IEXEC)
[ 93, 2777 ]
def METHOD_NAME(file_path):
    snippets = {}
    with open(file_path) as f:
        lines = iter(f)
        for line in lines:
            if _is_start(line):
                break
        else:
            raise ParseError()
        targets = []
        code_lines = []
        for line in lines:
            target_match = _match_target(line)
            if target_match:
                if code_lines:
                    code = ''.join(code_lines).rstrip()
                    for target in targets:
                        snippets[target] = code
                    del code_lines[:], targets[:]
                targets.append(target_match.group(1))
            elif _ignored(line):
                pass
            elif _is_end(line):
                break
            else:
                code_lines.append(line)
        else:
            if not snippets:
                raise ParseError()
    return snippets
[ 214, -1 ]
def METHOD_NAME(text, tag, slot):
    i = 0
    while text[i] in "><=~":
        i += 1
    op = text[:i]
    text = text[i:]
    range = ops[op]
    slot = f' slot="{slot}"' if slot else ""
    return f'<{tag} range="{range}"{slot}>{text}</{tag}>'
[ 197, 661 ]
def METHOD_NAME(self):
    copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
    cmake = CMake(self)
    cmake.install()
    rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
    rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
[ 360 ]
def METHOD_NAME(tmp_path):
    tmpfile = os.path.join(str(tmp_path), "compressed.asdf")

    zlib_data = np.array(list(range(1000)))
    bzp2_data = np.array(list(range(1000)))
    tree = {"zlib_data": zlib_data, "bzp2_data": bzp2_data}

    with asdf.AsdfFile(tree) as af_out:
        af_out.set_array_compression(zlib_data, "zlib", level=1)
        af_out.set_array_compression(bzp2_data, "bzp2", compresslevel=9000)
        with pytest.raises(ValueError, match=r"compresslevel must be between 1 and 9"):
            af_out.write_to(tmpfile)
        af_out.set_array_compression(bzp2_data, "bzp2", compresslevel=9)
        af_out.write_to(tmpfile)
        assert af_out.get_array_compression_kwargs(bzp2_data)["compresslevel"] == 9

    with asdf.open(tmpfile) as af_in:
        assert af_in.get_array_compression(af_in.tree["zlib_data"]) == "zlib"
        assert af_in.get_array_compression(af_in.tree["bzp2_data"]) == "bzp2"
[ 9, 0, 877, 4483 ]
def METHOD_NAME(self):
    # Given
    feature_segment = FeatureSegment.objects.create(
        feature=self.remote_config,
        segment=self.segment,
        environment=self.environment,
        priority=1,
    )
    new_env = Environment.objects.create(
        name="Test environment New", project=self.project
    )

    # When
    feature_segment_clone = feature_segment.clone(new_env)

    # Then
    assert feature_segment_clone.id != feature_segment.id
    assert feature_segment_clone.priority == feature_segment.priority
    assert feature_segment_clone.environment.id == new_env.id
[ 9, 670, 576, 385, 80, 279 ]
def METHOD_NAME(modules):
    """Give the pure yaml dictionaries."""
    # this is the pure yaml config
    cfg = read_from_yaml(modules.DEFAULT_CONFIG)
    return cfg, modules
[ 298, 406, 5260 ]
def METHOD_NAME(
    self,
    prev_output_tokens,
    src_lengths=None,
    incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
    encoder_out=None,
):
    features = self.extract_features(prev_output_tokens, incremental_state)
    lm_logits = self.model.lm_head(features)
    return (lm_logits,)
[ 76 ]
def METHOD_NAME(self):
    registration_admin = RegistrationAdmin(Registration, self.site)
    request = self.factory.get("/fake-url/")
    request.user = self.admin

    self.assertEqual(["id"], registration_admin.get_readonly_fields(request))
    self.assertEqual(
        ["id", "event"],
        registration_admin.get_readonly_fields(request, self.registration),
    )
[ 9, 2776, 342 ]
async def METHOD_NAME(self, event): await self.send(text_data=json.dumps({"update": True}))
[ 2831, 3457 ]
def METHOD_NAME(self):
    opname = dis.opname
    opmap = dis.opmap
    self.assertEqual(opname[opmap["LOAD_FAST"]], "LOAD_FAST")
[ 9, 16585 ]
def METHOD_NAME(handle, sfp_module):
    port_attributes_list = new_sx_port_attributes_t_arr(SX_PORT_ATTR_ARR_SIZE)
    port_cnt_p = new_uint32_t_p()
    uint32_t_p_assign(port_cnt_p, SX_PORT_ATTR_ARR_SIZE)

    rc = sx_api_port_device_get(handle, DEVICE_ID, SWITCH_ID, port_attributes_list, port_cnt_p)
    assert rc == SX_STATUS_SUCCESS, "sx_api_port_device_get failed, rc = %d" % rc
    port_cnt = uint32_t_p_value(port_cnt_p)

    log_port_list = []
    for i in range(0, port_cnt):
        port_attributes = sx_port_attributes_t_arr_getitem(port_attributes_list, i)
        if not is_nve(int(port_attributes.log_port)) \
           and not is_cpu(int(port_attributes.log_port)) \
           and port_attributes.port_mapping.module_port == sfp_module \
           and is_port_admin_status_up(port_attributes.log_port):
            log_port_list.append(port_attributes.log_port)
    return log_port_list
[ 19, 390, 907 ]
def METHOD_NAME(cls, PathStr):
    if TAB_WORKSPACE in PathStr:
        PathList = PathStr.split()
        if PathList:
            for i, str in enumerate(PathList):
                MacroStartPos = str.find(TAB_WORKSPACE)
                if MacroStartPos != -1:
                    Substr = str[MacroStartPos:]
                    Path = Substr.replace(TAB_WORKSPACE, cls.WORKSPACE).strip()
                    if not os.path.exists(Path):
                        for Pkg in cls.PACKAGES_PATH:
                            Path = Substr.replace(TAB_WORKSPACE, Pkg).strip()
                            if os.path.exists(Path):
                                break
                    PathList[i] = str[0:MacroStartPos] + Path
            PathStr = ' '.join(PathList)
    return PathStr
[ 276, 368, 1307 ]
def METHOD_NAME(self):
    hmap = HoloMap({0: Image(np.random.rand(10,10))}).options(xaxis=None)
    opts = Store.lookup_options('matplotlib', hmap.last, 'plot')
    self.assertIs(opts.kwargs['xaxis'], None)
[ 9, 8085, 1881 ]
def METHOD_NAME(self, option: str) -> bool:
    """Check if `option` is present."""
    return self._get_index(option) is not None
[ 1992 ]
def METHOD_NAME(): assert utils.get_attr_chain(A(1), "") is None
[ 9, 19, 35, 864 ]
def METHOD_NAME(scene):
    a = Annulus()
    scene.add(a)
[ 9, 12262 ]
async def METHOD_NAME(
    coresys: CoreSys,
    addonsdata_system: dict[str, Data],
    capture_exception: Mock,
    os_environ,
):
    """Test error adding host when addon is run."""
    await coresys.dbus.timedate.connect(coresys.dbus.bus)
    docker_addon = get_docker_addon(
        coresys, addonsdata_system, "basic-addon-config.json"
    )

    with patch.object(DockerAddon, "stop"), patch.object(
        AddonOptions, "validate", new=PropertyMock(return_value=lambda _: None)
    ), patch.object(PluginDns, "add_host", side_effect=(err := CoreDNSError())):
        await docker_addon.run()

    capture_exception.assert_called_once_with(err)
[ 9, 1555, 22, 238, 1806, 168 ]
def METHOD_NAME(sender, status, subject):
    if status == STATUS_STARTED:
        status = "RUNNING"
    else:
        status = status.upper()
    _save_status(status, subject)
    _cleanup(subject.app_name, subject.namespace)
[ 276, 900 ]
def METHOD_NAME(self, clusterName, nodeName, timeStamp, params):
    """
    Send multiple monitored parameters to MonALISA.

    - clusterName is the name of the cluster being monitored. The first time
      this function is called, this paramenter must not be None. Then, it can
      be None; last given clusterName will be used instead.
    - nodeName is the name of the node for which are the parameters. If this
      is None, the full hostname of this machine will be sent instead.
    - timeStamp, if > 0, is given time for the parameters. This is in seconds
      from Epoch. Note that this option should be used only if you are sure
      about the time for the result. Otherwize, the parameters will be
      assigned a correct time(obtained from NTP servers) in MonALISA service.
      This option can be usefull when parsing logs, for example.
    - params is a dictionary containing pairs with:
        - key: parameter name
        - value: parameter value, either int or float.
      or params is a vector of tuples(key, value). This version can be used
      in case you want to send the parameters in a given order.

    NOTE that python doesn't know about 32-bit floats(only 64-bit floats!)
    """
    return
[ 353, 3516, 386 ]
def METHOD_NAME(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool: return True
[ 137, 735, 1205 ]
def METHOD_NAME(self):
    app = METHOD_NAME(testing=True)
    app.secret_key = "secret_key"
    app.config["WTF_CSRF_METHODS"] = []
    return app
[ 129, 991 ]
def METHOD_NAME(self, config_path):
    with open(config_path, 'r') as f:
        cluster_config = json.load(f)
    workers = cluster_config["workers"].values()
    for worker in workers:
        host = worker['host']
        port = worker['port']
        self.add_worker(host, port)
[ 238, 5930, 280, 2059, 200 ]
def METHOD_NAME(job, jobname, case, ensemble):
    env_batch = case.get_env("batch")
    batch_system_type = env_batch.get_batch_system_type()
    batchsubmit = env_batch.get_value("batch_submit")
    submit_args = env_batch.get_submit_args(case, job)
    case_path_string = cylc_get_case_path_string(case, ensemble)

    return (
        """
    [[{jobname}<member>]]
        script = cd {case_path_string} ./case.submit --job {job}
        [[[job]]]
            batch system = {batch_system_type}
            batch submit command template = {batchsubmit} {submit_args} '%(job)s'
        [[[directives]]]
[ -1, 2277, 202, 671 ]
def METHOD_NAME(self) -> str:
    """
    The name of the resource
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(filename, fmt=None, verbose=False):
    def loader(filename):
        return load_dataset(filename, format_name=fmt)

    return _load_file(filename, loader, "parameter", verbose)
[ 557, 126, 171 ]
def METHOD_NAME(model, model_name="main", out_dir="."):
    """Compile coreml model and return the compiled model path."""
    mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel")
    mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc")
    metadata = {"inputs": list(model.input_description), "outputs": list(model.output_description)}
    # Use the description field to send info to CoreML runtime
    model.short_description = json.dumps(metadata)
    model.save(mlmodel_path)

    res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir])
    if not os.path.isdir(mlmodelc_path):
        raise RuntimeError(f"Compile failed: {res}")
    return mlmodelc_path
[ 296, -1 ]
def METHOD_NAME(self): return self._optimizer.METHOD_NAME()
[ 19, 6941 ]
def METHOD_NAME(builder, blockMap):
    """This method is deprecated. Please switch to AddBlockMap."""
    return AddBlockMap(builder, blockMap)
[ 6582, 386, 238, 573, 422 ]
def METHOD_NAME(self, sections_info_dump):
    first_line = sections_info_dump.readline()

    archive_path = (Literal('In archive').suppress() +
                    White().suppress() +
                    # trim the colon and line ending characters from archive_path
                    rest_of_line.set_results_name('archive_path').set_parse_action(
                        lambda s, loc, toks: s.rstrip(':\n\r ')))
    parser = archive_path

    try:
        results = parser.parseString(first_line, parseAll=True)
    except ParseException as p:
        raise ParseException('Parsing sections info for library ' + sections_info_dump.name + ' failed. ' + p.msg)

    archive = os.path.basename(results.archive_path)
    self.sections[archive] = EntityDB.__info(sections_info_dump.name, sections_info_dump.read())
[ 238, 1446, 100 ]
def METHOD_NAME(tags: Iterable[str]) -> Iterable[str]:
    """Performance optimization to normalize tags only once."""
    if isinstance(tags, NormalizedTags):
        return tags
    if isinstance(tags, str):
        tags = [tags]
    return NormalizedTags([normalize(t, ignore='_') for t in tags])
[ 1137, 114 ]
def METHOD_NAME(self):
    if self.tbai_certificate_id:
        return self.tbai_certificate_id.get_p12()
    else:
        return None
[ 9020, 1548, 19, 11432 ]
def METHOD_NAME(test_case, device, shape):
    x = flow.randn(shape)
    y = flow.randn_like(x, device=flow.device(device))
    test_case.assertTrue(x.shape == y.shape)
[ 9, 8620 ]
def METHOD_NAME() -> str:
    """响应触发完整消息"""
    return Depends(_fullmatch, use_cache=False)
[ 10326 ]
def METHOD_NAME(self):
    """
    Test that I{get} and I{put} commands are responded to correctly by
    L{postfix.PostfixTCPMapServer} when its factory is an instance of
    L{postifx.PostfixTCPMapDeferringDictServerFactory}.
    """
    factory = postfix.PostfixTCPMapDeferringDictServerFactory(self.data)
    transport = StringTransport()
    protocol = postfix.PostfixTCPMapServer()
    protocol.service = factory
    protocol.factory = factory
    protocol.makeConnection(transport)

    for input, expected_output in self.chat:
        protocol.lineReceived(input)
        self.assertEqual(
            transport.value(),
            expected_output,
            "For {!r}, expected {!r} but got {!r}".format(
                input, expected_output, transport.value()
            ),
        )
        transport.clear()
    protocol.setTimeout(None)
[ 9, 2514, 3337 ]
def METHOD_NAME(self):
[ 600, 2305 ]
def METHOD_NAME(self):
[ 102, 3239, -1, 463 ]
async def METHOD_NAME(
    db,
    policy,
    rollbar_connection_config,
    rollbar_dataset_config,
    rollbar_identity_email,
) -> None:
    """Full access request based on the Rollbar SaaS config"""
    privacy_request = PrivacyRequest(
        id=f"test_rollbar_access_request_task_{random.randint(0, 1000)}"
    )
    identity = Identity(**{"email": rollbar_identity_email})
    privacy_request.cache_identity(identity)

    dataset_name = rollbar_connection_config.get_saas_config().fides_key
    merged_graph = rollbar_dataset_config.get_graph()
    graph = DatasetGraph(merged_graph)

    v = await graph_task.run_access_request(
        privacy_request,
        policy,
        graph,
        [rollbar_connection_config],
        {"email": rollbar_identity_email},
        db,
    )

    assert_rows_match(
        v[f"{dataset_name}:projects"],
        min_size=1,
        keys=["id", "account_id", "status", "date_created", "date_modified", "name"],
    )
    assert_rows_match(
        v[f"{dataset_name}:project_access_tokens"],
        min_size=1,
        keys=["project_id", "name", "status", "date_created", "date_modified", "scopes"],
    )
    assert_rows_match(
        v[f"{dataset_name}:instances"],
        min_size=1,
        keys=["id", "project_id", "timestamp", "version", "data", "billable", "item_id"],
    )

    # verify we only returned data for our identity email
    for instance in v[f"{dataset_name}:instances"]:
        assert instance["data"]["person"]["email"] == rollbar_identity_email
[ 9, 17037, 1089, 377, 758 ]
def METHOD_NAME():
    """Update PATH to include installed Multipass, if not already set."""
    assert sys.platform == "win32"
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment")
    paths = os.environ["PATH"].split(";")

    # Drop empty placeholder for trailing comma, if present.
    if paths[-1] == "":
        del paths[-1]

    reg_user_path, _ = winreg.QueryValueEx(key, "Path")
    for path in reg_user_path.split(";"):
        if path not in paths and "Multipass" in path:
            paths.append(path)

    # Restore path with trailing comma.
    os.environ["PATH"] = ";".join(paths) + ";"
[ 3239, 1372, -1, 157, 485 ]
def METHOD_NAME(self):
    self.tool._selected_hotspot = "top"
    value = (self.component.position[:], self.component.bounds[:])
    deltas_and_results = [
        ([10, 10], ([50, 50], [100, 110])),
        ([-10, 10], ([50, 50], [100, 110])),
        ([10, -10], ([50, 50], [100, 90])),
        ([-10, -10], ([50, 50], [100, 90])),
        ([10, -90], ([50, 50], [100, 20])),
        ([10, -80], ([50, 50], [100, 20])),
        ([10, -79], ([50, 50], [100, 21])),
    ]
    for (x, y), (position, bounds) in deltas_and_results:
        self.tool.set_delta(value, x, y)
        self.assertEqual(self.component.position, position)
        self.assertEqual(self.component.bounds, bounds)
[ 9, 0, 1364, 1635 ]
def METHOD_NAME(self, inp_fname, options, exp_format, prefix):
    hashkey = (inp_fname + str(options) +
               str(self.builder.config.xfig_fig2dev) +
               str(self.builder.config.xfig_fig2dev_args)
               ).encode('utf-8')
    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), exp_format)
    print(fname)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir,
                      getattr(self.builder, 'imagedir', '_images'),
                      fname)

    if path.isfile(outfn):
        return relfn, outfn

    ensuredir(path.dirname(outfn))

    xfig_args = [self.builder.config.xfig_fig2dev]
    xfig_args = ['fig2dev']
    xfig_args.extend(['-L', exp_format])
    xfig_args.extend(self.builder.config.xfig_fig2dev_args)
    xfig_args.extend(options)
    xfig_args.append(inp_fname)
    xfig_args.append(outfn)
    print(xfig_args)
    try:
        p = Popen(xfig_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
        p.wait()
    except OSError as err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        self.builder.warn('xfig command %r cannot be run (needed for xfig '
                          'output), check the xfig_fig2dev setting' %
                          self.builder.config.xfig_fig2dev)
        return None, None
    return relfn, outfn
[ -1, 171 ]
def METHOD_NAME():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    parser.add_argument(
        "--header-path",
        metavar="HEADER_FILE",
        help="""
[ 57 ]
def METHOD_NAME(exc_to_raise, exc): ...
[ 447, 1245, 442, 1155 ]
def METHOD_NAME(container_registry, application):
    with patch("UM.Settings.ContainerRegistry.ContainerRegistry.getInstance", MagicMock(return_value = container_registry)):
        with patch("cura.CuraApplication.CuraApplication.getInstance", MagicMock(return_value=application)):
            container_tree = ContainerTree()
            # Mock so that we can track whether the getQualityGroups function gets called with correct parameters.
            container_tree.machines._machines["current_global_stack"] = MagicMock()
            result = container_tree.getCurrentQualityChangesGroups()

            # As defined in the fixture for application.
            expected_variant_names = ["current_global_stack_left_variant_name", "current_global_stack_right_variant_name"]
            expected_material_base_files = ["current_global_stack_left_material_base_file", "current_global_stack_right_material_base_file"]
            expected_is_enabled = [True, True]
            container_tree.machines["current_global_stack"].getQualityChangesGroups.assert_called_with(expected_variant_names, expected_material_base_files, expected_is_enabled)
            assert result == container_tree.machines["current_global_stack"].getQualityChangesGroups.return_value
[ 9, 19, 1056, 4391, 1103, 861 ]
def METHOD_NAME(self): return "double"
[ 232, 4120, 669 ]
def METHOD_NAME(self): return "GET"
[ 103 ]
def METHOD_NAME(self, sample: list) -> list:
    filtered_sample = [s for s in sample if isinstance(s[1], numbers.Number)]
    cfg, _ = max(filtered_sample, key=lambda x: x[1])
    return cfg["arch_seq"]
[ 1472, 935 ]
def METHOD_NAME(self, hosting_provider, base_importer):
    """
    Test saving Autonomous System Numbers (ASN) networks to the database
    """
    testing_asn = "AS27407"
    assert (
        GreencheckASN.objects.all().count() == 0
    )  # Test: database is empty (for ASN)

    hosting_provider.save()  # Initialize hosting provider in database

    # Import a single ASN network
    BaseImporter.save_asn(base_importer, testing_asn)
    assert (
        GreencheckASN.objects.all().count() == 1
    )  # Test: ASN is saved after insertion
[ 9, 73, 2616 ]
def METHOD_NAME(numnodes, printnodes):
    os.system('killall lzilliqa')
    if os.path.exists(LOCAL_RUN_FOLDER) != True:
        # shutil.rmtree(LOCAL_RUN_FOLDER)
        os.makedirs(LOCAL_RUN_FOLDER)

    for x in range(0, numnodes):
        testsubdir = LOCAL_RUN_FOLDER + 'node_' + str(x+1).zfill(4)
        if os.path.exists(testsubdir) != True :
            os.makedirs(testsubdir)
        shutil.copyfile('./tests/Zilliqa/zilliqa', testsubdir + '/lzilliqa')
        st = os.stat(testsubdir + '/lzilliqa')
        os.chmod(testsubdir + '/lzilliqa', st.st_mode | stat.S_IEXEC)

    if printnodes:
        testfolders_list = get_immediate_subdirectories(LOCAL_RUN_FOLDER)
        count = len(testfolders_list)
        for x in range(0, count):
            print ('[Node ' + str(x + 1).ljust(3) + '] [Port ' + str(NODE_LISTEN_PORT + x) + '] ' + LOCAL_RUN_FOLDER + testfolders_list[x])

        keypairs = []
        # Generate keypairs (sort by public key)
        for x in range(0, count):
            process = Popen(["./tests/Zilliqa/genkeypair"], stdout=PIPE, universal_newlines=True)
            (output, err) = process.communicate()
            exit_code = process.wait()
            keypairs.append(output.strip())
        keypairs.sort()

        patch_lookup_pubkey(LOCAL_FOLDER + "/constants_local.xml", keypairs, count)
        patch_seed_pubkey(LOCAL_FOLDER + "/constants_local.xml", keypairs, count)

        nodes = ET.Element("nodes")

        # Store sorted keys list in text file
        keys_file = open(LOCAL_RUN_FOLDER + 'keys.txt', "w")
        for x in range(0, count):
            keys_file.write(keypairs[x] + '\n')
            keypair = keypairs[x].split(" ")
            if (x < count):
                peer = ET.SubElement(nodes, "peer")
                ET.SubElement(peer, "pubk").text = keypair[0]
                ET.SubElement(peer, "ip").text = '127.0.0.1'
                ET.SubElement(peer, "port").text = str(NODE_LISTEN_PORT + x)
        keys_file.close()
[ 22, 102 ]
def METHOD_NAME():
    # import common.test_utils as t_utils; t_utils.save_to_test(request.json,"tests/lets_talk_in.json",indent=4)  # TEST
    # responses = handler(request.json, RANDOM_SEED)  # TEST
    # import common.test_utils as t_utils; t_utils.save_to_test(responses,"tests/lets_talk_out.json",indent=4)  # TEST
    responses = handler(request.json)
    return jsonify(responses)
[ 5792 ]
def METHOD_NAME():
    def resolve(records):
        classical_data = cirq.ClassicalDataDictionaryStore(_records=records)
        return init_sympy_condition.resolve(classical_data)

    assert resolve({'0:a': [[1]]})
    assert resolve({'0:a': [[2]]})
    assert resolve({'0:a': [[0, 1]]})
    assert resolve({'0:a': [[1, 0]]})
    assert not resolve({'0:a': [[0]]})
    assert not resolve({'0:a': [[0, 0]]})
    assert not resolve({'0:a': [[]]})
    assert not resolve({'0:a': [[0]], 'b': [[1]]})
    with pytest.raises(
        ValueError,
        match=re.escape("Measurement keys ['0:a'] missing when testing classical control"),
    ):
        _ = resolve({})
    with pytest.raises(
        ValueError,
        match=re.escape("Measurement keys ['0:a'] missing when testing classical control"),
    ):
        _ = resolve({'0:b': [[1]]})
[ 9, 4202, 405, 1014 ]
def METHOD_NAME(self):
    """Return the max sentence length allowed by the task."""
    return (self.args.max_source_positions, self.args.max_target_positions)
[ 232, 2758 ]
def METHOD_NAME(self):
    self.gobgp.local('gobgp neighbor all softresetin')
    time.sleep(1)
[ 9, 4468, 238, 623, 54, 24, 649 ]
def METHOD_NAME(self) -> "Resource":
    try:
        if not os.environ.get("ECS_CONTAINER_METADATA_URI") and not os.environ.get(
            "ECS_CONTAINER_METADATA_URI_V4"
        ):
            raise RuntimeError(
                "Missing ECS_CONTAINER_METADATA_URI therefore process is not on ECS."
            )

        container_id = ""
        try:
            with open("/proc/self/cgroup", encoding="utf8") as container_info_file:
                for raw_line in container_info_file.readlines():
                    line = raw_line.strip()
                    # Subsequent IDs should be the same, exit if found one
                    if len(line) > _CONTAINER_ID_LENGTH:
                        container_id = line[-_CONTAINER_ID_LENGTH:]
                        break
        except FileNotFoundError as exception:
            logger.warning("Failed to get container ID on ECS: %s.", exception)

        base_resource = Resource(
            {
                ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_ECS.value,
                ResourceAttributes.CONTAINER_NAME: socket.gethostname(),
                ResourceAttributes.CONTAINER_ID: container_id,
            }
        )

        metadata_v4_endpoint = os.environ.get("ECS_CONTAINER_METADATA_URI_V4")
        if not metadata_v4_endpoint:
            return base_resource

        # Returns https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v4.html#task-metadata-endpoint-v4-response
        metadata_container = json.loads(_http_get(metadata_v4_endpoint))
        metadata_task = json.loads(_http_get(f"{metadata_v4_endpoint}/task"))

        task_arn = metadata_task["TaskARN"]
        base_arn = task_arn[0 : task_arn.rindex(":")]  # noqa
        cluster: str = metadata_task["Cluster"]
        cluster_arn = (
            cluster if cluster.startswith("arn:") else f"{base_arn}:cluster/{cluster}"
        )

        logs_resource = _get_logs_resource(metadata_container)

        return base_resource.merge(logs_resource).merge(
            Resource(
                {
                    ResourceAttributes.AWS_ECS_CONTAINER_ARN: metadata_container["ContainerARN"],
                    ResourceAttributes.AWS_ECS_CLUSTER_ARN: cluster_arn,
                    ResourceAttributes.AWS_ECS_LAUNCHTYPE: metadata_task["LaunchType"].lower(),
                    ResourceAttributes.AWS_ECS_TASK_ARN: task_arn,
                    ResourceAttributes.AWS_ECS_TASK_FAMILY: metadata_task["Family"],
                    ResourceAttributes.AWS_ECS_TASK_REVISION: metadata_task["Revision"],
                }
            )
        )
    # pylint: disable=broad-except
    except Exception as exception:
        if self.raise_on_error:
            raise exception
        logger.warning("%s failed: %s", self.__class__.__name__, exception)
        return Resource.get_empty()
[ 2991 ]
def METHOD_NAME(featureName):
    if (
        "Number_of" in featureName
        or "Center_of" in featureName
        or "Bounding_Box" in featureName
        or featureName == "Size_in_pixels"
    ):
        formats.append("%d")
    else:
        formats.append("%f")
[ 1459, 275 ]
def METHOD_NAME(self, lvname):
    for vg in self.vg.values():
        for lv in vg.lv.values():
            if lv.name == lvname:
                return vg, lv
    return None, None
[ 416, 10340 ]
def METHOD_NAME(self, name: str) -> List[WebElement]: return self.selenium.find_elements(By.TAG_NAME, name)
[ 416, 75, 604, 82 ]
def METHOD_NAME(self):
    self.mocks = [mock_ec2()]
    for mock in self.mocks:
        mock.start()

    ec2_client = boto3.client("ec2", region_name="us-east-1")
    ec2 = boto3.resource("ec2", region_name="us-east-1")
    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")

    self.vpc_id = vpc.id
    self.ec2 = ec2
    self.ec2_client = ec2_client
[ 102, 103 ]
def METHOD_NAME():
    # BINARY_SUSCR[_LIST_INT] immediately follows the call to C.__del__
    [0][C(0).x]
[ 4197 ]
def METHOD_NAME(query, kwargs, candidate_column, cycle_column=None, year_column=None):
    if not kwargs.get('office'):
        return query
    utils.check_election_arguments(kwargs)
    cycle = get_cycle(kwargs)
    query = query.join(
        models.CandidateHistory,
        candidate_column == models.CandidateHistory.candidate_id,
    ).filter(
        models.CandidateHistory.two_year_period == cycle,
        models.CandidateHistory.office == kwargs['office'][0].upper(),
    )
    if kwargs.get('state'):
        query = query.filter(models.CandidateHistory.state == kwargs['state'])
    if kwargs.get('district'):
        query = query.filter(models.CandidateHistory.district == kwargs['district'])
    return query
[ 527, 6315 ]
def METHOD_NAME(params): return True
[ 4964, 437 ]
def METHOD_NAME(msg):
    assert msg.kind == msg.KIND_SERVICE
    msg_underscored_name = msg.full_name.replace('.','_')+'_req'
    msg_resp_underscored_name = msg.full_name.replace('.','_')+'_res'
    return {
        'msg_underscored_name': msg_underscored_name,
        'msg_header_file_name': msg_header_name_request(msg),
        'msg_c_type': underscored_name_to_ctype(msg_underscored_name),
        'msg_union': msg.request_union,
        'msg_fields': msg.request_fields,
        'msg_constants': msg.request_constants,
        'msg_max_bitlen': msg.get_max_bitlen_request(),
        'msg_dt_sig': msg.get_data_type_signature(),
        'msg_default_dtid': msg.default_dtid,
        'msg_kind': 'request',
        'msg_resp_underscored_name': msg_resp_underscored_name,
        'msg_resp_header_file_name': msg_header_name_response(msg)
    }
[ 19, 17624, 485, 377 ]
def METHOD_NAME(self):
    matches = self.parser.find_dir("TestVariablePort")
    assert len(matches) == 1
    assert self.parser.get_arg(matches[0]) == "1234"
[ 9, 756, 1210, 4078 ]
def METHOD_NAME(env, **kw) -> None:
    """ Generate `msgfmt` tool """
    if sys.platform == 'win32':
        msgfmt = SCons.Tool.find_program_path(
            env, 'msgfmt', default_paths=MINGW_DEFAULT_PATHS + CYGWIN_DEFAULT_PATHS
        )
        if msgfmt:
            msgfmt_bin_dir = os.path.dirname(msgfmt)
            env.AppendENVPath('PATH', msgfmt_bin_dir)
        else:
            SCons.Warnings.warn(
                # MsgfmtToolWarning,  # using this breaks test, so keep:
                SCons.Warnings.SConsWarning,
                'msgfmt tool requested, but binary not found in ENV PATH',
            )
    try:
        env['MSGFMT'] = _detect_msgfmt(env)
    except StopError:
        env['MSGFMT'] = 'msgfmt'
    env.SetDefault(
        MSGFMTFLAGS=[SCons.Util.CLVar('-c')],
        MSGFMTCOM='$MSGFMT $MSGFMTFLAGS -o $TARGET $SOURCE',
        MSGFMTCOMSTR='',
        MOSUFFIX=['.mo'],
        POSUFFIX=['.po'],
    )
    env.Append(BUILDERS={'MOFiles': _create_mo_file_builder(env)})
[ 567 ]
def METHOD_NAME(self):
    filename = TESTFN_NONASCII
    self.addCleanup(unlink, filename)
    with gdbm.open(filename, 'c') as db:
        db[b'key'] = b'value'
    self.assertTrue(os.path.exists(filename))
    with gdbm.open(filename, 'r') as db:
        self.assertEqual(list(db.keys()), [b'key'])
        self.assertTrue(b'key' in db)
        self.assertEqual(db[b'key'], b'value')
[ 9, 2503, 1147 ]
def METHOD_NAME(
    order: int, coord_dtype: np.dtype, lazy: bool, with_bounds: bool
) -> None:
    """Test extent calculation of vector dimension coordinates."""
    metadata = create_metadata(
        dim_coord=True,
        scalar=False,
        order=order,
        coord_dtype=coord_dtype,
        lazy=lazy,
        with_bounds=with_bounds,
    )
    dim_metadata = [_CoordMetaData(metadata.coord, metadata.dims)]
    cube_signature = MockCubeSignature(
        dim_coords=[metadata.coord], dim_metadata=dim_metadata
    )
    coord_signature = _CoordSignature(cube_signature)
    assert len(coord_signature.dim_extents) == 1
    (actual,) = coord_signature.dim_extents

    first, last = coord_dtype(0), coord_dtype((N_POINTS - 1) * SCALE_FACTOR)
    if order == _CONSTANT:
        emsg = f"Expected 'order' of '{_DECREASING}' or '{_INCREASING}', got '{order}'."
        raise ValueError(emsg)

    points_extent = _Extent(min=first, max=last)
    bounds_extent = None
    if with_bounds:
        offset = SCALE_FACTOR // 2
        if order == _INCREASING:
            bounds_extent = (
                _Extent(min=first - offset, max=last - offset),
                _Extent(min=first + offset, max=last + offset),
            )
        else:
            bounds_extent = (
                _Extent(min=first + offset, max=last + offset),
                _Extent(min=first - offset, max=last - offset),
            )
    expected = _CoordExtent(points=points_extent, bounds=bounds_extent)
    assert actual == expected
[ 9, 3014 ]