text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(matched_rows: ResultProxy, new_scopes: List[str]):
    """Helper method to tack on new scopes to the fidesuserpermissions.scopes if user has existing scope."""
    for row in matched_rows:
        scopes: List[str] = row["scopes"] or []
        scopes.extend(new_scopes)
        add_new_scopes_query: TextClause = text(
            "UPDATE fidesuserpermissions SET scopes= :scopes WHERE id= :id"
        )
        bind.execute(
            add_new_scopes_query,
            {"scopes": sorted(list(set(scopes))), "id": row["id"]},
        )
[ 238, 80, 3040, 24, 8229, 21, 804 ]
def METHOD_NAME(self):
    self.patch_object(g, '_options', new={})
    g.set_option("this.interesting.key", 1)
    self.assertEqual(g._options['this']['interesting']['key'], 1)
    g.set_option("this.other.thing", 2)
    self.assertEqual(g._options['this']['interesting']['key'], 1)
    self.assertEqual(g._options['this']['other']['thing'], 2)
    g.set_option("this.list.1", 3)
    self.assertEqual(g._options['this']['list'][1], 3)
    self.assertEqual(g._options['this']['list'][0], None)
    g.set_option("this.list.1", "goodbye")
    self.assertEqual(g._options['this']['list'][1], "goodbye")
    # re-write an option type
    g.set_option("this.list.is", "on", override=True)
    self.assertEqual(g._options['this']['list']['is'], "on")
[ 9, 0, 1335 ]
def METHOD_NAME(self, basepath):
    '''Determine the target base name of this comic file and make sure the directory exists.'''
    comicdir = self.scraper.get_download_dir(basepath)
    if not os.path.isdir(comicdir):
        os.makedirs(comicdir)
    return os.path.join(comicdir, self.filename)
[ -1 ]
def METHOD_NAME(pivot, particles, deltaRMax, deltaRMin=1e-5):
    '''Returns the list of particles that are less than deltaRMax away from pivot.'''
    dR2Max = deltaRMax ** 2
    dR2Min = deltaRMin ** 2 if deltaRMin > 0 else -1
    results = []
    for ptc in particles:
        dR2 = deltaR2(pivot.eta(), pivot.phi(), ptc.eta(), ptc.phi())
        if dR2Min < dR2 and dR2 < dR2Max:
            results.append(ptc)
    return results
[ 623, 5568, 1098 ]
async def METHOD_NAME(
    api_client: TestClient, coresys: CoreSys, boards_service: BoardsService
):
    """Test supervised board info."""
    await mock_dbus_services({"agent_boards_supervised": None}, coresys.dbus.bus)
    boards_service.board = "Supervised"
    await coresys.dbus.agent.board.update()

    with patch("supervisor.os.manager.CPE.get_product", return_value=["not-hassos"]):
        await coresys.os.load()

    assert (await api_client.get("/os/boards/supervised")).status == 200
    assert (await api_client.post("/os/boards/supervised", json={})).status == 405
    assert (await api_client.get("/os/boards/yellow")).status == 400
    assert (await api_client.get("/os/boards/not-real")).status == 400
[ 9, 58, 3261, 3872, 100 ]
def METHOD_NAME() -> None:
    res = list(q.find(plot.references(), {'size': {q.EQ: 5}}))
    assert len(res) == 1
    res = list(q.find(plot.references(), {'size': {q.NEQ: 5}}))
    assert len(res) == 0
    res = list(q.find(plot.references(), {'size': {q.GEQ: 5}}))
    assert len(res) == 1
    res = list(q.find(plot.references(), {'size': {q.LEQ: 5}}))
    assert len(res) == 1
    res = list(q.find(plot.references(), {'size': {q.GT: 5}}))
    assert len(res) == 0
    res = list(q.find(plot.references(), {'size': {q.LT: 5}}))
    assert len(res) == 0
[ 9, 829 ]
def METHOD_NAME(cls, username):
    """Return the oldest User record for the provided username."""
    # ascending creation_date so .first() yields the earliest-created record
    return cls.query.filter_by(username=username).order_by(User.creation_date.asc()).first()
[ 416, 604, 2072 ]
def METHOD_NAME(self, mock_send_job):
    mock_send_job.side_effect = fake_send_job

    job = create_survey_job()
    job_requeuing.requeue_survey_job(job)
    self.assertEqual(len(mock_send_job.mock_calls), 1)

    jobs = SurveyJob.objects.order_by("id")
    original_job = jobs[0]
    self.assertTrue(original_job.retried)
    self.assertEqual(original_job.num_retries, 0)
    self.assertFalse(original_job.success)

    retried_job = jobs[1]
    self.assertEqual(retried_job.num_retries, 1)
[ 9, 16445, 1281, 202 ]
def METHOD_NAME(self):
    event = self.receive_message()
    event_dict = event.to_dict(hierarchical=False)

    for field in self.flatten_fields:
        if field in event_dict:
            val = event_dict[field]
            # if it's a string try to parse it as JSON
            if isinstance(val, str):
                try:
                    val = loads(val)
                except ValueError:
                    pass
            if isinstance(val, Mapping):
                for key, value in val.items():
                    event_dict[field + '_' + key] = value
                event_dict.pop(field)

    # For ES 2.x, replace dots with a specified replacement character
    if self.replacement_char and self.replacement_char != '.':
        event_dict = replace_keys(event_dict, replacement=self.replacement_char)

    self.es.index(index=self.get_index(event_dict, default_date=datetime.today().date()),
                  body=event_dict)
    self.acknowledge_message()
[ 356 ]
def METHOD_NAME(self, file_system, mount_point, storage_writer, plugin):
[ 22, 9653, 2793, 69, 3239, 510, 99 ]
def METHOD_NAME(type_):
    if type_ == "TCGv":
        return "target_ulong"
    else:
        return type_
[ 6305, 988, 1087, 2483 ]
def METHOD_NAME(self, width, label, add_classes, alist):
    """Create the tag."""
    # Create list of all classes and remove duplicates
    classes = list(
        set(
            ["progress"] +
            self.config.get('add_classes', '').split() +
            add_classes
        )
    )
    classes.sort()
    el = etree.Element("div")
    el.set('class', ' '.join(classes))
    bar = etree.SubElement(el, 'div')
    bar.set('class', "progress-bar")
    bar.set('style', 'width:%s%%' % width)
    p = etree.SubElement(bar, 'p')
    p.set('class', 'progress-label')
    p.text = label
    if alist is not None:
        el.tail = alist
    if 'attr_list' in self.md.treeprocessors:
        ProgressBarTreeProcessor(self.md).run(el)
    return el
[ 129, 82 ]
def METHOD_NAME(self, n):
[ 19, 199 ]
def METHOD_NAME(next_link=None):
    if not next_link:
        request = build_list_request(
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
    else:
        # make call to next link with the client's api-version
        _parsed_next_link = urllib.parse.urlparse(next_link)
        _next_request_params = case_insensitive_dict(
            {
                key: [urllib.parse.quote(v) for v in value]
                for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
            }
        )
        _next_request_params["api-version"] = self._config.api_version
        request = HttpRequest(
            "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        request.method = "GET"
    return request
[ 123, 377 ]
def METHOD_NAME(self):
    return []
[ 141, 275 ]
def METHOD_NAME(self): return ["ATM", "LND", "ROF", "ICE", "CPL", "OCN"]
[ 19, 984, 811 ]
def METHOD_NAME(self):
[ 9, 1119 ]
def METHOD_NAME(self, release_notes_comments):
    for comment in release_notes_comments:
        if not (
            re.match(self.MULTI_LINE_REAL_COMMENT_REGEX, comment)
            or re.match(self.COMMENT_FILLER_REGEX, comment)
        ):
            return False
    return True
[ 137, 1205, 457, 534, 1591 ]
def METHOD_NAME(field, value):
    return get_default_field_value(field, value)
[ 89, 1987, 1445 ]
def METHOD_NAME(self):
[ 9, 503, 513 ]
def METHOD_NAME(da):
    sos = butter(da.coords['xx'], N=4, Wn=20 / da.coords['xx'].unit)
    out2d = sosfiltfilt(da, 'xx', sos=sos)
    assert sc.identical(out2d['yy', 0], sosfiltfilt(da['yy', 0], 'xx', sos=sos))
    assert sc.identical(out2d['yy', 1], sosfiltfilt(da['yy', 1], 'xx', sos=sos))
[ 9, 5614, 55, 1741, 24, 55, 47 ]
def METHOD_NAME(self, product: str) -> Optional[str]:
    version = self.latest_version(product)
    assert version is not None, (product, str(self._product_versions))
    return product_version_to_resource(product, version)
[ 191, 156 ]
def METHOD_NAME(searcher, prediction_fn, output_fn, qid2text_time, qid2reldocids, K=1000):
    f = open(prediction_fn, "w")
    outa = open(os.path.join(output_fn, "a.toks"), "w")  # , encoding="utf-8"
    outb = open(os.path.join(output_fn, "b.toks"), "w")  # , encoding="utf-8"
    outid = open(os.path.join(output_fn, "id.txt"), "w")  # , encoding="utf-8"
    outsim = open(os.path.join(output_fn, "sim.txt"), "w")  # , encoding="utf-8"
    outurl = open(os.path.join(output_fn, "url.txt"), "w", encoding="utf-8")
    for qid in qid2text_time:
        a, t = qid2text_time[qid]
        hits = searcher.search(JString(a), K, t)
        for i in range(len(hits)):
            sim = hits[i].score
            docno = hits[i].docid
            label = 1 if qid in qid2reldocids and docno in qid2reldocids[qid] else 0
            b, url = parse_doc_from_index_tweet(hits[i].content)
            b = "".join(filter(lambda x: x in printable, b))
            f.write("{} Q0 {} {} {:.6f} Anserini\n".format(qid, docno, i+1, sim))
            outa.write("{}\n".format(a))
            outb.write("{}\n".format(b))
            outid.write("{} Q0 {} {} {:.6f} Anserini\n".format(qid, docno, i+1, sim))
            outsim.write("{}\n".format(label))
            outurl.write("{}\n".format(url))
    outa.close()
    outb.close()
    outid.close()
    outsim.close()
    outurl.close()
    f.close()
[ 1070, 13901 ]
def METHOD_NAME(regexp):
    try:
        re.compile(regexp)
        return regexp
    except re.error as e:
        raise ArgumentTypeError(e)
[ 871, 44 ]
def METHOD_NAME(message):
    """Show error."""
    click.secho(str(message), fg="red")
[ 168 ]
async def METHOD_NAME(
    self, response: disnake.InteractionResponse, adapter, parent_type, with_message, expected
) -> None:
    response._parent.type = parent_type
    await response.defer(with_message=with_message)
    adapter.create_interaction_response.assert_awaited_once_with(
        response._parent.id,
        response._parent.token,
        session=response._parent._session,
        **expected,
    )
[ 9, 1185 ]
def METHOD_NAME(input: dace.int32[12, 10], output: dace.int32[12, 10]):
    with dace.tasklet:
        m << input[0:2, 4]
        n >> output[0:4, 5]
        n = m
[ 457, 11971 ]
def METHOD_NAME(self):
    image_shape = (4, 8, 8, 3)
    image = tf.cast(
        tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8
    )

    layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
    output = layer(image)
    self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)

    layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
    output = layer(image)
    self.assertNotAllClose(image, output)
[ 9, 41, 5493 ]
def METHOD_NAME(self, indices):
    subA = []
    subB = []
    for i in indices:
        subA.append(self.a[i])
        subB.append(self.b[i])
    py_dist = UniformNdPy(subA, subB)
    return ot.Distribution(py_dist)
[ 19, 11123 ]
def METHOD_NAME():
    mrts_before = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,cases\n"
            "iso1:us#fips:97111,2020-01-01,100\n"
            "iso1:us#fips:97111,2020-01-02,50\n"
            "iso1:us#fips:97111,2020-01-03,75\n"
            "iso1:us#fips:97111,2020-01-04,74\n"
            "iso1:us#fips:97111,,75\n"
        )
    )
    mrts_expected = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,cases,new_cases\n"
            "iso1:us#fips:97111,2020-01-01,100,\n"
            "iso1:us#fips:97111,2020-01-02,50,\n"
            "iso1:us#fips:97111,2020-01-03,75,25\n"
            "iso1:us#fips:97111,2020-01-04,74,0\n"
            "iso1:us#fips:97111,,75,0.0\n"
        )
    )
    timeseries_after = new_cases_and_deaths.add_new_cases(mrts_before)
    test_helpers.assert_dataset_like(mrts_expected, timeseries_after)
[ 9, 80, 2041, 188, 2927 ]
def METHOD_NAME(self, queryset, name, affiliation):
    if affiliation == "":
        return queryset
    else:
        q = Q(affiliation__icontains=affiliation) | Q(
            person__affiliation__icontains=affiliation
        )
        return queryset.filter(q).distinct()
[ 527, 6172 ]
def METHOD_NAME(self) -> str: """ Provisioning state of the resource. """ return pulumi.get(self, "provisioning_state")
[ 1994, 551 ]
def METHOD_NAME(request: Request, tid):
    try:
        slogger.task[tid].info("push repository request")
        rq_id = "git.push.{}".format(tid)
        queue = django_rq.get_queue(settings.CVAT_QUEUES.EXPORT_DATA.value)
        queue.enqueue_call(
            func=CVATGit.push,
            args=(tid, request.user, request.scheme, request.get_host()),
            job_id=rq_id,
        )
        return Response({"rq_id": rq_id})
    except Exception as ex:
        with contextlib.suppress(Exception):
            slogger.task[tid].error("error occurred during pushing repository request", exc_info=True)
        return HttpResponseBadRequest(str(ex))
[ 1013, 1230 ]
def METHOD_NAME(self, args):
    return True
[ 74, 1484 ]
def METHOD_NAME(photosdb):
    albums = photosdb.album_info
    for album in albums:
        folder_names = album.folder_names
        assert folder_names == ALBUM_FOLDER_NAMES_DICT[album.title]
[ 9, 11365, 451, 83 ]
def METHOD_NAME(self):
    iam_client = client("iam")
    # update password policy
    iam_client.update_account_password_policy(RequireNumbers=False)

    from prowler.providers.aws.services.iam.iam_service import IAM

    current_audit_info = self.set_mocked_audit_info()
    with mock.patch(
        "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
        new=current_audit_info,
    ), mock.patch(
        "prowler.providers.aws.services.iam.iam_password_policy_number.iam_password_policy_number.iam_client",
        new=IAM(current_audit_info),
    ):
        # Test Check
        from prowler.providers.aws.services.iam.iam_password_policy_number.iam_password_policy_number import (
            iam_password_policy_number,
        )

        check = iam_password_policy_number()
        result = check.execute()
        assert len(result) == 1
        assert result[0].status == "FAIL"
        assert search(
            "IAM password policy does not require at least one number.",
            result[0].status_extended,
        )
        assert result[0].resource_id == AWS_ACCOUNT_NUMBER
        assert result[0].resource_arn == AWS_ACCOUNT_ARN
        assert result[0].region == AWS_REGION
[ 9, 1694, 2897, 54, 654, 106, 584 ]
def METHOD_NAME(self):
    return {
        "outside": {
            "dynamic": [{
                "dynamic_block_name": {
                    "content": {
                        "dynamic": [{
                            "dynamic_block_1": {
                                "content": {
                                    "key": 1
                                }
                            },
                            "dynamic_block_2": {
                                "content": {
                                    "key": "2"
                                }
                            }
                        }]
                    }
                }
            }]
        }
    }
[ 107, 2111, 573, 365 ]
def METHOD_NAME(self, space):
    explore = ResampleExplore(probability=0.5)
    rng = RNGStub()
    rng.randint = lambda low, high, size: [1]
    rng.random = lambda: 0.5
    params = {"x": 1.0, "y": 2, "z": 0, "f": 10}
    assert explore(rng, space, params) is params
    rng.random = lambda: 0.4
    assert explore(rng, space, params) is not params
[ 9, 4182, 4583 ]
def METHOD_NAME(self):
    # delete a file first
    seafile_api.del_file(self.repo_id, '/', json.dumps([self.file_name]), self.user_name)

    self.login_as(self.user)
    resp = self.client.get(self.url)
    self.assertEqual(200, resp.status_code)
    json_resp = json.loads(resp.content)
    assert json_resp['data'][0]['obj_name'] == self.file_name
    assert json_resp['data'][0]['is_dir'] == False
[ 9, 1046, 19 ]
def METHOD_NAME() -> List[Window]:
    return _GetWindows(
        filterUsingWindow=lambda window: windll.user32.IsWindowVisible(window.hwndVal)
        and bool(window.title)
    )
[ 19, 2999, 3239 ]
def METHOD_NAME(self, signal_symbol) -> SignalConnector:
    """Get or create signal connector."""
    for sc in self.signal_connectors:
        if sc.symbol == signal_symbol:
            signal_connector = sc
            break
    else:
        signal_connector = SignalConnector(signal_symbol)
        if self.initialized:
            self._init_sc(signal_connector)
        self.signal_connectors.append(signal_connector)
    return signal_connector
[ 19, 894, 129, 3555 ]
def METHOD_NAME(start, end, size, orphan, sequence):
    if size < 1:
        if start > 0 and end > 0 and end >= start:
            size = end + 1 - start
        else:
            size = 7
    if start > 0:
        try:
            sequence[start - 1]
        except IndexError:
            start = len(sequence)
        if end > 0:
            if end < start:
                end = start
        else:
            end = start + size - 1
        try:
            sequence[end + orphan]
        except IndexError:
            end = len(sequence)
    elif end > 0:
        try:
            sequence[end - 1]
        except IndexError:
            end = len(sequence)
        start = end + 1 - size
        if start - 1 < orphan:
            start = 1
    else:
        start = 1
        end = start + size - 1
        try:
            sequence[end + orphan - 1]
        except IndexError:
            end = len(sequence)
    return start, end, size
[ 1671 ]
def METHOD_NAME(self): """Checks that the output is created""" self.assertModule( "r.learn.train", group=self.group, training_points=self.training_points, field="value", model_name="RandomForestRegressor", n_estimators=100, save_model=self.model_file, ) self.assertFileExists(filename=self.model_file) self.assertModule( "r.learn.predict", group=self.group, load_model=self.model_file, output=self.output, ) self.assertRasterExists(self.output, msg="Output was not created")
[ 9, 146, 152, 1399 ]
def METHOD_NAME(circuit: qiskit.QuantumCircuit) -> None:
    """Displays library information for a quantum circuit.

    Args:
        circuit: Input quantum circuit.
    """
    shell = get_ipython()
    circ = shell.ev(circuit)
    circuit_library_widget(circ)
[ 1708, 3106, 100 ]
def METHOD_NAME(db, request):
    """
    Require a CherryPy server that listens on a port.

    Provides a started CherryPy server with a bound port and a request method
    for performing local requests against it. Note: this fixture requires the
    db fixture.

    The returned value has a `boundPort` property identifying where the server
    can be reached. The server can then be accessed over http at an address
    like `'http://127.0.0.1:%d/api/v1/...' % boundServer.boundPort`.
    """
    registry = PluginRegistry()
    with registry():
        plugins = _getPluginsFromMarker(request, registry)
        with serverContext(plugins, bindPort=True) as server:
            yield server
[ 4432, 163 ]
def METHOD_NAME(x, a=True):
    return False
[ 4197 ]
def METHOD_NAME(
    self,
    prune_ratio,
    random_prune=False,
):
    model = self.model.cpu()
    total_weight = 0
    layer_to_rank = {}
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            total_weight += module.weight.numel()
            layer_to_rank[name] = module.weight.data.clone().numpy()
            layer_to_rank[name].fill(0)

    accumulate_coverage, log_names = self.load_nc_info()

    all_weight_coverage, adv_weight_coverage = [], []
    for layer_name, (input_coverage, output_coverage) in accumulate_coverage.items():
        input_dim, output_dim = len(input_coverage), len(output_coverage)
        for input_idx in range(input_dim):
            for output_idx in range(output_dim):
                coverage_score = input_coverage[input_idx] + output_coverage[output_idx]
                all_weight_coverage.append((coverage_score, (layer_name, input_idx, output_idx)))

    # prune_ratio = 0.05
    sorted_coverage = sorted(all_weight_coverage, key=lambda item: item[0])
    accumulate_index = 0
    for (coverage_score, pos) in sorted_coverage:
        layer_name, input_idx, output_idx = pos
        layer_to_rank[layer_name][output_idx, input_idx] = accumulate_index
        h, w = layer_to_rank[layer_name].shape[2:]
        accumulate_index += h * w

    start = time.time()
    layer_idx = 0
    weight_list = []
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            weight_copy = module.weight.data.abs().clone().numpy()
            output_dim, input_dim, h, w = weight_copy.shape
            for output_idx in range(output_dim):
                for input_idx in range(input_dim):
                    for h_idx in range(h):
                        for w_idx in range(w):
                            weight_score = weight_copy[output_idx, input_idx, h_idx, w_idx]
                            weight_list.append(
                                (weight_score, (layer_idx, input_idx, output_idx, h_idx, w_idx))
                            )
            layer_idx += 1
    sorted_weight = sorted(weight_list, key=lambda item: item[0])
    end = time.time()
    weight_sort_time = end - start
    log = f"Sort weight time {weight_sort_time}"
    self.prune_record(log)

    for weight_rank, (weight_score, pos) in enumerate(sorted_weight):
        layer_idx, input_idx, output_idx, h_idx, w_idx = pos
        layer_name = log_names[layer_idx]
        layer_to_rank[layer_name][output_idx, input_idx, h_idx, w_idx] -= weight_rank

    start = time.time()
    nc_weight_ranks = []
    for layer_name in log_names:
        nc_weight_ranks.append(layer_to_rank[layer_name].flatten())
    nc_weight_ranks = np.concatenate(nc_weight_ranks)
    nc_weight_ranks = np.sort(nc_weight_ranks)
    end = time.time()
    weight_sort_time = end - start
    log = f"Sort nc weight rank time {weight_sort_time}"
    self.prune_record(log)

    total = len(nc_weight_ranks)
    thre_index = int(total * prune_ratio)
    if thre_index == total:
        thre_index -= 1
    thre = nc_weight_ranks[thre_index]
    log = f"Pruning threshold: {thre:.4f}"
    self.prune_record(log)

    pruned = 0
    for name, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            mask = layer_to_rank[name]
            mask = torch.Tensor(mask > thre)
            pruned = pruned + mask.numel() - torch.sum(mask)
            # np.random.shuffle(mask)
            module.weight.data.mul_(mask)
            remain_ratio = int(torch.sum(mask)) / mask.numel()
            log = (f"layer {name} \t total params: {mask.numel()} \t "
                   f"remaining params: {int(torch.sum(mask))}({remain_ratio:.2f})")
            self.prune_record(log)

    log = (f"Total conv params: {total_weight}, Pruned conv params: {pruned}, "
           f"Pruned ratio: {pruned/total_weight:.2f}")
    self.prune_record(log)
    self.model = model.cuda()
    self.check_param_num()
[ 1336, 3724 ]
def METHOD_NAME(test_set, exp_diff, exp_case):
    """
    Test CaseInsensitiveFrozenSet for correct symmetric difference
    """
    from masci_tools.util.case_insensitive_dict import CaseInsensitiveFrozenSet

    s = CaseInsensitiveFrozenSet(TEST_INIT)
    actual = s.symmetric_difference(test_set)
    print(actual.original_case)
    assert isinstance(actual, CaseInsensitiveFrozenSet)
    assert actual == exp_diff
    assert s ^ test_set == actual
    assert actual.original_case == exp_case
[ 9, 331, 4419, 4257, 6794, 614 ]
def METHOD_NAME(self):
    return
[ 8106 ]
def METHOD_NAME( self ):
    return self._GetAttribute( 'Upnp.DeviceXml' )
[ 19, 398, 399 ]
def METHOD_NAME(self):
    client = Client()
    exhibitor = Exhibitor.objects.get(pk=1)
    adminUser = User.objects.create(
        username="admin", email="[email protected]", is_superuser=True
    )
    adminUser.set_password("hej")
    adminUser.save()
    login = client.login(username="admin", password="hej")
    self.assertEqual(login, True)

    # Should NOT work to send email for user without staffstatus
    response = client.get("/fairs/2017/exhibitors/1/")
    self.assertEqual(response.status_code, 200)
    response = client.get("/fairs/2017/exhibitors/1/send_emails/")
    self.assertEqual(response.status_code, 403)
    response = client.get("/fairs/2017/exhibitors/1/send_cr_receipts")
    self.assertEqual(response.status_code, 403)

    # Should work to send email for user with staff-status
    adminUser.is_staff = True
    adminUser.save()
    response = client.get("/fairs/2017/exhibitors/1/send_emails/")
    self.assertEqual(response.status_code, 200)
    response = client.get("/fairs/2017/exhibitors/1/send_cr_receipts")
    self.assertEqual(response.status_code, 200)
    response = client.get("/fairs/2017/exhibitors/1/emails_confirmation/")
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(mail.outbox), 1)

    # Correct message if no orders
    self.assertEqual(
        mail.outbox[0].body,
        get_template("exhibitors/cr_receipt.html").render(
            {
                "orders_info": [],
                "total_price": 0,
                "exhibitor_name": exhibitor.company.name,
            }
        ),
    )

    # Correct message if adding some orders
    product1 = Product.objects.get(name="product1")
    product2 = Product.objects.get(name="product2")
    Order.objects.create(exhibitor=exhibitor, product=product1, amount=1)
    Order.objects.create(exhibitor=exhibitor, product=product2, amount=2)
    response = client.get("/fairs/2017/exhibitors/1/send_emails/")
    self.assertEqual(response.status_code, 200)
    response = client.get("/fairs/2017/exhibitors/1/send_cr_receipts")
    self.assertEqual(response.status_code, 200)
    response = client.get("/fairs/2017/exhibitors/1/emails_confirmation/")
    self.assertEqual(response.status_code, 200)
    self.assertEqual(len(mail.outbox), 2)
    self.assertEqual(
        mail.outbox[1].body,
        get_template("exhibitors/cr_receipt.html").render(
            {
                "orders_info": [
                    {"product": "product1", "price": 100, "amount": 1},
                    {"product": "product2", "price": 400, "amount": 2},
                ],
                "total_price": 500,
                "exhibitor_name": exhibitor.company.name,
            }
        ),
    )
[ 9, 487, 559 ]
def METHOD_NAME(frames_to_skip: int = 0, frames_to_log: int = 32) -> infra.Stack:
    """Returns the current C++ call stack.

    This function utilizes `torch.utils.cpp_backtrace` to get the current C++ call stack.
    The returned C++ call stack is a concatenated string of the C++ call stack frames.
    Each frame is separated by a newline character, in the same format of
    r"frame #[0-9]+: (?P<frame_info>.*)". More info at `c10/util/Backtrace.cpp`.
    """
    # NOTE: Cannot use `@_beartype.beartype`. It somehow erases the cpp stack frame info.
    frames = cpp_backtrace.get_cpp_backtrace(frames_to_skip, frames_to_log).split("\n")
    frame_messages = []
    for frame in frames:
        segments = frame.split(":", 1)
        if len(segments) == 2:
            frame_messages.append(segments[1].strip())
        else:
            frame_messages.append("<unknown frame>")
    return infra.Stack(
        frames=[
            infra.StackFrame(location=infra.Location(message=message))
            for message in frame_messages
        ]
    )
[ 7728, 128, 1501 ]
async def METHOD_NAME(redis_client):
    with override_config("redis", dict(service_name="myredis")):
        val = await redis_client.get("cheese")
        assert val is None
        await redis_client.set("cheese", "my-cheese")
        val = await redis_client.get("cheese")
        if isinstance(val, bytes):
            val = val.decode()
        assert val == "my-cheese"
[ 9, 345, 549, 156 ]
def METHOD_NAME(self) -> str: """ The name of the resource """ return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self):
    with self.assertRaises(ValueError):
        GroupMul([["a", 10]], self._public_testcases)
[ 9, 511, 532, 909, 232, 747, 44 ]
def METHOD_NAME(beat):
    energy = []
    for sample in beat:
        energy.append(sample * sample)
    return energy.index(max(energy))
[ 232, 5121, 724 ]
def METHOD_NAME(lst):
    count_dict = defaultdict(int)
    for element in lst:
        count_dict[element] += 1
    return dict(count_dict)
[ 129, 553, 41, 2496 ]
def METHOD_NAME(self):
    self.palsspin.setEnabled(self.als_type == 0)
    self.ratior.setEnabled(self.als_type == 1)
    self.porderairplsspin.setEnabled(self.als_type == 2)
[ 3655, 882 ]
def METHOD_NAME(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(module_path: str) -> Callable[[str], dict]:
    """
    Return a function that can load a json from a filename and return a dict,
    but also the filename
    """
    def _json_loader(filename: str):
        return _load_json_file_with_name(module_path, filename)

    return _json_loader
[ 557, 763, 171, 41, 156 ]
def METHOD_NAME(self):
    ti = TrackInterval()
    nt.assert_equals(ti.track, 0)
    ti.track = 5
    nt.assert_equals(ti.track, 5)
    ti.track = -12
    nt.assert_equals(ti.track, -12)
    ti.track = 0
    nt.assert_equals(ti.track, 0)

    # Check initial id
    ti = TrackInterval(20, Timestamp(), Timestamp())
    nt.assert_equals(ti.track, 20)
    ti.track = 5
    nt.assert_equals(ti.track, 5)
    ti.track = -12
    nt.assert_equals(ti.track, -12)
    ti.track = 0
    nt.assert_equals(ti.track, 0)
[ 9, 19, 0, 3068, 147 ]
def METHOD_NAME(workspace):
    strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
    cmd_str = ""
    if "xmin" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
        cmd_str += "export MALLOC_CONF=decay_time:0;"
    elif "xmid" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
    elif "min" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
        cmd_str += "export MALLOC_CONF=dirty_decay_ms:0,muzzy_decay_ms:0;"
    elif "mid" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
        cmd_str += "export MALLOC_CONF=background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000;"
    elif "max" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
        cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
    elif "244" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
    elif "251" == strategy:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
        cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000;"
    elif "close" == strategy:
        pass
    else:
        cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
        cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
    return cmd_str
[ 0, 3824, 281 ]
def METHOD_NAME(stubber, message, is_retryable):
    if is_retryable:
        stubber.responses.append(RetryableException(message=message))
    else:
        stubber.responses.append(NonRetryableException(message))
[ 238, 966, 442 ]
def METHOD_NAME() -> str: return f"test_environment_{random_id()[-8:]}"
[ 236, 1027, 156 ]
def METHOD_NAME(self, text):
    """Compute matches when text contains a dot.

    Assuming the text is of the form NAME.NAME....[NAME], and is
    evaluable in self.namespace, it will be evaluated and its attributes
    (as revealed by dir()) are used as possible completions.  (For class
    instances, class members are also considered.)

    WARNING: this can still invoke arbitrary C code, if an object
    with a __getattr__ hook is evaluated.
    """
    import re
    m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
    if not m:
        return []
    expr, attr = m.group(1, 3)
    try:
        thisobject = eval(expr, self.namespace)
    except Exception:
        return []

    # get the content of the object, except __builtins__
    words = set(dir(thisobject))
    words.discard("__builtins__")

    if hasattr(thisobject, '__class__'):
        words.add('__class__')
        words.update(get_class_members(thisobject.__class__))
    matches = []
    n = len(attr)
    if attr == '':
        noprefix = '_'
    elif attr == '_':
        noprefix = '__'
    else:
        noprefix = None
    while True:
        for word in words:
            if (word[:n] == attr and
                    not (noprefix and word[:n+1] == noprefix)):
                match = "%s.%s" % (expr, word)
                try:
                    val = getattr(thisobject, word)
                except Exception:
                    pass  # Include even if attribute not set
                else:
                    match = self._callable_postfix(val, match)
                matches.append(match)
        if matches or not noprefix:
            break
        if noprefix == '_':
            noprefix = '__'
        else:
            noprefix = None
    matches.sort()
    return matches
[ 864, 855 ]
def METHOD_NAME(self, result):
    self.print_result_line(result)
[ 1887, 750 ]
def METHOD_NAME(location: Optional[str] = None,
                project: Optional[str] = None,
                repository: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRepositoryIamPolicyResult:
    """
    Retrieves the current IAM policy data for repository

    ## example

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.artifactregistry.get_repository_iam_policy(project=google_artifact_registry_repository["my-repo"]["project"],
        location=google_artifact_registry_repository["my-repo"]["location"],
        repository=google_artifact_registry_repository["my-repo"]["name"])
    ```

    :param str location: The name of the location this repository is located in.
           Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource.
           If no project is provided in the parent identifier and no project is specified, the provider project is used.
    :param str repository: Used to find the parent resource to bind the IAM policy to
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['project'] = project
    __args__['repository'] = repository
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:artifactregistry/getRepositoryIamPolicy:getRepositoryIamPolicy', __args__, opts=opts, typ=GetRepositoryIamPolicyResult).value

    return AwaitableGetRepositoryIamPolicyResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        policy_data=pulumi.get(__ret__, 'policy_data'),
        project=pulumi.get(__ret__, 'project'),
        repository=pulumi.get(__ret__, 'repository'))
[ 19, 1230, 1694, 54 ]
def METHOD_NAME( self ) :
[ 19, 913 ]
def METHOD_NAME():
    spec = Specification(
        description="""Import a workflow in json format.""",
        map_input_pin_spec={
            0: PinSpecification(
                name="json_workflow",
                type_names=["string", "data_sources"],
                optional=False,
                document="""Input json data as either a data source or a string""",
            ),
        },
        map_output_pin_spec={
            0: PinSpecification(
                name="workflow",
                type_names=["workflow"],
                optional=False,
                document="""Instantiate workflow.""",
            ),
        },
    )
    return spec
[ 1457 ]
def METHOD_NAME(c):
    return self._replace_sensitive_data(c)
[ 369, 1778 ]
def METHOD_NAME(m: Union[nn.Module, nn.Sequential]) -> None:
    if isinstance(m, nn.Sequential):
        constant_init(m[-1], val=0)
    else:
        constant_init(m, val=0)
[ 679, 313, 176 ]
def METHOD_NAME():
    for size in range(1, 1000):
        assert next_fast_len(size) == fftpack.next_fast_len(size)
[ 9, 243, 2602, 5148 ]
def METHOD_NAME(self, user):
    print(f"{user.szUsername} was updated")
[ 69, 1660, 21, 86 ]
def METHOD_NAME(ql: Qiling, address: int, params):
    return __GetSystemInfo(ql, address, params)
[ 1021, 19, 1577, 112, 100 ]
def METHOD_NAME(self): """TestSuiteSpec makes up a label for unlabeled input paths.""" spec = TestSuiteSpec("name", [None, None], ["/foo", "/bar"]) for label in spec.labels: self.assertEqual(label, TestSuiteSpec.UNLABELED_STRING)
[ 9, 654, 636 ]
def METHOD_NAME(data, out, opts=None, **kwargs):
    """
    Return the formatted outputter string for the passed data
    """
    return try_printout(data, out, opts, **kwargs)
[ 1737, 275 ]
def METHOD_NAME(label, version, branch, debug):
    MODES = {'Binaries': 'build', 'Sources': 'src'}

    def getAndWriteCommitInfo(repo):
        """ We write the last commit info in the commit.info file """
        cwd = os.getcwd()
        os.chdir('src/%s' % repo)
        hash = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
                                stdout=subprocess.PIPE).stdout.read().decode("utf-8")
        with open('commit.info', 'w') as file:
            file.write("%s (%s)" % (branch, hash.strip()))
        os.chdir(cwd)

    def makeTarget(target, label):
        if exists(target):
            print("'%s' already exists. Removing it..." % target)
            os.system("rm -rf %s" % target)
        print("...preparing the bundle...")
        sys.stdout.flush()
        cwd = os.getcwd()
        os.system('git clone https://github.com/I2PC/xmipp %s -b %s' % (target, branch))
        if debug:  # in debug mode, the main script and this one is packed
            os.system('cp xmipp %s/xmipp' % target)
            os.system('cp scripts/tar.py %s/scripts/tar.py' % target)
        os.chdir(target)
        os.system('./xmipp get_devel_sources %s' % branch)
        getAndWriteCommitInfo('xmipp')
        getAndWriteCommitInfo('xmippCore')
        getAndWriteCommitInfo('xmippViz')
        getAndWriteCommitInfo('scipion-em-xmipp')
        os.environ['CUDA'] = 'True'  # To include cuFFTAdvisor
        os.system('./xmipp config noAsk')  # just to write the config file
        os.system('./xmipp get_dependencies')
        os.chdir(cwd)

    excludeTgz = ''
    if label.startswith('Bin'):
        print("Recompiling to make sure that last version is there...")
        sublabel = label.split('Bin')[1]
        target = 'xmippBin_%s-%s' % (sublabel, version)
        makeTarget(target, label)
        try:
            # doing compilation and install separately to skip overwriting config
            cwd = os.getcwd()
            os.chdir(target)
            os.system("./xmipp compile 8")
            os.system("./xmipp install %s" % target)
            os.chdir(cwd)
        except:
            raise Exception(" ...some error occurred during the compilation!!!\n")
        checkFile = isfile(join(target, 'bin', 'xmipp_cuda_movie_alignment_correlation'))
        if not checkFile:
            print("\n"
                  "  ERROR: %s not found. \n"
                  "  Xmipp should be compiled using CUDA to make the binaries.tgz."
                  % checkFile)
            sys.exit(1)
        os.system("rm %s/v%s" % (target, version))
        os.system("touch %s/v%s_%s" % (target, version, sublabel))
        excludeTgz = "--exclude='*.tgz' --exclude='*.h' --exclude='*.cpp' " \
                     "--exclude='*.java' --exclude='resources/test' " \
                     "--exclude='*xmipp_test*main'"
    elif label == 'Sources':
        target = 'xmippSrc-v' + version
        makeTarget(target, label)
        excludeTgz = (" --exclude='xmipp.conf' --exclude='xmippEnv.json'"
                      " --exclude='src/scipion-em-xmipp' ")
    else:
        usage("Incorrect <mode>")

    # FIXME: This is breaking the Sources bundle. Please, use a clean dir and skip this
    # excludeTgz += " --exclude='*.o' --exclude='*.os' --exclude='*pyc'"
    # excludeTgz += " --exclude='*.gz' --exclude='*.bashrc' --exclude='*.fish'"
    # excludeTgz += " --exclude=tests/data --exclude='*.scons*' --exclude=.git"
    excludeTgz += ("--exclude=.* --exclude=sonar-project.properties "
                   "--exclude='xmipp.bashrc' --exclude='xmipp.fish' "
                   "--exclude=src/scipion-em-xmipp")

    cmdStr = "tar czf %(target)s.tgz %(excludeTgz)s %(target)s"
    args = {'excludeTgz': excludeTgz, 'target': target}
    cmd = cmdStr % args
    if exists(target + '.tgz'):
        print("%s.tgz already exists. Removing it..." % target)
        os.system("rm -rf %s.tgz" % target)
    print(cmd)
    os.system(cmd)
    os.system("rm -rf %s" % target)
[ 22 ]
def METHOD_NAME(
    project_id: int, release: Optional[str], dist: Optional[str]
) -> ArtifactBundleFlatFileIndex:
    index = ArtifactBundleFlatFileIndex.objects.create(
        project_id=project_id,
        release_name=release or "",
        dist_name=dist or "",
    )
    index.update_flat_file_index("{}")
    return index
[ 248, 2301, 171, 724 ]
def METHOD_NAME(self) -> None:
    quiz, bot_handler = self.get_test_quiz()
    bot_handler.storage.put("Q001", quiz)
    self.assertEqual(get_quiz_from_id("Q001", bot_handler), quiz)
[ 9, 19, 13463, 280, 147 ]
def METHOD_NAME(self):
    return self.construct_function(
        lambda n, surf: (n - 1) * surf, self.refractive_index, self.surface_sag
    )
[ -1 ]
async def METHOD_NAME(self, *args, **kwargs):
    """
    Open Purge Requests for a worker pool

    List the caches for this `workerPoolId` that should be
    purged if they are from before the time given in the response.

    This is intended to be used by workers to determine which caches to purge.

    This method is ``stable``
    """
    return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
[ 4717, 311 ]
def METHOD_NAME():
    sweeps = [cirq.Linspace('a', 0, 1, 10), cirq.Linspace('b', 0, 1, 10)]
    assert cirq.study.to_sweeps(sweeps) == sweeps
[ 9, 24, 14994, 2439, 14994 ]
def METHOD_NAME(self):
    for bundle_item in all_bundle_items_except_money:
        with self.subTest(msg=bundle_item.item.name):
            self.assertIn(bundle_item.item.name, logic.item_rules)
[ 9, 1393, 1727, 1024, 868, 137, 1272 ]
def METHOD_NAME(self):
    '''Starts (Subscribes) the client.'''
    self.sub = rospy.Subscriber(self.topic, Odometry, self.__callback)
[ 447 ]
def METHOD_NAME(self: uml.Property) -> list[bool | None]:
    """Get navigability of an association end.

    If no association is related to the property, then unknown navigability
    is assumed.
    """
    assoc = self.association
    if not (assoc and self.opposite):
        return [None]  # assume unknown
    owner = self.opposite.type
    if (
        isinstance(owner, (uml.Class, uml.DataType, uml.Interface))
        and isinstance(self.type, (uml.Class, uml.DataType, uml.Interface))
        and (self in owner.ownedAttribute)
        or self in assoc.navigableOwnedEnd
    ):
        return [True]
    elif isinstance(assoc.ownedEnd, uml.ExtensionEnd):
        return [self is assoc.ownedEnd]
    elif assoc.ownedEnd is None or self in assoc.ownedEnd:
        return [None]
    else:
        return [False]
[ 1042, 9157 ]
def METHOD_NAME(self): return """\ font Sets this legend group's title font. text Sets the title of the legend group. """
[ 1302, 1303 ]
def METHOD_NAME(model: torch.nn.Module, d_vae: torch.nn.Module,
                data_loader: Iterable, optimizer: torch.optim.Optimizer,
                device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                log_writer=None, lr_scheduler=None, start_steps=None,
                lr_schedule_values=None, wd_schedule_values=None):
    model.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # assign learning rate & weight decay for each step
        it = start_steps + step  # global training iteration
        if lr_schedule_values is not None or wd_schedule_values is not None:
            for i, param_group in enumerate(optimizer.param_groups):
                if lr_schedule_values is not None:
                    param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
                if wd_schedule_values is not None and param_group["weight_decay"] > 0:
                    param_group["weight_decay"] = wd_schedule_values[it]

        samples, images, bool_masked_pos = batch
        images = images.to(device, non_blocking=True)
        samples = samples.to(device, non_blocking=True)
        bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)

        with torch.no_grad():
            input_ids = d_vae.get_codebook_indices(images).flatten(1)
            bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
            labels = input_ids[bool_masked_pos]

        with torch.cuda.amp.autocast():
            outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
            loss = nn.CrossEntropyLoss()(input=outputs, target=labels)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()
        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
                                parameters=model.parameters(), create_graph=is_second_order)
        loss_scale_value = loss_scaler.state_dict()["scale"]

        torch.cuda.synchronize()

        mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
        metric_logger.update(mlm_acc=mlm_acc)
        if log_writer is not None:
            log_writer.update(mlm_acc=mlm_acc, head="loss")

        metric_logger.update(loss=loss_value)
        metric_logger.update(loss_scale=loss_scale_value)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])

        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if group["weight_decay"] > 0:
                weight_decay_value = group["weight_decay"]
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)

        if log_writer is not None:
            log_writer.update(loss=loss_value, head="loss")
            log_writer.update(loss_scale=loss_scale_value, head="opt")
            log_writer.update(lr=max_lr, head="opt")
            log_writer.update(min_lr=min_lr, head="opt")
            log_writer.update(weight_decay=weight_decay_value, head="opt")
            log_writer.update(grad_norm=grad_norm, head="opt")
            log_writer.set_step()

        if lr_scheduler is not None:
            lr_scheduler.step_update(start_steps + step)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
[ 849, 206, 1165 ]
def METHOD_NAME(self, mock_get):
    g = self.create_getter()

    # test the non error operation
    job_response = {"jobs": "jobs"}
    mock_get.return_value = test_utils.Response(job_response)
    self.set_counts()
    jobs = g.get_possible_jobs()
    self.assertEqual(jobs, job_response["jobs"])
    self.compare_counts()

    # check when the server responds incorrectly
    job_response = {"none": "none"}
    mock_get.return_value = test_utils.Response(job_response)
    self.set_counts()
    jobs = g.get_possible_jobs()
    self.compare_counts()
    self.assertEqual(jobs, None)

    # check when requests has bad status code
    mock_get.return_value = test_utils.Response(job_response, do_raise=True)
    self.set_counts()
    jobs = g.get_possible_jobs()
    self.compare_counts()
    self.assertEqual(jobs, None)
[ 9, 19, 1234, 494 ]
def METHOD_NAME(self, some_string):
    # maybe "foo/bar" is some group's path
    try:
        # try with exact case
        return self.get_group(some_string)
    except NotFoundException:
        # try case insensitive
        groups = self._make_requests_to_api(
            "groups?search=%s",
            some_string.lower(),
            method="GET",
        )
        for group in groups:
            if group["full_path"].lower() == some_string.lower():
                return group
        raise NotFoundException(
            f"Group/subgroup with path '{some_string}' not found."
        )
[ 19, 846, 331, 4419 ]
def METHOD_NAME(self, cfg: Union[DictConfig, Namespace]):
    if (isinstance(cfg, DictConfig) and cfg._name == "characters_asr") or (
        isinstance(cfg, Namespace) and getattr(cfg, "bpe", None) == "characters_asr"
    ):
        self.bpe = encoders.METHOD_NAME(
            cfg, space_symbol=self.space_word, non_lang_syms=self.non_lang_syms
        )
    else:
        self.bpe = encoders.METHOD_NAME(cfg)
[ 56, 3138 ]
def METHOD_NAME(app, session, status, legal_type, submitter_role):
    """Assert that the dissolution email processor for firms works as expected."""
    # setup filing + business for email
    legal_name = 'test business'
    filing = prep_dissolution_filing(session, 'FM1234567', '1', status, legal_type,
                                     legal_name, submitter_role)
    token = 'token'
    # test processor
    with patch.object(dissolution_notification, '_get_pdfs', return_value=[]) as mock_get_pdfs:
        with patch.object(dissolution_notification, 'get_recipient_from_auth',
                          return_value='[email protected]'):
            with patch.object(dissolution_notification, 'get_user_email_from_auth',
                              return_value='[email protected]'):
                email = dissolution_notification.process(
                    {'filingId': filing.id, 'type': 'dissolution', 'option': status}, token)

                if status == 'PAID':
                    assert email['content']['subject'] == legal_name + \
                        ' - Confirmation of Filing from the Business Registry'
                else:
                    assert email['content']['subject'] == \
                        legal_name + ' - Dissolution Documents from the Business Registry'

                if submitter_role:
                    assert f'{submitter_role}@email.com' in email['recipients']
                else:
                    assert '[email protected]' in email['recipients']
                assert '[email protected]' in email['recipients']
                assert '[email protected]' in email['recipients']

                assert email['content']['body']
                assert email['content']['attachments'] == []
                assert mock_get_pdfs.call_args[0][0] == status
                assert mock_get_pdfs.call_args[0][1] == token
                assert mock_get_pdfs.call_args[0][2]['identifier'] == 'FM1234567'
                assert mock_get_pdfs.call_args[0][2]['legalName'] == legal_name
                assert mock_get_pdfs.call_args[0][2]['legalType'] == legal_type
                assert mock_get_pdfs.call_args[0][3] == filing
[ 9, 17605, -1, 857 ]
def METHOD_NAME(self, sel_mock):
    sel_mock.return_value = [10, 20]
    jdata = {"model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": "auto"}}}
    expected_out = {
        "model": {"descriptor": {"type": "se_e2_a", "rcut": 6, "sel": [12, 24]}}
    }
    jdata = update_sel(jdata)
    self.assertEqual(jdata, expected_out)
[ 9, 86, 10289 ]
def METHOD_NAME(self):
    now = datetime.datetime.utcnow()
    self.kube._node_name = 'foo'
    elector = LeaderElector(self.kube)
    cm = {
        'kind': 'ConfigMap',
        'apiVersion': 'v1',
        'data': {},
        'metadata': {
            'name': 'datadog-leader-elector',
            'namespace': 'default',
            'resourceVersion': '5563782',
            'creationTimestamp': '2017-08-21T17:37:32Z',
            'annotations': {
                'acquired_time': datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%S.%f"),
                'creator': 'dd-agent-284pl'
            },
            'selfLink': '/api/v1/namespaces/default/configmaps/datadog-leader-elector',
            'uid': '697b957c-8697-11e7-b62f-42010af002d4'
        },
    }
    time.sleep(1)
    pl = elector._build_update_cm_payload(cm)
    self.assertEqual(pl['data'], cm['data'])
    self.assertEqual(pl['metadata']['name'], cm['metadata']['name'])
    self.assertEqual(pl['metadata']['namespace'], cm['metadata']['namespace'])
    self.assertEqual(pl['metadata']['annotations'][CREATOR_ANNOTATION],
                     cm['metadata']['annotations'][CREATOR_ANNOTATION])
    self.assertTrue(pl['metadata']['annotations'][ACQUIRE_TIME_ANNOTATION] >
                    cm['metadata']['annotations'][ACQUIRE_TIME_ANNOTATION])
[ 9, 56, 86, 8616, 288 ]
def METHOD_NAME(self) -> None: """Clears all storage.""" self._general.METHOD_NAME() self._users.METHOD_NAME() for filepath in globals.storage_path.glob('storage_*.json'): filepath.unlink()
[ 537 ]
def METHOD_NAME(self):
    return self.child.METHOD_NAME.copy()
[ 386 ]
def METHOD_NAME(self, serialised: str, filename: str) -> Tuple[List[str], List[str]]:
    config = configparser.ConfigParser(interpolation=None)
    config.read_string(serialised)

    if not config.has_section("general"):
        raise UM.VersionUpgrade.FormatException("No \"general\" section.")

    # Make z_seam_x and z_seam_y options visible. In a clean 2.4 they are visible by default.
    if config.has_option("general", "visible_settings"):
        visible_settings = config.get("general", "visible_settings")
        visible_set = set(visible_settings.split(";"))
        visible_set.add("z_seam_x")
        visible_set.add("z_seam_y")
        config.set("general", "visible_settings", ";".join(visible_set))
    config.set("general", "version", value="4")

    output = io.StringIO()
    config.write(output)
    return [filename], [output.getvalue()]
[ 738, 3958 ]
def METHOD_NAME(self, source, loads):
    rval = loads(source)
    (a, b), (c, d) = sorted(rval[0]), sorted(rval[1])
    self.assertIs(a, c)
    self.assertIs(b, d)
[ 250, 219, 1772 ]
def METHOD_NAME(self, environment, args=None):
    Path(self.dir, environment).mkdir()
    self.database = migrator.db.Database({'database_driver': 'sqlite'}, environment)
    self.database.DynamicBase.metadata.create_all(self.database.engine)
[ 102, 9 ]
def METHOD_NAME(self):
    with vega_fluxd.set({'V': 3631 * u.Jy}):
        vega = Vega(None)
        fluxd = vega.observe('V', unit='Jy')
        assert np.isclose(fluxd.value, 3631)
[ 9, 6427, 16174, -1 ]
def METHOD_NAME(self): """Posts should be sorted chronologically.""" t = ThreadFactory() t.post_set.create(creator=t.creator, content="foo") t.post_set.create(creator=t.creator, content="bar") posts = t.post_set.all() for i in range(len(posts) - 1): self.assertLessEqual(posts[i].created, posts[i + 1].created)
[ 9, 72, 3053 ]