text (string, lengths 15 to 7.82k)
ids (sequence, lengths 1 to 7)
def METHOD_NAME(default_conf, caplog) -> None:
    instance_strategy_updater = StrategyUpdater()
    modified_code3 = instance_strategy_updater.update_code("""
[ 9, 1554, 9185, 891 ]
def METHOD_NAME(self, permission, object, context):
    return 1
[ 250, 204 ]
async def METHOD_NAME(self, request: web.Request):
    ability = await self.create_or_update_on_disk_object(request)
    return web.json_response(ability.display)
[ 129, 894, 86, 11458 ]
def METHOD_NAME(x, axis=-1):
    """Perform softmax activation on the data.

    Use approximation to compute exponent for faster speed.

    Parameters
    ----------
    data : tvm.te.Tensor
        can be any dimension

    axis : int
        channel axis

    Returns
    -------
    output : tvm.te.Tensor
        output shape is the same as input
    """
    return softmax_common(x, axis, True)
[ 2602, 4027 ]
def METHOD_NAME(self, menu):
[ 0, 2470 ]
def METHOD_NAME(self, user, kc_user=None, is_superuser=False, delete=False):
    # map a kc user to a django user
    if not kc_user:
        kc_user = self._get_kc_user(user)

    user_auth_level = self.auth_service.get_auth_level(kc_user)
    if user_auth_level == AuthorizationLevel.ADMIN:
        is_superuser = True

    # update the superuser and staff status of the user
    user.is_staff = is_superuser
    user.is_superuser = is_superuser
    user.username = kc_user.get("username", kc_user["email"])
    user.first_name = kc_user.get("firstName", "")
    user.last_name = kc_user.get("lastName", "")
    user.email = kc_user.get("email", "")
    user.is_active = kc_user.get("enabled", delete)
    return user
[ 422, 14845, 21 ]
def METHOD_NAME(self): """ test that a basic couch user gets created when calling CouchUser.from_web_user """ username = "joe" email = "[email protected]" password = "password" domain = "test" domain_obj = create_domain(domain) self.addCleanup(domain_obj.delete) couch_user = WebUser.create(domain, username, password, None, None, email=email) self.addCleanup(couch_user.delete, domain, deleted_by=None) self.assertEqual(couch_user.domains, [domain]) self.assertEqual(couch_user.email, email) self.assertEqual(couch_user.username, username) django_user = couch_user.get_django_user() self.assertEqual(django_user.email, email) self.assertEqual(django_user.username, username)
[ 9, 129, 756, 2412, 21 ]
def METHOD_NAME(self):
    parameters = {
        **self.serialize_url_param(
            "clusterName", self.ctx.args.cluster_name,
            required=True,
        ),
        **self.serialize_url_param(
            "resourceGroupName", self.ctx.args.resource_group,
            required=True,
        ),
        **self.serialize_url_param(
            "subscriptionId", self.ctx.subscription_id,
            required=True,
        ),
    }
    return parameters
[ 274, 386 ]
def METHOD_NAME(transition=0.5, width=0.1):
    """Colormap often used in H.E.S.S. collaboration publications.

    This colormap goes black -> blue -> red -> yellow -> white.

    A sharp blue -> red -> yellow transition is often used for significance images
    with a value of red at ``transition ~ 5`` or ``transition ~ 7``
    so that the following effect is achieved:

    - black, blue: non-significant features, not well visible
    - red: features at the detection threshold ``transition``
    - yellow, white: significant features, very well visible

    The transition parameter is defined between 0 and 1. To calculate the value
    from data units an `~astropy.visualization.mpl_normalize.ImageNormalize`
    instance should be used (see example below).

    Parameters
    ----------
    transition : float (default = 0.5)
        Value of the transition to red (between 0 and 1).
    width : float (default = 0.5)
        Width of the blue-red color transition (between 0 and 1).

    Returns
    -------
    colormap : `matplotlib.colors.LinearSegmentedColormap`
        Colormap

    Examples
    --------
    >>> from gammapy.visualization import colormap_hess
    >>> from astropy.visualization.mpl_normalize import ImageNormalize
    >>> from astropy.visualization import LinearStretch
    >>> normalize = ImageNormalize(vmin=-5, vmax=15, stretch=LinearStretch())
    >>> transition = normalize(5)
    >>> cmap = colormap_hess(transition=transition)
    """
    # Compute normalised values (range 0 to 1) that
    # correspond to red, blue, yellow.
    red = float(transition)

    if width > red:
        blue = 0.1 * red
    else:
        blue = red - width

    yellow = 2.0 / 3.0 * (1 - red) + red

    black, white = 0, 1

    # Create custom colormap
    # List entries: (value, (R, G, B))
    colors = [
        (black, "k"),
        (blue, (0, 0, 0.8)),
        (red, "r"),
        (yellow, (1.0, 1.0, 0)),
        (white, "w"),
    ]

    return LinearSegmentedColormap.from_list(name="hess", colors=colors)
[ 3517, 5019 ]
def METHOD_NAME(self, serial, realms):
    parameters = {"serial": serial, "realms": realms}
    response = self.make_admin_request("tokenrealm", params=parameters)
    return response
[ 0, 466, 2440 ]
def METHOD_NAME(self, columnvalue=None):
    self.current_row = []
    if columnvalue:
        self.current_row.append(columnvalue)
[ -1, 843 ]
def METHOD_NAME(self): """Random logged-in users cannot delete playlists unrelated to them.""" user = factories.UserFactory() playlist = factories.PlaylistFactory() jwt_token = UserAccessTokenFactory(user=user) self.assertEqual(models.Playlist.objects.count(), 1) response = self.client.delete( f"/api/playlists/{playlist.id}/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}", ) self.assertEqual(response.status_code, 403) self.assertEqual(models.Playlist.objects.count(), 1)
[ 9, 34, 556, 604, 236, 2717, 623 ]
def METHOD_NAME(self, wav, target_size):
    size = len(wav)
    diff = size - target_size
    if diff <= 0:
        return wav

    start = np.random.randint(0, diff + 1)
    end = size - diff + start
    return wav[start:end]
[ 712, 24, 232, 1318 ]
def METHOD_NAME(
    self, method: str, url: str, data: Optional[Dict[Any, Any]] = {}
) -> Any:
    try:
        r = getattr(self.client, method)(url, data, format="json")
        self.assertNotEqual(r.status_code, 403)
        return r
    except KeyError:
        pass
[ 250, 851 ]
def METHOD_NAME(self, expr):
    return '-Infinity'
[ 38, 2927, 7037 ]
def METHOD_NAME(self, app: Quart, session: SessionMixin) -> datetime | None:
    """Helper method to return the Session expiration time.

    If the session is not 'permanent' it will expire as and when
    the browser stops accessing the app.
    """
    if session.permanent:
        return datetime.utcnow() + app.permanent_session_lifetime
    else:
        return None
[ 19, 3650, 104 ]
def METHOD_NAME(self):
    self.snk.METHOD_NAME()
[ -1 ]
def METHOD_NAME(monkeypatch):
    size = (60, 15)

    def text_size(*args):
        return size

    monkeypatch.setattr("gaphor.diagram.text.Layout.size", text_size)
    return size
[ 3110, 526, 1318 ]
def METHOD_NAME(self):
    for object_relation, content in self.intelmq_fields['event'].items():
        self.misp_object_intelmq_event['attributes'].update(
            {object_relation: self._intelmq_misp_mapping(content, object_relation)}
        )
    for object_relation, content in self.intelmq_fields['report'].items():
        self.misp_object_intelmq_report['attributes'].update(
            {object_relation: self._intelmq_misp_mapping(content, object_relation)}
        )
[ 567, 1914 ]
def METHOD_NAME(self):
    return self._last_result["solution"]["answers"]
[ 19, 9099 ]
def METHOD_NAME(self, *args, **kwargs):
    result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
    return result
[ 146 ]
def METHOD_NAME(self):
    # given
    fetch_messages_endpoint_under_test = pubnub.fetch_messages()
    fetch_messages_endpoint_under_test.channels("channel1")

    # when
    fetch_messages_endpoint_under_test.validate_params()

    # then
    assert fetch_messages_endpoint_under_test._count == EXPECTED_SINGLE_CHANNEL_DEFAULT_MESSAGES
[ 9, 97, 307, 2659, 403, 235, 1646 ]
def METHOD_NAME(self, *args):
    return _ESL.ESLevent_setPriority(self, *args)
[ 0, 2654 ]
def METHOD_NAME(user, job, pricing):
    return pricing.get_price_for_user(
        user=user,
        list_type=job.list_type)
[ 19, 202, 806 ]
def METHOD_NAME(self): """ Verify that we have enough points for this interpolation algorithm. """ pass
[ 250, 200 ]
def METHOD_NAME(self, xp, order):
    a = testing.shaped_arange((2, 1), xp)
    b = xp.broadcast_to(a, (3, 2, 4))
    assert not b.flags.c_contiguous and not b.flags.f_contiguous
    b = b.ravel(order)
    assert b.flags.c_contiguous
    return b
[ 9, 6221, -1 ]
def METHOD_NAME(self, arg_strings, *args, **kwargs):
    in_args = set(arg_strings)
    d_sp = self.__default_subparser
    if d_sp is not None and not {"-h", "--help"}.intersection(in_args):
        for x in self._subparsers._actions:
            subparser_found = isinstance(
                x, argparse._SubParsersAction
            ) and in_args.intersection(x._name_parser_map.keys())
            if subparser_found:
                break
        else:
            logger.warning(
                "Please use `ucc-gen build` if you want to build "
                "an add-on, using just `ucc-gen` will be deprecated"
            )
            arg_strings = [d_sp] + arg_strings
    return super().METHOD_NAME(arg_strings, *args, **kwargs)
[ 214, 3478, 335 ]
def METHOD_NAME(self, **kwargs):
    return ""
[ 19, 857, 366, 382 ]
def METHOD_NAME(self, pool_handle, checker_request, checker, loop=None):
    if loop is None:
        loop = asyncio.get_event_loop()
    results = loop.run_until_complete(
        self.ensure_previous_request_applied(pool_handle, checker_request, checker))
    return results
[ 22, 7767, 602, 1511, 377, 2350 ]
def METHOD_NAME(line, cell):
    # search for federatedml path
    base_path = None
    for p in sys.path:
        if p.endswith('/fate/python'):
            base_path = p
            break

    if base_path is None:
        raise ValueError(
            'cannot find fate/python in system path, please check your configuration')

    base_path = base_path + '/federatedml/'

    model_pth = 'nn/model_zoo/'
    dataset_pth = 'nn/dataset/'
    trainer_pth = 'nn/homo/trainer/'
    aggregator_pth = 'framework/homo/aggregator/'
    loss_path = 'nn/loss/'

    mode_map = {
        'model': model_pth,
        'trainer': trainer_pth,
        'aggregator': aggregator_pth,
        'dataset': dataset_pth,
        'loss': loss_path
    }

    args = line.split()
    assert len(args) == 2, "input args len is not 2, got {} \n expect format: %%save_to_fate SAVE_MODE FILENAME \n SAVE_MODE in ['model', 'dataset', 'trainer', 'loss', 'aggregator'] FILE_NAME xxx.py".format(args)
    modes_avail = ['model', 'dataset', 'trainer', 'aggregator', 'loss']
    save_mode = args[0]
    file_name = args[1]
    assert save_mode in modes_avail, 'avail modes are {}, got {}'.format(
        modes_avail, save_mode)
    assert file_name.endswith('.py'), 'save file should be a .py'
    with open(base_path + mode_map[save_mode] + file_name, 'w') as f:
        f.write(cell)
    ipy.get_ipython().run_cell(cell)
[ 73, 24, 16660 ]
def METHOD_NAME(session): """test function for gpu""" gpu_lib = None try: gpu_lib = export_gpu_add_lib() test_add(session, gpu_lib, "/gpu:0") finally: if gpu_lib is not None: os.remove(gpu_lib)
[ 1667, 9 ]
def METHOD_NAME(firmware: Firmware, fo_entry: FileObjectEntry) -> FirmwareEntry:
    return FirmwareEntry(
        uid=firmware.uid,
        submission_date=time(),
        release_date=datetime.strptime(firmware.release_date, '%Y-%m-%d'),
        version=firmware.version,
        vendor=firmware.vendor,
        device_name=firmware.device_name,
        device_class=firmware.device_class,
        device_part=firmware.part,
        firmware_tags=firmware.tags,
        root_object=fo_entry,
    )
[ 129, 1730, 475 ]
def METHOD_NAME(self):
    self.requires("libiconv/1.17")
[ 5186 ]
def METHOD_NAME():
    plist = get_installed_products()
    lines = []
    for p in plist:
        x = [Env.nodename, p.ProductName, p.VersionString, "", "msi", p.InstallDate]
        lines.append(x)
    return lines
[ -1 ]
def METHOD_NAME(self) -> None:
    self.mock_store.is_partial_state_room.return_value = True

    d = ensureDeferred(self.tracker.await_full_state("room_id"))

    # there should be no result yet
    self.assertNoResult(d)

    # notifying that the room has been de-partial-stated should unblock
    self.tracker.notify_un_partial_stated("room_id")
    self.successResultOf(d)
[ 9, 37, 43, 2351, 6703, 551 ]
def METHOD_NAME(self, inputs, outputs):
    from panopticapi.utils import id2rgb

    for input, output in zip(inputs, outputs):
        panoptic_img, segments_info = output["panoptic_seg"]
        panoptic_img = panoptic_img.cpu().numpy()
        if segments_info is None:
            # If "segments_info" is None, we assume "panoptic_img" is a
            # H*W int32 image storing the panoptic_id in the format of
            # category_id * label_divisor + instance_id. We reserve -1 for
            # VOID label, and add 1 to panoptic_img since the official
            # evaluation script uses 0 for VOID label.
            label_divisor = self._metadata.label_divisor
            segments_info = []
            for panoptic_label in np.unique(panoptic_img):
                if panoptic_label == -1:
                    # VOID region.
                    continue
                pred_class = panoptic_label // label_divisor
                isthing = (
                    pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
                )
                segments_info.append(
                    {
                        "id": int(panoptic_label) + 1,
                        "category_id": int(pred_class),
                        "isthing": bool(isthing),
                    }
                )
            # Official evaluation script uses 0 for VOID label.
            panoptic_img += 1

        file_name = os.path.basename(input["file_name"])
        file_name_png = os.path.splitext(file_name)[0] + ".png"
        with io.BytesIO() as out:
            Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
            segments_info = [self._convert_category_id(x) for x in segments_info]
            self._predictions.append(
                {
                    "image_id": input["image_id"],
                    "file_name": file_name_png,
                    "png_string": out.getvalue(),
                    "segments_info": segments_info,
                }
            )
[ 356 ]
def METHOD_NAME(self, hard=False):
    """Reset internal state of the ODE solver."""
    if self._is_set:
        state = self.get_state()
    if hard:
        self._prepare()
    if self._is_set:
        self.set_state(*state)
[ 656 ]
def METHOD_NAME(self) -> str:
    data = {
        "key": self.data["key"],
        "value": self.data["value"],
        "match": MATCH_CHOICES[self.data["match"]],
    }
    return self.label.format(**data)
[ 338, 636 ]
def METHOD_NAME(self, sdfg, state):
    in_edges = state.in_edges(self)
    if len(in_edges) != 2:
        raise ValueError("Expected exactly two inputs to axpy")
    in_memlets = [in_edges[0].data, in_edges[1].data]

    out_edges = state.out_edges(self)
    if len(out_edges) != 1:
        raise ValueError("Expected exactly one output from axpy")
    out_memlet = out_edges[0].data

    size = in_memlets[0].subset.size()
    if len(size) != 1:
        raise ValueError("axpy only supported on 1-dimensional arrays")
    if size != in_memlets[1].subset.size():
        raise ValueError("Inputs to axpy must have equal size")
    if size != out_memlet.subset.size():
        raise ValueError("Output of axpy must have same size as input")
    if (in_memlets[0].wcr is not None or in_memlets[1].wcr is not None
            or out_memlet.wcr is not None):
        raise ValueError("WCR on axpy memlets not supported")
    return True
[ 187 ]
def METHOD_NAME(self):
    user = UserFactory.create(is_staff=False)
    self.client.force_authenticate(user)

    response = self.client.get(
        reverse_lazy(
            "api:form-price-logic-rules", kwargs={"uuid_or_slug": self.form.uuid}
        )
    )

    self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
[ 9, 1045, 21, 984 ]
def METHOD_NAME(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",  # does this need avg?
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
[ 19, 5040, 8449, 1076 ]
def METHOD_NAME(self, auto_schema: 'AutoSchema', direction: Direction):
    """ override for customized serializer field mapping """
    pass  # pragma: no cover
[ 422, 1386, 101 ]
def METHOD_NAME(self, row: int) -> None:
    super().METHOD_NAME(row)
    if row != -1:
        # Update allocation var widget states, since allocation may differ from
        # the previous color space.
        self.param_edit.update_available_allocation_vars()
[ 69, 1056, 843, 1180 ]
def METHOD_NAME(sample_shape, batch_shape, event_shape):
    ones_shape = torch.Size((1,) * len(batch_shape))
    mask = torch.empty(ones_shape).bernoulli_(0.5).bool()
    zero = torch.zeros(ones_shape + event_shape)
    d0 = dist.Uniform(zero - 2, zero + 1).to_event(len(event_shape))
    d1 = dist.Uniform(zero - 1, zero + 2).to_event(len(event_shape))
    d = dist.MaskedMixture(mask, d0, d1)

    assert d.sample().shape == ones_shape + event_shape
    assert d.mean.shape == ones_shape + event_shape
    assert d.variance.shape == ones_shape + event_shape
    assert d.sample(sample_shape).shape == sample_shape + ones_shape + event_shape

    assert (
        d.expand(sample_shape + batch_shape).batch_shape
        == sample_shape + batch_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).sample().shape
        == sample_shape + batch_shape + event_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).mean.shape
        == sample_shape + batch_shape + event_shape
    )
    assert (
        d.expand(sample_shape + batch_shape).variance.shape
        == sample_shape + batch_shape + event_shape
    )
[ 9, 2450 ]
def METHOD_NAME(result, reproducible, logger):
    try:
        print(result['reproducible'][reproducible]['diffoscope.text'])
        return 0
    except KeyError:
        print("reproducible '%s' not found" % reproducible)
        return 1
[ 697, 12833 ]
def METHOD_NAME():
    engine = create_engine('sqlite:///:memory:', echo=False)
    with engine.connect() as conn:
        yield conn
[ 1267 ]
def METHOD_NAME(self): r""" Return whether command ``ffmpeg`` in the path is functional. EXAMPLES:: sage: from sage.features.ffmpeg import FFmpeg sage: FFmpeg().is_functional() # optional - ffmpeg FeatureTestResult('ffmpeg', True) """ # Create the content of 1-pixel png file content = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc`\x00\x00\x00\x02\x00\x01H\xaf\xa4q\x00\x00\x00\x00IEND\xaeB`\x82' # NOTE: # # This is how the above content of a 1 pixel PNG was created:: # # sage: import numpy as np # sage: from PIL import Image # sage: image = Image.fromarray(np.array([[100]], dtype=np.uint8)) # sage: image.save('file.png') # sage: with open('file.png', 'rb') as f: # ....: content = f.read() # create a png file with the content from sage.misc.temporary_file import tmp_filename base_filename_png = tmp_filename(ext='.png') with open(base_filename_png, 'wb') as f: f.write(content) # Set up filenames import os base, filename_png = os.path.split(base_filename_png) filename, _png = os.path.splitext(filename_png) # Setting a list of commands (taken from sage/plot/animate.py) # The `-nostdin` is needed to avoid the command to hang, see # https://stackoverflow.com/questions/16523746/ffmpeg-hangs-when-run-in-background commands = [] for ext in ['.avi', '.flv', '.gif', '.mkv', '.mov', #'.mpg', '.mp4', '.ogg', '.ogv', '.webm', '.wmv']: cmd = ['ffmpeg', '-nostdin', '-y', '-f', 'image2', '-r', '5', '-i', filename_png, '-pix_fmt', 'rgb24', '-loop', '0', filename + ext] commands.append(cmd) for ext in ['.avi', '.flv', '.gif', '.mkv', '.mov', '.mpg', '.mp4', '.ogg', '.ogv', '.webm', '.wmv']: cmd = ['ffmpeg', '-nostdin', '-y', '-f', 'image2', '-i', filename_png, filename + ext] commands.append(cmd) # Running the commands and reporting any issue encountered from subprocess import run for cmd in commands: try: result = run(cmd, cwd=base, capture_output=True, text=True) except OSError as e: return FeatureTestResult(self, False, reason='Running command "{}" ' 'raised an OSError "{}" '.format(' '.join(cmd), e)) # If an error occurred, return False if result.returncode: return FeatureTestResult(self, False, reason='Running command "{}" ' 'returned non-zero exit status "{}" with stderr ' '"{}" and stdout "{}".'.format(result.args, result.returncode, result.stderr.strip(), result.stdout.strip())) # If necessary, run more tests here # ... # The command seems functional return FeatureTestResult(self, True)
[ 137, 4167 ]
def METHOD_NAME(self):
    self.account.is_customer_billing_account = True
    self.assertRaises(NewSubscriptionError, lambda: Subscription.new_domain_subscription(
        self.account, self.domain.name, self.advanced_plan
    ))
[ 9, 2448, 145, 130, 4398, 24, 1487 ]
def METHOD_NAME(columns, names, page_size=None, format_strings=None):
    """ Return an html table of this data

    Parameters
    ----------
    columns : list of numpy arrays
    names : list of strings
        The list of columns names
    page_size : {int, None}, optional
        The number of items to show on each page of the table
    format_strings : {lists of strings, None}, optional
        The ICU format string for this column, None for no formatting. All
        columns must have a format string if provided.

    Returns
    -------
    html_table : str
        A str containing the html code to display a table of this data
    """
    if page_size is None:
        page = 'disable'
    else:
        page = 'enable'

    div_id = uuid.uuid4()

    column_descriptions = []
    for column, name in zip(columns, names):
        if column.dtype.kind == 'S' or column.dtype.kind == 'U':
            ctype = 'string'
        else:
            ctype = 'number'
        column_descriptions.append((ctype, name))

    data = []
    for item in zip(*columns):
        data.append(list(item))

    return google_table_template.render(div_id=div_id,
                                        page_enable=page,
                                        column_descriptions=column_descriptions,
                                        page_size=page_size,
                                        data=data,
                                        format_strings=format_strings,
                                        )
[ 382, 410 ]
def METHOD_NAME(action):
    if chr(action) == 'D':
        action_str = "DELETE"
    else:
        action_str = "RENAME"
    return action_str
[ -1 ]
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
    """
    Specifies tags
    """
    return pulumi.get(self, "tags")
[ 114 ]
def METHOD_NAME(load_config_sample):
    """Test failure when no downgrade path is available."""
    config_initial = load_config_sample('reference/5.json')

    # target higher than initial
    with pytest.raises(ConfigurationError):
        downgrade_config(copy.deepcopy(config_initial), 6)

    # no migration available
    with pytest.raises(ConfigurationError):
        downgrade_config(copy.deepcopy(config_initial), -1)

    # circular dependency
    with pytest.raises(ConfigurationError):
        downgrade_config(config_initial, 4, migrations=[CircularMigration])
[ 9, 1502, 157, 180 ]
def METHOD_NAME(self, nodes, repeat=None):
    # Compile STRING | NAME [Details] | (...) | [...]
    assert len(nodes) >= 1
    node = nodes[0]
    if node.type == token.STRING:
        value = unicode(literals.evalString(node.value))
        return pytree.LeafPattern(_type_of_literal(value), value)
    elif node.type == token.NAME:
        value = node.value
        if value.isupper():
            if value not in TOKEN_MAP:
                raise PatternSyntaxError("Invalid token: %r" % value)
            if nodes[1:]:
                raise PatternSyntaxError("Can't have details for token")
            return pytree.LeafPattern(TOKEN_MAP[value])
        else:
            if value == "any":
                type = None
            elif not value.startswith("_"):
                type = getattr(self.pysyms, value, None)
                if type is None:
                    raise PatternSyntaxError("Invalid symbol: %r" % value)
            if nodes[1:]:  # Details present
                content = [self.compile_node(nodes[1].children[1])]
            else:
                content = None
            return pytree.NodePattern(type, content)
    elif node.value == "(":
        return self.compile_node(nodes[1])
    elif node.value == "[":
        assert repeat is None
        subpattern = self.compile_node(nodes[1])
        return pytree.WildcardPattern([[subpattern]], min=0, max=1)
    assert False, node
[ 296, 756 ]
def METHOD_NAME(iterator):
    radius_result = dict()
    for k, v in iterator:
        if v[0] not in radius_result:
            radius_result[v[0]] = v[1]
        elif v[1] >= radius_result[v[0]]:
            radius_result[v[0]] = v[1]
    return radius_result
[ 232, 3662 ]
def METHOD_NAME(self):
    self.product_image = ProductImageFactory()
    self.context = template.Context({"image": self.product_image})
    self.template = template.Template(
        "{% load image_tags %}"
        '{% oscar_thumbnail image.original "x155" upscale=False %}'
    )
    self.template_as_context_value = template.Template(
        "{% load image_tags %}"
        '{% oscar_thumbnail image.original "x155" upscale=False as thumb %}'
        "{{ thumb.url }}"
    )
[ 0, 1 ]
def METHOD_NAME(self):
[ 531, 481 ]
def METHOD_NAME(self, m):
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, std=0.02)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.Embedding):
        nn.init.normal_(m.weight, std=0.02)
        if m.padding_idx is not None:
            nn.init.zeros_(m.weight[m.padding_idx])
[ 176, 733 ]
def METHOD_NAME(self, data):
    # Callback stub for the DUT's main.
    self._stderr += data.encode("utf-8")
[ 77, 3929 ]
def METHOD_NAME(self): """No cross-talk between attendance for different meetings""" # numbers are arbitrary here first_mtg = MeetingFactory(type_id='ietf', number='114') second_mtg = MeetingFactory(type_id='ietf', number='115') # Create a person who attended a remote session for first_mtg and onsite for second_mtg without # checking in for either. p = MeetingRegistrationFactory(meeting=second_mtg, reg_type='onsite', attended=False, checkedin=False).person AttendedFactory(session__meeting=first_mtg, person=p) MeetingRegistrationFactory(meeting=first_mtg, person=p, reg_type='remote', attended=False, checkedin=False) AttendedFactory(session__meeting=second_mtg, person=p) att = first_mtg.get_attendance() self.assertEqual(att.onsite, 0) self.assertEqual(att.remote, 1) att = second_mtg.get_attendance() self.assertEqual(att.onsite, 1) self.assertEqual(att.remote, 0)
[ 9, 19, 9560, 7489, 15741, 3156 ]
def METHOD_NAME(self):
[ 793 ]
def METHOD_NAME(self) -> None:
    self._stream_config.schemaless = False
    self._validation_policy.record_passes_validation_policy.return_value = False
    self._parser.parse_records.side_effect = [self._iter([self._A_RECORD, ValueError("An error")])]

    messages = list(self._stream.read_records_from_slice({"files": [
        RemoteFile(uri="invalid_file", last_modified=self._NOW),
        RemoteFile(uri="valid_file", last_modified=self._NOW),
    ]}))

    assert messages[0].log.level == Level.ERROR
    assert messages[1].log.level == Level.WARN
[ 9, 1393, 442, 1887, 10682, 2530, 1646 ]
def METHOD_NAME(self):
    while (self.gui.client == None):
        pass

    previous_time = datetime.now()
    while (True):
        # Sleep for 2 seconds
        time.sleep(2)

        # Measure the current time and subtract from previous time to get real time interval
        current_time = datetime.now()
        dt = current_time - previous_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        previous_time = current_time

        # Get the time period
        try:
            # Division by zero
            self.measured_cycle = ms / self.iteration_counter
        except:
            self.measured_cycle = 0

        # Reset the counter
        self.iteration_counter = 0
[ 599, 600 ]
def METHOD_NAME(full_path, sr=None):
    data, sr = librosa.load(full_path, sr=sr)
    data = np.clip(data, -1, 1)  # potentially out of [-1, 1] due to resampling
    data = data * 32768.0  # match values loaded by scipy
    return torch.FloatTensor(data.astype(np.float32)), sr
[ 557, 4097, 24, 3296 ]
def METHOD_NAME(self):
    transformed_points = _TransformPoints(
        self.points, self.rotation.GetAsMatrix4())
    expected_rotated_points = np.array([[-1, 1, 0], [-1, 2, 0]]).T
    self.assertTrue(
        np.allclose(transformed_points, expected_rotated_points))
[ 9, 2271 ]
def METHOD_NAME(self, Q: torch.Tensor, k=10):
    if k <= 10:
        if self.config.ncells is None:
            self.configure(ncells=1)
        if self.config.centroid_score_threshold is None:
            self.configure(centroid_score_threshold=0.5)
        if self.config.ndocs is None:
            self.configure(ndocs=256)
    elif k <= 100:
        if self.config.ncells is None:
            self.configure(ncells=2)
        if self.config.centroid_score_threshold is None:
            self.configure(centroid_score_threshold=0.45)
        if self.config.ndocs is None:
            self.configure(ndocs=1024)
    else:
        if self.config.ncells is None:
            self.configure(ncells=4)
        if self.config.centroid_score_threshold is None:
            self.configure(centroid_score_threshold=0.4)
        if self.config.ndocs is None:
            self.configure(ndocs=max(k * 4, 4096))

    pids, scores = self.ranker.rank(self.config, Q, k)

    return pids[:k], list(range(1, k+1)), scores[:k]
[ 3829, 1070 ]
def METHOD_NAME(self, connection):
    some_table = self.tables.some_table
    some_other_table = self.tables.some_other_table

    connection.execute(
        some_other_table.insert().from_select(
            ["id", "data", "parent_id"], select(some_table)
        )
    )

    cte = (
        select(some_table)
        .where(some_table.c.data.in_(["d2", "d3", "d4"]))
        .cte("some_cte")
    )
    connection.execute(
        some_other_table.delete().where(
            some_other_table.c.data
            == select(cte.c.data)
            .where(cte.c.id == some_other_table.c.id)
            .scalar_subquery()
        )
    )
    eq_(
        connection.execute(
            select(some_other_table).order_by(some_other_table.c.id)
        ).fetchall(),
        [(1, "d1", None), (5, "d5", 3)],
    )
[ 9, 34, 1997, -1, 3834, 3835 ]
def METHOD_NAME(self, closed_is_special=True):
    now = time.time()
    expiration = self._connection.get_timer()
    if expiration is None:
        expiration = now + 3600  # arbitrary "big" value
    interval = max(expiration - now, 0)
    if self._closed and closed_is_special:
        # lower sleep interval to avoid a race in the closing process
        # which can lead to higher latency closing due to sleeping when
        # we have events.
        interval = min(interval, 0.05)
    return (expiration, interval)
[ 19, 2401, 199 ]
def METHOD_NAME(f, args, a, b, c, fa, fb):
    tol = float_info_epsilon * 2
    if (b - a) < 2 * tol * a:
        c = a + (b - a) / 2
    elif c <= a + abs(a) * tol:
        c = a + abs(a) * tol
    elif c >= b - abs(b) * tol:
        c = b - abs(a) * tol

    fc = f(c, *args)

    if fc == 0:
        a = c
        fa = 0
        d = 0
        fd = 0
    else:
        if fa * fc < 0:
            d = b
            fd = fb
            b = c
            fb = fc
        else:
            d = a
            fd = fa
            a = c
            fa = fc
    return a, b, fa, fb, d, fd
[ 4283 ]
def METHOD_NAME(conf): """ Detects intltool-merge """ if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.env.INTLCACHE_ST = '--cache=%s' conf.env.INTLFLAGS_DEFAULT = ['-q', '-u'] conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL')
[ 416, 11451, 411 ]
def METHOD_NAME(self, index: int):
    index, key = self._map_index(index)
    return self.datasets[key].METHOD_NAME(index)
[ 181, 1735 ]
def METHOD_NAME(xml_writer, file_id, labels, threshold=30, sil=["pau", "br", "sil"]):
    xml_reader = SingingScoreReader(os.path.join(args.scp, "score.scp"))
    segments = []
    segment = SegInfo()
    for i in range(len(labels)):
        # remove unlabeled part
        if "08" in file_id and i < 135:
            continue

        label = labels[i]
        if label.label_id in sil:
            if len(segment.segs) > 0:
                segments.extend(segment.split(threshold=threshold))
                segment = SegInfo()
            continue
        segment.add(label.start, label.end, label.label_id)

    if len(segment.segs) > 0:
        segments.extend(segment.split(threshold=threshold))

    segments_w_id = {}
    id = 0
    for seg in segments:
        while len(seg) > 0:
            key = pack_zero(file_id, id)
            score = xml_reader[key]
            val, seg, notes = compare(key, score["note"], seg)
            segments_w_id[key] = val
            score["note"] = notes
            score["item_list"].append("phn")
            xml_writer[key] = score
            id += 1
    return segments_w_id
[ 93, 4373 ]
def METHOD_NAME(self) -> str:
    return pulumi.get(self, "id")
[ 147 ]
def METHOD_NAME(
    project: str,
    location: str,
    root_dir: str,
    num_selected_trials: int,
    deadline_hours: float,
    num_parallel_trials: int,
    single_run_max_secs: int,
    metadata: Input[Artifact],
    transform_output: Input[Artifact],
    materialized_train_split: Input[Artifact],
    materialized_eval_split: Input[Artifact],
    tuning_result_input_path: Input[Artifact],
    gcp_resources: dsl.OutputPath(str),
    tuning_result_output: Output[Artifact],
    worker_pool_specs_override_json: Optional[list] = [],
    encryption_spec_key_name: Optional[str] = '',
[ 9443, 13550, 3164, 988, 13227 ]
def METHOD_NAME(self, ShortcutsDialog):
    _translate = QtCore.QCoreApplication.translate
    ShortcutsDialog.setWindowTitle(_translate("ShortcutsDialog", "Shortcuts"))
    self.label.setText(_translate("ShortcutsDialog", "Filter:"))
    self.cbNoShortcut.setText(_translate("ShortcutsDialog", "Show items without shortcut"))
    self.label_2.setText(_translate("ShortcutsDialog", "Display type:"))
    self.cbDisplayType.setItemText(0, _translate("ShortcutsDialog", "Tooltip"))
    self.cbDisplayType.setItemText(1, _translate("ShortcutsDialog", "Text"))
    self.cbDisplayType.setItemText(2, _translate("ShortcutsDialog", "ObjectName"))
    self.cbAdvanced.setText(_translate("ShortcutsDialog", "Advanced Settings"))
    self.cbHighlight.setText(_translate("ShortcutsDialog", "Highlight on all Windows"))
[ 4735, 882 ]
def METHOD_NAME(fresh_database):
    run_writer_with_args("dummy_property_requiring_link")
    assert db.get_default_session().query(db.core.HaloProperty).count() == 0
    run_writer_with_args("dummy_link")
    run_writer_with_args("dummy_property_requiring_link")
    assert db.get_default_session().query(db.core.HaloProperty).count() == 15
[ 9, 548, 2913 ]
def METHOD_NAME(self) -> 'outputs.ManagementPolicySchemaResponse':
    """
    The Storage Account ManagementPolicy, in JSON format. See more details in:
    https://docs.microsoft.com/en-us/azure/storage/common/storage-lifecycle-managment-concepts.
    """
    return pulumi.get(self, "policy")
[ 54 ]
def METHOD_NAME(self): """Diagnose setup's success in creating points""" self.assertEqual(Destination.objects.filter(name='testWithin')[0].point, self.test_point_inside) self.assertEqual(Destination.objects.filter(name='testWithout')[0].point, self.test_point_outside)
[ 9, 182, 954 ]
def METHOD_NAME(self, inp, num_groups, affine, weight, bias, ifp, wfp, bfp):
[ 1792, 846, 387 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    The time the data should be kept in cache for fast queries in TimeSpan.
    """
    return pulumi.get(self, "hot_cache_period")
[ 3269, 596, 688 ]
def METHOD_NAME(self): """Re-sending e-invoice raises UserError""" e_invoice = self._create_e_invoice() self._create_fetchmail_pec_server() e_invoice.send_to_sdi() self.assertEqual(e_invoice.state, "sent") # Cannot re-send e-invoice whose state is 'sent' with self.assertRaises(UserError): e_invoice.send_to_sdi() # Cannot reset e-invoice whose state is 'sent' with self.assertRaises(UserError): e_invoice.reset_to_ready()
[ 9, 9973, 656 ]
def METHOD_NAME(self, parameters):
    parameters[TAU_SYN_E] = self.__tau_syn_E
    parameters[TAU_SYN_I] = self.__tau_syn_I
[ 238, 386 ]
def METHOD_NAME(self):
    vocab_facet = "vocab_%s" % TEST_VOCAB_NAME
    context = {"model": model, "session": model.Session}
    data = {
        "q": "warandpeace",
        "facet": "true",
        "facet.field": ["groups", "tags", vocab_facet],
        "facet.limit": "50",
        "facet.mincount": 1,
    }

    result = logic.get_action("package_search")(context, data)
    facets = result["search_facets"]
    facet_tags = [t["name"] for t in facets["tags"]["items"]]
    assert len(facet_tags)

    # make sure vocab tags are not in 'tags' facet
    assert "tag1" not in facet_tags
    assert "tag2" not in facet_tags

    # make sure vocab tags are in vocab_<TEST_VOCAB_NAME> facet
    vocab_facet_tags = [t["name"] for t in facets[vocab_facet]["items"]]
    assert "tag1" in vocab_facet_tags
    assert "tag2" in vocab_facet_tags
[ 9, 3259, 6798 ]
def METHOD_NAME(self, indentation):
    generic.print_dbg(indentation, "Base costs")
    for cost in self.costs:
        cost.METHOD_NAME(indentation + 2)
[ 290, 38 ]
def METHOD_NAME(self, signal, pivot, targets, settings):
[ 90 ]
def METHOD_NAME(*args):
[ 457 ]
def METHOD_NAME(gl_syms):
    return ['RGLSYM' + x.upper() + 'PROC ' + '__rglgen_' + x + ';' for x in gl_syms]
[ 567, 8654 ]
def METHOD_NAME(cls):
    if cls._schema_on_200 is not None:
        return cls._schema_on_200

    cls._schema_on_200 = AAZObjectType()

    _schema_on_200 = cls._schema_on_200
    _schema_on_200.id = AAZStrType(
        flags={"read_only": True},
    )
    _schema_on_200.location = AAZStrType(
        flags={"required": True},
    )
    _schema_on_200.name = AAZStrType(
        flags={"read_only": True},
    )
    _schema_on_200.properties = AAZObjectType(
        flags={"required": True, "client_flatten": True},
    )
    _schema_on_200.system_data = AAZObjectType(
        serialized_name="systemData",
        flags={"read_only": True},
    )
    _schema_on_200.tags = AAZDictType()
    _schema_on_200.type = AAZStrType(
        flags={"read_only": True},
    )

    properties = cls._schema_on_200.properties
    properties.annotation = AAZStrType()
    properties.network_devices = AAZListType(
        serialized_name="networkDevices",
        flags={"read_only": True},
    )
    properties.network_fabric_id = AAZStrType(
        serialized_name="networkFabricId",
        flags={"required": True},
    )
    properties.network_rack_type = AAZStrType(
        serialized_name="networkRackType",
    )
    properties.provisioning_state = AAZStrType(
        serialized_name="provisioningState",
        flags={"read_only": True},
    )

    network_devices = cls._schema_on_200.properties.network_devices
    network_devices.Element = AAZStrType()

    system_data = cls._schema_on_200.system_data
    system_data.created_at = AAZStrType(
        serialized_name="createdAt",
    )
    system_data.created_by = AAZStrType(
        serialized_name="createdBy",
    )
    system_data.created_by_type = AAZStrType(
        serialized_name="createdByType",
    )
    system_data.last_modified_at = AAZStrType(
        serialized_name="lastModifiedAt",
    )
    system_data.last_modified_by = AAZStrType(
        serialized_name="lastModifiedBy",
    )
    system_data.last_modified_by_type = AAZStrType(
        serialized_name="lastModifiedByType",
    )

    tags = cls._schema_on_200.tags
    tags.Element = AAZStrType()

    return cls._schema_on_200
[ 56, 135, 69, 1072 ]
def METHOD_NAME(cls):
    return {}
[ 19, 3616, 49, 83 ]
def METHOD_NAME():
    assert isinstance(data_dir_load(wt), FLASHDataset)
[ 9, 6062, 126 ]
def METHOD_NAME(x_y_i):
    (x, y, i) = x_y_i
    return (x, (x, y, i))
[ 1104, 3366, 59 ]
def METHOD_NAME(sender, msg):
    ac_list = msg['ac_list']
    for ac_id in ac_list:
        self.get_config(ac_id)
    #ac_list = [int(a) for a in msg['ac_list'].split(',') if a]
    if self.verbose:
        print("aircrafts: {}".format(ac_list))
[ 13924, 905 ]
def METHOD_NAME(self, owner_id):
    if not self.blacklist:
        return False

    if owner_id in self.qtile.windows_map:
        owner = self.qtile.windows_map[owner_id].window
    else:
        owner = xcbq.window.XWindow(self.qtile.core.conn, owner_id)

    owner_class = owner.get_wm_class()
    if owner_class:
        for wm_class in self.blacklist:
            if wm_class in owner_class:
                return True
[ 137, 3713 ]
def METHOD_NAME(self):
    self.assertEqual(bounding_box.as_relative("yxyx"), "rel_yxyx")
    self.assertEqual(bounding_box.as_relative("rel_xywh"), "rel_xywh")
[ 9, 947, 1821, 1960 ]
def METHOD_NAME(): """ Is the REST server up? """ r = salt.utils.http.query( __context__["rest_sample"]["url"] + "ping", decode_type="json", decode=True ) try: return r["dict"].get("ret", False) except Exception: # pylint: disable=broad-except return False
[ 1677 ]
def METHOD_NAME(pgcnx):
    result = pgcnx.query("""SELECT am.amname, opc.opcname, opr.oprname
        FROM pg_am am, pg_amop amop, pg_opclass opc, pg_operator opr
        WHERE amop.amopid = am.oid
            and amop.amopclaid = opc.oid
            AND amop.amopopr = opr.oid
        order by amname, opcname, oprname""")
    return result
[ 245, 441, 2 ]
def METHOD_NAME(self):
    if self.options.shared:
        self.options.rm_safe("fPIC")
[ 111 ]
def METHOD_NAME(in_input):
    print('\t\tInput type: {0}'.format(in_input.__class__))
    print('\t\tbKeepWorldTransform: {0}'.format(in_input.keep_world_transform))
    print('\t\tbImportAsReference: {0}'.format(in_input.import_as_reference))
    if isinstance(in_input, unreal.HoudiniPublicAPIGeoInput):
        print('\t\tbPackBeforeMerge: {0}'.format(in_input.pack_before_merge))
        print('\t\tbExportLODs: {0}'.format(in_input.export_lo_ds))
        print('\t\tbExportSockets: {0}'.format(in_input.export_sockets))
        print('\t\tbExportColliders: {0}'.format(in_input.export_colliders))
    elif isinstance(in_input, unreal.HoudiniPublicAPICurveInput):
        print('\t\tbCookOnCurveChanged: {0}'.format(in_input.cook_on_curve_changed))
        print('\t\tbAddRotAndScaleAttributesOnCurves: {0}'.format(in_input.add_rot_and_scale_attributes_on_curves))
    input_objects = in_input.get_input_objects()
    if not input_objects:
        print('\t\tEmpty input!')
    else:
        print('\t\tNumber of objects in input: {0}'.format(len(input_objects)))
        for idx, input_object in enumerate(input_objects):
            print('\t\t\tInput object #{0}: {1}'.format(idx, input_object))
            if hasattr(in_input, 'supports_transform_offset') and in_input.supports_transform_offset():
                print('\t\t\tObject Transform Offset: {0}'.format(in_input.get_input_object_transform_offset(idx)))
            if isinstance(input_object, unreal.HoudiniPublicAPICurveInputObject):
                print('\t\t\tbClosed: {0}'.format(input_object.is_closed()))
                print('\t\t\tCurveMethod: {0}'.format(input_object.get_curve_method()))
                print('\t\t\tCurveType: {0}'.format(input_object.get_curve_type()))
                print('\t\t\tReversed: {0}'.format(input_object.is_reversed()))
                print('\t\t\tCurvePoints: {0}'.format(input_object.get_curve_points()))
[ 38, 58, 362 ]
def METHOD_NAME(testdir, cli, schema_url):
    module = testdir.make_importable_pyfile(
        hook=f"""
import schemathesis
import time

TOKEN = "{TOKEN}"

@schemathesis.auth()
class TokenAuth:
    def __init__(self):
        self.get_calls = 0

    def get(self, context):
        self.get_calls += 1
        time.sleep(0.05)
        return TOKEN

    def set(self, case, data, context):
        case.headers = {{"Authorization": f"Bearer {{data}}"}}

@schemathesis.hook
def after_call(context, case, response):
    provider = schemathesis.auths.GLOBAL_AUTH_STORAGE.providers[0].provider
    assert provider.get_calls == 1, provider.get_calls
"""
    )
    result = cli.main(
        "run",
        schema_url,
        "--workers",
        "2",
        "--hypothesis-max-examples=1",
        "--show-errors-tracebacks",
        hooks=module.purebasename,
    )
    # Then CLI should run successfully
    assert result.exit_code == ExitCode.OK, result.stdout
[ 9, 107, 1573 ]
def METHOD_NAME(self):
    self.actual_test(save_restore=True)
[ 9, 22, 1061, 73, 1032 ]
def METHOD_NAME(self, path, method):
    operation = super().METHOD_NAME(path, method)
    if method == "POST":
        operation["operationId"] = "CreateAttributeType"
    elif method == "PATCH":
        operation["operationId"] = "UpdateAttributeType"
    elif method == "PUT":
        operation["operationId"] = "ReplaceAttributeType"
    elif method == "DELETE":
        operation["operationId"] = "DeleteAttributeType"
    operation["tags"] = ["Tator"]
    return operation
[ 19, 2206 ]