text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
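The rows that follow pair a code snippet in which the method name is masked as METHOD_NAME (the "text" column) with a short integer sequence (the "ids" column); what the integers encode is not stated in this dump. A minimal, purely illustrative sketch of iterating over rows shaped like this — the `rows` variable and how it gets loaded are assumptions, not part of any documented API:

# Sketch only: assumes the rows are already available as dicts with the two
# columns shown in the dump (e.g. parsed from JSON); no specific loader is implied.
rows = [
    {"text": "def METHOD_NAME(self): return self._default_token_type",
     "ids": [235, 466, 44]},  # example values copied from a row further down
]

for row in rows:
    snippet = row["text"]    # code with the target method name masked as METHOD_NAME
    label_ids = row["ids"]   # integer sequence paired with the snippet
    print(len(snippet), label_ids)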
def METHOD_NAME(font, master):
    for glyph in font.glyphs:
        for layer in glyph.layers:
            if (
                layer.layerId != master.id
                or layer.associatedMasterId != master.id
                or not layer.hints
            ):
                continue
            if any(h.type.upper() == "CORNER" for h in layer.hints):
                return True
    return False
[ 220, 2147, 2930, 811 ]
f METHOD_NAME(self):
[ 9, 13151, 24, 13152, 10618, 227 ]
def METHOD_NAME(self, doc, eval_context=None):
    "Gets all the values from a document to save"
    return self.config.METHOD_NAME(doc, eval_context)
[ 19, 75, 199 ]
def METHOD_NAME(self, table: sa.Table, columns_names: List[str]) -> bool:
    # TODO add method for checking that none of the columns exists
    existing = {c.name for c in table.columns}
    for c in columns_names:
        if c not in existing:
            return False
    return True
[ 220, 1951 ]
def METHOD_NAME(_): rng = np.random.RandomState(FLAGS.seed) # Make sure poker is compiled into the library, as it requires an optional # dependency: the ACPC poker code. To ensure it is compiled in, prepend both # the install.sh and build commands with OPEN_SPIEL_BUILD_WITH_ACPC=ON. # See here: # https://github.com/deepmind/open_spiel/blob/master/docs/install.md#configuration-conditional-dependencies # for more details on optional dependencies. games_list = pyspiel.registered_names() assert "universal_poker" in games_list fcpa_game_string = pyspiel.hunl_game_string("fcpa") print("Creating game: {}".format(fcpa_game_string)) game = pyspiel.load_game(fcpa_game_string) agents = [ LoadAgent(FLAGS.player0, game, 0, rng), LoadAgent(FLAGS.player1, game, 1, rng) ] state = game.new_initial_state() # Print the initial state print("INITIAL STATE") print(str(state)) while not state.is_terminal(): # The state can be three different types: chance node, # simultaneous node, or decision node current_player = state.current_player() if state.is_chance_node(): # Chance node: sample an outcome outcomes = state.chance_outcomes() num_actions = len(outcomes) print("Chance node with " + str(num_actions) + " outcomes") action_list, prob_list = zip(*outcomes) action = rng.choice(action_list, p=prob_list) print("Sampled outcome: ", state.action_to_string(state.current_player(), action)) state.apply_action(action) else: # Decision node: sample action for the single current player legal_actions = state.legal_actions() for action in legal_actions: print("Legal action: {} ({})".format( state.action_to_string(current_player, action), action)) action = agents[current_player].step(state) action_string = state.action_to_string(current_player, action) print("Player ", current_player, ", chose action: ", action_string) state.apply_action(action) print("") print("NEXT STATE:") print(str(state)) # Game is now done. Print utilities for each player returns = state.returns() for pid in range(game.num_players()): print("Utility for player {} is {}".format(pid, returns[pid]))
[ 57 ]
def METHOD_NAME(center, radius, height, width): half_height = height / 2. half_width = width / 2. hw = min(half_height, half_width) if isinstance(radius, (list, tuple)): if len(radius) != 4: raise ValueError("radius must be float or 4 value tuple/list" " (got %s of length %d)" % (type(radius), len(radius))) if (radius > np.ones(4) * hw).all(): raise ValueError('Radius of curvature cannot be greater than\ half of min(width, height)') radius = np.array(radius, dtype=np.float32) else: if radius > hw: raise ValueError('Radius of curvature cannot be greater than\ half of min(width, height)') radius = np.ones(4) * radius num_segments = (radius / hw * 500.).astype(int) bias1 = np.ones(4) * half_width - radius bias2 = np.ones(4) * half_height - radius corner1 = np.empty([num_segments[0]+1, 3], dtype=np.float32) corner2 = np.empty([num_segments[1]+1, 3], dtype=np.float32) corner3 = np.empty([num_segments[2]+1, 3], dtype=np.float32) corner4 = np.empty([num_segments[3]+1, 3], dtype=np.float32) start_angle = 0. end_angle = np.pi / 2. theta = np.linspace(end_angle, start_angle, num_segments[0]+1) corner1[:, 0] = center[0] - bias1[0] - radius[0] * np.sin(theta) corner1[:, 1] = center[1] - bias2[0] - radius[0] * np.cos(theta) corner1[:, 2] = 0 theta = np.linspace(start_angle, end_angle, num_segments[1]+1) corner2[:, 0] = center[0] + bias1[1] + radius[1] * np.sin(theta) corner2[:, 1] = center[1] - bias2[1] - radius[1] * np.cos(theta) corner2[:, 2] = 0 theta = np.linspace(end_angle, start_angle, num_segments[2]+1) corner3[:, 0] = center[0] + bias1[2] + radius[2] * np.sin(theta) corner3[:, 1] = center[1] + bias2[2] + radius[2] * np.cos(theta) corner3[:, 2] = 0 theta = np.linspace(start_angle, end_angle, num_segments[3]+1) corner4[:, 0] = center[0] - bias1[3] - radius[3] * np.sin(theta) corner4[:, 1] = center[1] + bias2[3] + radius[3] * np.cos(theta) corner4[:, 2] = 0 output = np.concatenate(([[center[0], center[1], 0.]], [[center[0] - half_width, center[1], 0.]], corner1, [[center[0], center[1] - half_height, 0.]], corner2, [[center[0] + half_width, center[1], 0.]], corner3, [[center[0], center[1] + half_height, 0.]], corner4, [[center[0] - half_width, center[1], 0.]])) vertices = np.array(output, dtype=np.float32) return vertices
[ 567, 2128 ]
def METHOD_NAME(queue, quantity=1, is_runner=False): """ Pop one or more or all items from the queue return them. """ cmd = "SELECT name FROM {}".format(queue) if quantity != "all": try: quantity = int(quantity) except ValueError as exc: error_txt = 'Quantity must be an integer or "all".\nError: "{}".'.format( exc ) raise SaltInvocationError(error_txt) cmd = "".join([cmd, " LIMIT {}".format(quantity)]) log.debug("SQL Query: %s", cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if len(result) > 0: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = 'DELETE FROM {} WHERE name IN ("{}")'.format(queue, itemlist) log.debug("SQL Query: %s", del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
[ 760 ]
f METHOD_NAME(value):
[ 214, 962 ]
def METHOD_NAME(x, y):
    if "a" <= x <= y <= "z":
        print("One")
    if "a" <= x <= "z":
        print("Two")
    if "a" <= x > "z":
        print("Three")
[ 53, 5474 ]
def METHOD_NAME(self, key):
    result = None
    if key in self.replacement_mods:
        result = self.replacement_mods[key]
    elif key in self.replacement_keys:
        result = self.replacement_keys[key]
        if result >= 277:
            result -= 277
    elif len(key) == 1:
        result = ord(key.upper())
    if result is None:
        raise KeyboardHandlerError("Could not translate key %r into a valid keycode." % key)
    return result
[ 9179, 280, 59 ]
def METHOD_NAME():
    image = np.zeros((10, 10), dtype=bool)
    label_image = np.zeros((10, 10), dtype=np.uint8)
    label_image[2:7, 2:7] = 1
    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    marked = mark_boundaries(image, label_image, color=white, mode='thick')
    result = np.mean(marked, axis=-1)
    assert_array_equal(result, ref)
[ 9, 1743, 1744, 863 ]
def METHOD_NAME(self):
    color_value, value_type = self._vmt.get_vector('$color', [1, 1, 1])
    divider = 255 if value_type is int else 1
    color_value = list(map(lambda a: a / divider, color_value))
    if len(color_value) == 1:
        color_value = [color_value[0], color_value[0], color_value[0]]
    elif len(color_value) > 3:
        color_value = color_value[:3]
    return color_value
[ 36 ]
def METHOD_NAME(self, name):
    from . import insert, model
    if name in model.model().names():
        view = self.tool().mainwindow().currentView()
        if view.hasFocus() or self.tool().widget().searchEntry.hasFocus():
            insert.insert(name, view)
[ 2117, 1006 ]
def METHOD_NAME(self) -> int: pass
[ 19, 1573, 29 ]
def METHOD_NAME():
    manager = EventManager(make_event(event_id="1234ABCD" * 4))
    manager.normalize()
    data = manager.get_data()
    assert data["event_id"] == "1234abcd" * 4

    manager = EventManager(make_event(event_id="1234ABCD" * 4))
    manager.normalize()
    data = manager.get_data()
    assert data["event_id"] == "1234abcd" * 4
[ 9, 417, 147, 8519 ]
def METHOD_NAME(self, test):
    "Called when a test has completed successfully"
    pass
[ 238, 1434 ]
def METHOD_NAME(self) -> str:
    """
    The name of the resource
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME( self, registration_test_common_setup, register_payload ): setup_dict = registration_test_common_setup() users_before = User.objects.all().count() organizations_before = Organization.objects.all().count() payload = register_payload( organization_name=setup_dict["org"].organization_name, industry="test", email="test", password="test", username=setup_dict["user"].username, ) response = setup_dict["client"].post( reverse("register"), data=json.dumps(payload, cls=DjangoJSONEncoder), content_type="application/json", ) users_after = User.objects.all().count() organizations_after = Organization.objects.all().count() assert response.status_code == status.HTTP_400_BAD_REQUEST assert users_before == users_after assert organizations_before == organizations_after
[ 9, 372, 2072, 954 ]
def METHOD_NAME( tls_cert_file: pathlib.Path, tls_key_file: pathlib.Path, listen_hosts: Iterable[str] ) -> None: from cryptography import x509 from cryptography.hazmat import backends from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509 import oid backend = backends.default_backend() private_key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=backend ) subject = x509.Name( [x509.NameAttribute(oid.NameOID.COMMON_NAME, "EdgeDB Server")] ) certificate = ( x509.CertificateBuilder() .subject_name(subject) .public_key(private_key.public_key()) .serial_number(int(uuid.uuid4())) .issuer_name(subject) .not_valid_before( datetime.today() - timedelta(days=1) ) .not_valid_after( datetime.today() + timedelta(weeks=1000) ) .add_extension( x509.SubjectAlternativeName( [ x509.DNSName(name) for name in listen_hosts if name not in {'0.0.0.0', '::'} ] ), critical=False, ) .sign( private_key=private_key, algorithm=hashes.SHA256(), backend=backend, ) ) with tls_cert_file.open("wb") as f: f.write(certificate.public_bytes(encoding=serialization.Encoding.PEM)) tls_cert_file.chmod(0o644) with tls_key_file.open("wb") as f: f.write( private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) ) tls_key_file.chmod(0o600)
[ 567, 1245, 1941 ]
def METHOD_NAME():
    data_dir = os.path.dirname(os.path.abspath(__file__))
    print('Data directory is: {}'.format(data_dir))
    os.chdir(data_dir)

    update_tld_list(IANA_TLD_LIST_URL, 'iana-filtered-tld-list.txt')
    update_spdx_id_list(
        SPDX_REPO_URL,
        'spdx-license-ids.txt',
        'spdx-free-license-ids.txt',
        'spdx-license-exception-ids.txt',
    )
    update_categories_list(MENU_SPEC_URL, 'xdg-category-names.txt')
    update_platforms_data()

    print('All done.')
[ 57 ]
def METHOD_NAME( backend_obj: QAOABaseBackend, n_shots: Optional[int] = None, seed_simulator: Optional[int] = None, qiskit_simulation_method: Optional[str] = None, qiskit_optimization_level: Optional[int] = None, noise_model=None, active_reset: Optional[bool] = None, rewiring=None, disable_qubit_rewiring: Optional[bool] = None, initial_qubit_mapping=None, ): BACKEND_ARGS_MAPPER = { QAOABackendAnalyticalSimulator: {}, QAOAvectorizedBackendSimulator: {}, } local_vars = locals() for each_plugin_entrypoint in PLUGIN_DICT.values(): if hasattr(each_plugin_entrypoint, "backend_args"): for each_key, each_value in each_plugin_entrypoint.backend_args.items(): # Convert list of accepted parameters into a dictionary with # the name of the variable as a key and the local value of the # variable var_values = [local_vars[each_name] for each_name in each_value] input_dict = {each_key: dict(zip(each_value, var_values))} BACKEND_ARGS_MAPPER.update(input_dict) final_backend_kwargs = { key: value for key, value in BACKEND_ARGS_MAPPER[backend_obj].items() if value is not None } return final_backend_kwargs
[ 3127, 718, 3782 ]
def METHOD_NAME(self): return Chi(Choi(self).METHOD_NAME())
[ 1330 ]
def METHOD_NAME(self):
    """Test the "pickle-ability" of the *RGB* colourspaces."""
    for colourspace in RGB_COLOURSPACES.values():
        pickle.dumps(colourspace)
[ 9, 1385 ]
def METHOD_NAME(expand: Optional[pulumi.Input[Optional[str]]] = None,
                network_profile_name: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkProfileResult]:
    """
    Gets the specified network profile in a specified resource group.

    :param str expand: Expands referenced resources.
    :param str network_profile_name: The name of the public IP prefix.
    :param str resource_group_name: The name of the resource group.
    """
    ...
[ 19, 1228, 337, 146 ]
def METHOD_NAME(self, ctx:ASLIntrinsicParser.Json_path_query_binaryContext): return self.visitChildren(ctx)
[ 716, 763, 157, 539, 808 ]
def METHOD_NAME(self, block_index, seg_index, unit_index): return 0
[ 945, 29 ]
def METHOD_NAME(self, slot, subindex, roi):
    assert slot == self.GlobString
    # Any change to the globstring means our entire output is dirty.
    self.Output.setDirty()
[ 4820, 5220 ]
def METHOD_NAME(py_file):
    """Extract the doc() definition from index.py file"""
    with open(py_file, 'r') as f:
        doc_file = f.read()
    exec(doc_file)
    return doc_return_value
[ 11591, 366 ]
def METHOD_NAME(self):
    cfg = broker.Configuration(broker.BrokerOptions())
    cfg.openssl_certificate = data_path("cert.1.pem")
    cfg.openssl_key = data_path("key.1.enc.pem")
    cfg.openssl_cafile = data_path("ca.pem")
    cfg.openssl_passphrase = "WRONG PASSWORD"

    with broker.Endpoint(cfg) as ep1, \
         broker.Endpoint(cfg) as ep2:
        port = ep1.listen("127.0.0.1", 0)
        # TODO: This correctly generates an exception in CAF, for which I
        # don't know where to catch it.
        r = ep2.peer("127.0.0.1", port, 0)
        self.assertEqual(r, False)
[ 11589, 11795, 1247, 2433, 374, 1246, 2741 ]
def METHOD_NAME( project: Project, reload: bool, link: Optional[str], interval: float, ): """Watch the project directory and build on file changes.""" text = "Linking and watching project..." if link else "Watching project..." with message_fence(text): if link: click.echo(project.link(world=link)) for changes in project.METHOD_NAME(interval): filename, action = next(iter(changes.items())) text = ( f'{action.capitalize()} "{filename}"' if changes == {filename: action} else f"{len(changes)} changes detected" ) now = time.strftime("%H:%M:%S") change_time = click.style(now, fg="green", bold=True) click.echo(f"{change_time} {text}") with error_handler(format_padding=1), project.override( reload and "require[] = beet.contrib.livereload" ): project.build()
[ 1619 ]
def METHOD_NAME(base_dir):
    return {
        'default': {
            'ENGINE': os.environ.get('DJANGO_DB_ENGINE', 'django.db.backends.sqlite3'),
            'NAME': os.environ.get('DJANGO_DB_NAME', os.path.join(base_dir, 'db.sqlite3')),
            'USER': os.environ.get('DJANGO_DB_USER', ''),
        }
    }
[ 19, 5938 ]
def METHOD_NAME(): np.random.seed(1) edges = {} # for test no successor edges['c2p'] = [(1, 4), (0, 5), (1, 9), (1, 8), (2, 8), (2, 5), (3, 6), (3, 7), (3, 4), (3, 8)] edges['p2c'] = [(v, u) for u, v in edges['c2p']] edges['p2a'] = [(4, 10), (4, 11), (4, 12), (4, 14), (4, 13), (6, 12), (6, 11), (6, 14), (7, 12), (7, 11), (8, 14), (9, 10)] edges['a2p'] = [(v, u) for u, v in edges['p2a']] node_types = ['c' for _ in range(4)] + ['p' for _ in range(6) ] + ['a' for _ in range(5)] node_types = [(i, t) for i, t in enumerate(node_types)] graph = heter_graph.HeterGraph( num_nodes=len(node_types), edges=edges, node_types=node_types) graph.dump("./hetergraph_mmap", outdegree=True)
[ 9, 278 ]
def METHOD_NAME(self):
    """
    Modify haproxy service
    """
    cmd = (
        f"sudo sed -i 's/haproxy\\.conf/haproxy.cfg/g' {constants.HAPROXY_SERVICE}"
    )
    self.lb.exec_cmd(cmd)
[ 2444, 14767, 549 ]
def METHOD_NAME(response_json):
    if "labels" in response_json:
        pr_labels = response_json["labels"]
        for label in pr_labels:
            if label["name"] == "skip changelog":
                return True
    return False
[ 427, 2423, 3838 ]
def METHOD_NAME() -> MockConfigEntry:
    """Return the default mocked config entry."""
    return MockConfigEntry(
        domain=DOMAIN,
        data={
            CONF_SENSOR_TYPE: SensorType.VIRTUAL_POWER,
            CONF_ENTITY_ID: "input_boolean.test",
            CONF_FIXED: {
                CONF_POWER: 50,
            },
        },
        unique_id="aabbccddeeff",
        title="test",
    )
[ 248, 200, 475 ]
def METHOD_NAME( zone_key: ZoneKey = ZoneKey("IL"), session: Optional[Session] = None, target_datetime: Optional[datetime] = None, logger: Logger = getLogger(__name__),
[ 1047, 395, 1900 ]
def METHOD_NAME():
    if using_pyside:
        # this will return None, unless properly inited
        inst = GetPySideViewerInstance()
        if not inst is None:
            return inst.GetRenderWindowIDs()
    return None
[ 19, 338, 1092, 308 ]
def METHOD_NAME( name: str, request: pyramid.request.Request, is_intranet_: bool ) -> list[Union[str, int, float, bool, list[Any], dict[str, Any]]]: """Get all the functionality for the current user.""" result: list[Union[str, int, float, bool, list[Any], dict[str, Any]]] = [] errors: set[str] = set() if request.user is not None: result = _get_db_functionality( name, _user_to_struct(request.user), _get_functionalities_type(request), request, errors ) if not result: result = _get_db_functionality( name, _get_role(request.get_organization_role("registered")), _get_functionalities_type(request), request, errors, ) if not result and is_intranet_: result = _get_db_functionality( name, _get_role(request.get_organization_role("intranet")), _get_functionalities_type(request), request, errors, ) if not result: result = _get_db_functionality( name, _get_role(request.get_organization_role("anonymous")), _get_functionalities_type(request), request, errors, ) if errors != set(): LOG.error("\n".join(errors)) return result
[ 19, 6246 ]
def METHOD_NAME(current_actor_context, monkeypatch): """Check that previous inhibitors are not stopping the upgrade in case env var is set""" monkeypatch.setattr(config, 'get_env', lambda x, y: 'network-manager' if x == 'LEAPP_DEVEL_INITRAM_NETWORK' else y) with_mount_share = [MountEntry(name="nfs", mount="/mnt/data", tp='nfs', options="rw,nosuid,nodev,relatime,user_id=1000,group_id=1000")] with_systemdmount_entry = [SystemdMountEntry(node="nfs", path="n/a", model="n/a", wwn="n/a", fs_type='nfs', label="n/a", uuid="n/a")] with_fstab_entry = [FstabEntry(fs_spec="lithium:/mnt/data", fs_file="/mnt/data", fs_vfstype='nfs', fs_mntops="noauto,noatime,rsize=32768,wsize=32768", fs_freq="0", fs_passno="0")] current_actor_context.feed(StorageInfo(mount=with_mount_share, systemdmount=with_systemdmount_entry, fstab=with_fstab_entry)) current_actor_context.run() assert not current_actor_context.consume(Report)
[ 9, 7675, 79, 217, -1, 1228, 1111 ]
def METHOD_NAME(): return ( os.name == 'nt' )
[ 137, 3239 ]
def METHOD_NAME(theta_E):
    """Converts projected density parameter (in units of deflection) into 3d density parameter

    :param theta_E: Einstein radius
    :return:
    """
    fac1 = np.pi * 2
    rho0 = theta_E / fac1
    return rho0
[ 14027 ]
def METHOD_NAME(self, x, y):
    neighbours = []
    if x > 0:
        neighbours.append((x - 1, y, self.LEFT))
    if x < self.map.w - 1:
        neighbours.append((x + 1, y, self.RIGHT))
    if y > 0:
        neighbours.append((x, y - 1, self.UP))
    if y < self.map.h - 1:
        neighbours.append((x, y + 1, self.DOWN))
    return neighbours
[ 19, 11810 ]
def METHOD_NAME(self, value):
    self._s32_pin_type = value
    if value:
        self._toggle_properties('s32')
[ 0, 7711, 2818, 44 ]
def METHOD_NAME(self): self._calltip_window = None
[ 1462 ]
METHOD_NAME(self):
[ 390, 3287, 573 ]
def METHOD_NAME(self): result = object() test = object() class FakeRunner(object): def run(self, test): self.test = test return result runner = FakeRunner() oldParseArgs = unittest.TestProgram.parseArgs def restoreParseArgs(): unittest.TestProgram.parseArgs = oldParseArgs unittest.TestProgram.parseArgs = lambda *args: None self.addCleanup(restoreParseArgs) def removeTest(): del unittest.TestProgram.test unittest.TestProgram.test = test self.addCleanup(removeTest) program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2) self.assertEqual(program.result, result) self.assertEqual(runner.test, test) self.assertEqual(program.verbosity, 2)
[ 9, 654, 538 ]
def METHOD_NAME(self): return self._default_token_type
[ 235, 466, 44 ]
def METHOD_NAME(self):
    """Used by `build` Template Method to build header for the repr"""
    raise NotImplementedError
[ 356, 572 ]
def METHOD_NAME(cls, parsed_args=None):
    if not parsed_args:
        parsed_args = cls._create_argparser().parse_args()
    if parsed_args.update_database:
        cls.update_database(verbose=parsed_args.verbose)
    else:
        super().METHOD_NAME(parsed_args=parsed_args)
[ 22 ]
def METHOD_NAME(job: Job):
    file_path = manager.job_manager.get_job_file_path(job)
    md_path = os.path.splitext(file_path)[0] + ".qmd"
    return read_qmd(md_path)
[ 203, 126, 773 ]
def METHOD_NAME(self, name, type, label, widget=None, default=None, tooltip=None,
                min=None, max=None, vals=None, suffix=""):
    self.settingsList.append(name)
    self.settings[name] = self.setting(name, type, label, widget, default, tooltip,
                                       min, max, vals, suffix)
[ 238, 1333 ]
def METHOD_NAME(self):
    self._mock_migratable_db.get_version.return_value = 1
    self._mock_migratable_db.get_upgrade_version.return_value = 2
    self.assertEqual(DataState.STALE, self._installed_data.get_state())
[ 9, 19, 551, 427, 1413, 5754, 1646 ]
def METHOD_NAME(self, sut_res: any = None, key: str = None) -> Dict[str, any]:
    """EPC specific checks mathod should be used to retrive memory and pid info"""
    mem_dict = {"epc": 1}
    pid_dict = {"epc": 1}
    return mem_dict, pid_dict
[ 16150, 250 ]
def METHOD_NAME(self, absent="#016810", occupied="#B60202"):
    self._absent_colour = absent
    self._occupied_colour = occupied
[ 0, 8668 ]
def METHOD_NAME( self, violations=FAKE_VIOLATIONS, inv_index_id=FAKE_INV_INDEX_ID, scanner_index_id=None, succeeded=['IamPolicyScanner'], failed=[]): """Populate the db with violations. Args: violations (dict): the violations to write to the test database inv_index_id (str): the inventory index to use scanner_index_id (str): the scanner index to use succeeded (list): names of scanners that ran successfully failed (list): names of scanners that failed """ if not scanner_index_id: scanner_index_id = scanner.init_scanner_index( self.session, inv_index_id) self.violation_access.create(violations, scanner_index_id) scanner.mark_scanner_index_complete( self.session, scanner_index_id, succeeded, failed) return scanner_index_id
[ 3914, 1267 ]
def METHOD_NAME(self, original_content: str) -> str:
    # gets rid of whitespace around the edges, so that they aren't a problem in the future
    message_content = original_content.strip()
    # replaces all spaces with '+' to be in the format the api requires
    sentence = message_content.replace(" ", "+")
    return sentence
[ 275, 362 ]
def METHOD_NAME(self, structure): self.structure.calc.calculate(structure)
[ 1204, 1011, 800 ]
def METHOD_NAME(a: ArrayLike, tol=None, hermitian=False):
    a = _atleast_float_1(a)
    if a.ndim < 2:
        return int((a != 0).any())
    if tol is None:
        # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
        atol = 0
        rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
    else:
        atol, rtol = tol, 0
    return torch.linalg.METHOD_NAME(a, atol=atol, rtol=rtol, hermitian=hermitian)
[ 430, 1499 ]
def METHOD_NAME(self):
    os = "abcde"
    A_spec = self.A_spec
    alpha_spec = GppkgSpec("alpha", "1.0", GPDB_VERSION, os)
    gppkg_file = self.build(alpha_spec, A_spec)
    with self.assertRaisesRegex(ExecutionError,
                                "%s OS required. %s OS found" % (os, OS)):
        self.install(gppkg_file)
[ 3739, 909, 350 ]
def METHOD_NAME(): return WorkspaceSource
[ 102 ]
def METHOD_NAME(distribution, has_signing_service=False):
    subprocess.run(("sudo", "dnf", "config-manager", "--add-repo", distribution.base_url))
    repo_id = "*{}_".format(distribution.base_path)
    args = ["sudo", "dnf", "config-manager", "--save", f"--setopt={repo_id}.gpgcheck=0"]
    if has_signing_service:
        public_key_url = f"{distribution.base_url}repodata/repomd.xml.key"
        args.extend(
            (
                f"--setopt={repo_id}.repo_gpgcheck=1",
                f"--setopt={repo_id}.gpgkey={public_key_url}",
            )
        )
    subprocess.run(args + [repo_id])
    added_repos.append(repo_id)
[ 238, 522 ]
def METHOD_NAME( self, resource_group_name, workspace_collection_name, custom_headers=None, raw=False, **operation_config): """Retrieves all existing Power BI workspaces in the specified workspace collection. :param resource_group_name: Azure resource group :type resource_group_name: str :param workspace_collection_name: Power BI Embedded Workspace Collection name :type workspace_collection_name: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of Workspace :rtype: ~azure.mgmt.powerbiembedded.models.WorkspacePaged[~azure.mgmt.powerbiembedded.models.Workspace] :raises: :class:`ErrorException<azure.mgmt.powerbiembedded.models.ErrorException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.METHOD_NAME.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'workspaceCollectionName': self._serialize.url("workspace_collection_name", workspace_collection_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorException(self._deserialize, response) return response # Deserialize response deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized
[ 245 ]
def METHOD_NAME(self, data): return data[0]
[ 821 ]
def METHOD_NAME(cuda, local_rank):
    """
    Sets device based on local_rank and returns instance of torch.device.

    :param cuda: if True: use cuda
    :param local_rank: local rank of the worker
    """
    if cuda:
        torch.cuda.METHOD_NAME(local_rank)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    return device
[ 0, 398 ]
def METHOD_NAME(self, value): self.update_panel_visibility()
[ 69, 2977, 3237, 519, 86 ]
def METHOD_NAME(train_p, train_input_p, exp_start_epoch, total_epoch, warmup_epoch=0, limit_epoch=None, multiplier_min=0.01, warmup_init=0.): """Sets a linear rampup and exponential decay LR schedule on train_p. This is a wrapper around LinearRampupExponentialDecayScaledByNumSplitSchedule that sets the steps using epochs and the training statistics. Args: train_p: train parameters. train_input_p: The training set input parameters. exp_start_epoch: The start epoch of exponential annealing. total_epoch: Total number of epoch to train. warmup_epoch: Epoch for the warm up ramp to end at. Note that the learning rate will be fixed between the end of the warmup phase and the beginning of the exponential annealing phase. limit_epoch: Epoch to end exponential annealing. If None, this will be set to 0.95 * total_epoch, that is, the last 5% of training time will be at the minimum learning rate. multiplier_min: The multiplier minimum at the end of exponential decay. warmup_init: Initial value for the warmup phase. Note that warm up can be disabled by either setting warmup_init to 1 or setting warmup_epoch to 0. """ # Determine steps based on the training statistics, since the number of steps # depends on the number of examples per step. train_stats = _GetTrainingStatistics(train_input_p) warmup_steps, exp_start_steps, total_steps = _GetSteps( train_stats, warmup_epoch, exp_start_epoch, total_epoch) if limit_epoch is None: limit_epoch = 0.95 * total_epoch limit_steps = limit_epoch * train_stats.steps_per_epoch tf.logging.info('limit_steps = %d', limit_steps) assert 0. <= warmup_steps <= exp_start_steps <= limit_steps <= total_steps # Ensure that warmup is disabled by also setting warmup_init to 1 if # warmup_epoch is set to 0. if warmup_epoch == 0.: warmup_init = 1. train_p.max_steps = math.ceil(total_epoch * train_stats.steps_per_epoch) train_p.lr_schedule = ( schedule.LinearRampupExponentialDecayScaledByNumSplitSchedule.Params()) train_p.lr_schedule.Set( warmup=warmup_steps, decay_start=exp_start_steps, decay_end=limit_steps, min=multiplier_min, warmup_init=warmup_init, # Set num_splits to 1 so that no further scaling is done. num_splits=1)
[ 0, 8088, 6941 ]
def METHOD_NAME(self, rset=None):
    if not hasattr(self, 'snaps'):
        return
    if len(self.snaps) == 0:
        return
    for s in list(self.snaps.keys()):
        self.snapdestroykey(s)
    if rset is None:
        return
    for i, r in enumerate(rset.resources):
        if hasattr(rset.resources[i], 'alt_src'):
            delattr(rset.resources[i], 'alt_src')
[ 4792, 950 ]
def METHOD_NAME(document_ids, lang, document_type): """ Get a cache key for all given document ids. """ if not document_ids: return [] versions = DBSession.query(CacheVersion). \ filter(CacheVersion.document_id.in_(document_ids)). \ join(Document, Document.document_id == CacheVersion.document_id). \ filter(Document.redirects_to.is_(None)). \ all() version_for_documents = {v.document_id: v.version for v in versions} return [ _format_cache_key( document_id, lang, version_for_documents.get(document_id), document_type ) for document_id in document_ids if version_for_documents.get(document_id) ]
[ 19, 596, 219 ]
def METHOD_NAME(
        self, func_name, update_status, package=ANY, error_code=None):
    expected_params = {'FunctionName': func_name, 'ZipFile': package}
    response = {'FunctionName': func_name, 'LastUpdateStatus': update_status}
    self._stub_bifurcator(
        'update_function_code', expected_params, response, error_code=error_code)
[ 492, 86, 559, 544 ]
def METHOD_NAME(self): parameters = { **self.serialize_url_param( "clusterName", self.ctx.args.cluster_name, required=True, ), **self.serialize_url_param( "databaseName", self.ctx.args.database_name, required=True, ), **self.serialize_url_param( "resourceGroupName", self.ctx.args.resource_group, required=True, ), **self.serialize_url_param( "subscriptionId", self.ctx.subscription_id, required=True, ), } return parameters
[ 274, 386 ]
def METHOD_NAME(self):
    if os.name == "nt":
        pat = TagsFromPattern('<path>\\<~>\\<~>\\<tracknumber> - <title>')
    else:
        pat = TagsFromPattern('<path>/<~>/<~>/<tracknumber> - <title>')
    self.failUnlessEqual(len(pat.headers), 3)
    song = pat.match({"~filename": self.f1})
    self.failUnlessEqual(song.get("path"), "path")
    self.failUnlessEqual(song.get("title"), "Title")
    self.failIf(song.get("album"))
    self.failIf(song.get("artist"))
[ 9, 2423 ]
def METHOD_NAME(self):
    if self.settings.compiler == 'clang' and str(self.settings.compiler.libcxx) in ['libstdc++', 'libstdc++11']:
        replace_in_file(self, os.path.join(self.source_folder, 'Release', 'CMakeLists.txt'),
                        'libc++', 'libstdc++')
[ 1575, 4040, 9076 ]
def METHOD_NAME(): pass
[ 1030, 2126, 1929, 1030, 2126 ]
def METHOD_NAME(packer, button_val, car_fingerprint):
    values = {
        'CRUISE_BUTTONS': button_val,
        'CRUISE_SETTING': 0,
    }
    # send buttons to camera on radarless cars
    bus = 2 if car_fingerprint in HONDA_BOSCH_RADARLESS else get_pt_bus(car_fingerprint)
    return packer.make_can_msg("SCM_BUTTONS", bus, values)
[ 5340, 1409, 462 ]
def METHOD_NAME(self):
    self._patcher1.METHOD_NAME()
    self._patcher2.METHOD_NAME()
    self._patcher3.METHOD_NAME()
    self._patcher4.METHOD_NAME()
    self._patcher5.METHOD_NAME()
[ 631 ]
def METHOD_NAME(
    self, resource_id: str, data: Optional[Dict[str, Any]] = None, idempotency_key: str = "", **params: Any
) -> Any:
    idempotency_key = idempotency_key or self._generate_idempotency_key()
    resource_path = self.get_resource_path()
    path = f"{resource_path}/{resource_id}"
    result = self.perform_api_call(self.REST_UPDATE, path, data, params, idempotency_key=idempotency_key)
    return self.get_resource_object(result)
[ 86 ]
def METHOD_NAME( matrix: np.ndarray, x: np.ndarray, y: np.ndarray, all_finite: bool = False, all_infinite: bool = False, is_gradient_bounding: bool = False, ): """Validate the bounding matrix is what is expected. Parameters ---------- matrix: np.ndarray (2d array) Bounding matrix. x: np.ndarray (1d, 2d or 3d array) First time series. y: np.ndarray (1d, 2d or 3d array) Second time series. all_finite: bool, default = False Boolean that when true will check all the values are finite. all_infinite: bool, default = False Boolean that when true will check all the values (aside the middle diagonal) are infinite. is_gradient_bounding: bool, default = False Boolean that when true marks the bounding matrix as generated by an algorithm that uses a gradient and therefore the first a second column are allowed to be finite (aside the first and last element in the matrix). """ assert isinstance(matrix, np.ndarray), ( f"A bounding matrix must be of type np.ndarray. Instead one was provided with " f"{type(matrix)} type." ) assert matrix.ndim == 2, ( f"A bounding matrix must have two dimensions. Instead one was provided with " f"{matrix.ndim} dimensions." ) assert matrix.shape == (len(x), len(y)), ( f"A bounding matrix with shape len(x) by len(y) is expected ({len(x), len(y)}. " f"Instead one was given with shape {matrix.shape}" ) unique, counts = np.unique(matrix, return_counts=True) count_dict = dict(zip(unique, counts)) for key in count_dict: if np.isfinite(key): assert count_dict[key] >= len(y) or all_infinite is False, ( "All the values in the bounding matrix should be finite. A infinite " "value was found (aside from the diagonal)." ) else: if is_gradient_bounding: max_infinite = len(y) + len(x) - 2 # -2 as 0,0 and n,m should be finite assert count_dict[key] >= max_infinite or all_finite is False, ( "All values in the bounding matrix should be infinite. Aside" "from the first column and last column." ) else: assert all_finite is False, ( "All values in the bounding matrix should be" "infinite. A finite value was found" )
[ 187, 1538, 1571 ]
def METHOD_NAME(event):
    return (
        not _is_cfp_open(event)
        and event.submissions.filter(state=SubmissionStates.SUBMITTED).exists()
        and now() <= event.datetime_from
    )
[ 137, 623, 4381 ]
def METHOD_NAME( min_float: float = 0.0, max_float: float = math.inf, allow_zero: bool = False, # Allows +0.0 (even if minfloat > 0)
[ 93, 1819, 17534 ]
def METHOD_NAME():
    cxa = CxAssembler()
    cxa.add_statements([st_phos, st_dephos])
    cxa.make_model()
    assert len(cxa.cx['nodeAttributes']) == 5
[ 9, 1716, 177 ]
def METHOD_NAME(output: Iterable, base_response: CompletionStreamResponse): """ Streams a GPT4All output to the client. Args: output: The output of GPT4All.generate(), which is an iterable of tokens. base_response: The base response object, which is cloned and modified for each token. Returns: A Generator of CompletionStreamResponse objects, which are serialized to JSON Event Stream format. """ for token in output: chunk = base_response.copy() chunk.choices = [dict(CompletionChoice( text=token, index=0, logprobs=-1, finish_reason='' ))] yield f"data: {json.dumps(dict(chunk))}\n\n"
[ 919, 1323 ]
def METHOD_NAME(default_conf, hyperopt_results, lossfunction) -> None: results_over = hyperopt_results.copy() results_over['profit_abs'] = hyperopt_results['profit_abs'] * 2 + 0.2 results_over['profit_ratio'] = hyperopt_results['profit_ratio'] * 2 results_under = hyperopt_results.copy() results_under['profit_abs'] = hyperopt_results['profit_abs'] / 2 - 0.2 results_under['profit_ratio'] = hyperopt_results['profit_ratio'] / 2 default_conf.update({'hyperopt_loss': lossfunction}) hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function( hyperopt_results, trade_count=len(hyperopt_results), min_date=datetime(2019, 1, 1), max_date=datetime(2019, 5, 1), config=default_conf, processed=None, backtest_stats={'profit_total': hyperopt_results['profit_abs'].sum()} ) over = hl.hyperopt_loss_function( results_over, trade_count=len(results_over), min_date=datetime(2019, 1, 1), max_date=datetime(2019, 5, 1), config=default_conf, processed=None, backtest_stats={'profit_total': results_over['profit_abs'].sum()} ) under = hl.hyperopt_loss_function( results_under, trade_count=len(results_under), min_date=datetime(2019, 1, 1), max_date=datetime(2019, 5, 1), config=default_conf, processed=None, backtest_stats={'profit_total': results_under['profit_abs'].sum()} ) assert over < correct assert under > correct
[ 9, 1572, 3194, 5912, -1 ]
def METHOD_NAME(app_configs, **kwargs): from corehq.sql_db.models import PartitionedModel from corehq.sql_db.util import get_db_aliases_for_partitioned_query errors = [] # some apps only apply to specific envs env_specific_apps = { 'icds_reports': settings.ICDS_ENVS, 'aaa': ('none',), } ignored_models = [ 'DeprecatedXFormAttachmentSQL' ] def _check_model(model_class, using=None): db = using or router.db_for_read(model_class) try: with django_connections[db].cursor() as cursor: cursor.execute("SELECT %s::regclass", [model_class._meta.db_table]) except Exception as e: errors.append(checks.Error('checks.Error querying model on database "{}": "{}.{}": {}.{}({})'.format( using or DEFAULT_DB_ALIAS, model_class._meta.app_label, model_class.__name__, e.__class__.__module__, e.__class__.__name__, e ))) for model in apps.get_models(): app_label = model._meta.app_label enabled_envs = env_specific_apps.get(app_label) if enabled_envs and settings.SERVER_ENVIRONMENT not in enabled_envs: continue if model.__name__ in ignored_models or not model._meta.managed: continue if issubclass(model, PartitionedModel): for db in get_db_aliases_for_partitioned_query(): _check_model(model, using=db) else: _check_model(model) return errors
[ 250, 1267, 2253 ]
def METHOD_NAME(req: ReqMock, expected: bool) -> None:
    should_build = wheel_builder.should_build_for_wheel_command(
        cast(InstallRequirement, req)
    )
    assert should_build is expected
[ 9, 427, 56, 43, 2620, 462 ]
def METHOD_NAME(): return True
[ 1751, 2245 ]
def METHOD_NAME(self):
    assert nx.bidirectional_shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3]
    assert nx.bidirectional_shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4]
    validate_grid_path(
        4, 4, 1, 12, nx.bidirectional_shortest_path(self.grid, 1, 12)
    )
    assert nx.bidirectional_shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3]
    # test source = target
    assert nx.bidirectional_shortest_path(self.cycle, 3, 3) == [3]
[ 9, 9628, 7159, 157 ]
def METHOD_NAME(self):
    """ Reads data from device or interface synchronously. """
    if self.closed:
        raise RuntimeError("Trying to read from a closed instrument")
    if _USE_DEBUG:
        print("DBG-Mock: read", self.answer)
    if len(self.answer) > 0:
        return self.answer.pop(0)
    return 'ERROR'
[ 203 ]
def METHOD_NAME(self) -> None:
    if self.is_tip_rack():
        self._engine_client.METHOD_NAME(labware_id=self.labware_id)
    else:
        raise TypeError(f"{self.get_display_name()} is not a tip rack.")
[ 656, 7964 ]
def METHOD_NAME(success):
    if success:
        self.main_window.broadcast_or_show(self.tx)
[ 2452, 1658 ]
def METHOD_NAME(): _check_op(convolution.laplacian, adj)
[ 9, 3505 ]
def METHOD_NAME(noise_model, num_keys=1):
    assert isinstance(noise_model, qibo.noise.NoiseModel)
    errorkeys = noise_model.errors.keys()
    assert len(errorkeys) == num_keys
    error = list(noise_model.errors.values())[0][0][1]
    assert isinstance(error, qibo.noise.PauliError)
    assert len(error.options) == 3 and np.sum(pair[1] for pair in error.options) < 1
[ 9, 578 ]
def METHOD_NAME(__encoding: str) -> codecs.CodecInfo: ...
[ 1906 ]
def METHOD_NAME( self, **generator_kwargs: str | int | dict[str, str] ) -> Iterator[praw.models.Comment | praw.models.Submission]: """Return a :class:`.ListingGenerator` for items the user has gilded. :returns: A :class:`.ListingGenerator` object which yields :class:`.Comment` or :class:`.Submission` objects the user has gilded. :raises: ``prawcore.Forbidden`` if the user is not authorized to access the list. .. note:: Since this function returns a :class:`.ListingGenerator` the exception may not occur until sometime after this function has returned. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. For example, to get all gilded items of the authenticated user: .. code-block:: python for item in reddit.user.me().gildings(): print(item.id) """ return ListingGenerator( self._reddit, urljoin(self._path, "gilded/given"), **generator_kwargs )
[ -1 ]
def METHOD_NAME(self): self.subcomponent_test_template("sc_2u")
[ 9, 1066, 1007, 3555, -1 ]
def METHOD_NAME(self) -> Optional[str]:
    """
    Resource name.
    """
    return pulumi.get(self, "name")
[ 156 ]
def METHOD_NAME(self) -> None:
    prices: Dict[str, int] = self.multiworld.worlds[self.player].shop_prices
    for loc, price in prices.items():
        with self.subTest("prices", loc=loc):
            self.assertLessEqual(price, self.multiworld.get_location(f"The Shop - {loc}", self.player).cost)
            self.assertTrue(loc in SHOP_ITEMS)
    self.assertEqual(len(prices), len(SHOP_ITEMS))
[ 9, 873, 2357 ]
def METHOD_NAME(partialPath):
    """ Returns the absolute path for the test json file """
    normPath = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
    return os.path.join(normPath, partialPath)
[ 19, 9, 171 ]
def METHOD_NAME( self, slack_team_identity: SlackTeamIdentity, payload: EventPayload ) -> AlertGroup | None: """ Get AlertGroup instance from SlackMessage instance. Old messages may not have alert_group_pk encoded into buttons, so we need to query SlackMessage to figure out the AlertGroup. """ message_ts = payload.get("message_ts") or payload["container"]["message_ts"] # interactive message or block channel_id = payload["channel"]["id"] # All Slack messages from OnCall should have alert_group_pk encoded into buttons, so reaching this point means # something probably went wrong. logger.warning(f"alert_group_pk not found in payload, fetching SlackMessage from DB. message_ts: {message_ts}") # Get SlackMessage from DB slack_message = SlackMessage.objects.get( slack_id=message_ts, _slack_team_identity=slack_team_identity, channel_id=channel_id, ) return slack_message.alert_group
[ 19, 2941, 846, 280, 729, 277, 623 ]
def METHOD_NAME(self, message):
    """Print messages only if ``verbose`` was set to ``True``."""
    if self.verbose:
        log.info(message)
[ -1 ]
def METHOD_NAME(self): """ Opens the SpaCy output and gets ze entities. """ # Validate whether the user enabled the right parameters. if "ner" not in self.source_dataset.parameters["enable"]: self.dataset.update_status("Enable \"Named entity recognition\" in previous module") self.dataset.finish(0) return else: # Extract the SpaCy docs first self.dataset.update_status("Unzipping SpaCy docs") # Store all the entities in this list li_entities = [] nlp = spacy.load("en_core_web_sm") # Load model for doc_file in self.iterate_archive_contents(self.source_file): with doc_file.open("rb") as pickle_file: # Load DocBin file = pickle.load(pickle_file) doc_bin = DocBin().from_bytes(file) docs = list(doc_bin.get_docs(nlp.vocab)) for doc in docs: post_entities = [] # stop processing if worker has been asked to stop if self.interrupted: raise ProcessorInterruptedException("Interrupted while processing documents") for ent in doc.ents: if ent.label_ in self.parameters["entities"]: post_entities.append((ent.text, ent.label_)) # Add a tuple li_entities.append(post_entities) results = [] if li_entities: # Also add the data to the original file, if indicated. if self.parameters.get("overwrite"): self.add_field_to_parent(field_name='named_entities', # Format like "Apple:ORG, Gates:PERSON, ..." and add to the row new_data=[", ".join([":".join(post_entities) for post_entities in entity]) for entity in li_entities], which_parent=self.dataset.top_parent(), update_existing=True) all_entities = [] # Convert to lower and filter out one-letter words. Join the words with the entities so we can group easily. for post_ents in li_entities: for pair in post_ents: if pair and len(pair[0]) > 1: pair = pair[0].lower() + " |#| " + pair[1] all_entities.append(pair) # Group and rank count_nouns = Counter(all_entities).most_common() # Unsplit and list the count. results = [{"word": tpl[0].split(" |#| ")[0], "entity": tpl[0].split(" |#| ")[1], "count": tpl[1]} for tpl in count_nouns] # done! if results: self.dataset.update_status("Finished") self.write_csv_items_and_finish(results) else: self.dataset.update_status("Finished, but no entities were extracted.") self.dataset.finish(0)
[ 356 ]