text: string, lengths 15 to 7.82k
ids: sequence, lengths 1 to 7
def METHOD_NAME(scale):
    value = scale.get_value()
    set_cfg("ratio", value)
    self.emit("changed")
[ 4517, 1180 ]
def METHOD_NAME(self): self.check("*.jpg")
[ 9, 557, 7121 ]
def METHOD_NAME(plugin_settings):
    settings = get_settings()
    assert settings.version == 1
    settings_profile = settings.profiles[_DEFAULT_PROFILE]
    assert settings_profile.scopes == {PluginScope.STORAGE: [TestPlugin.get_path()]}
    assert settings_profile.settings == {
        TestPlugin.get_path(): {
            SettingsVar.SOME_SETTING: "bar",
            _PLUGIN_VERSION_KEY: "0.1.0",
        }
    }
[ 9, 19, 817 ]
def METHOD_NAME(*args, **kwargs):
    assert self._indent > 0
    self._indent -= 1
    if logger.isEnabledFor(logging.INFO):
        s: str = " " * self._indent
        s += f"[!error!<-observed({suffix})] "
        s += ",".join((str(x) for x in args))
        for k, v in kwargs.items():
            s += f"{k}->{v}"
        logger.info(s)
[ -1 ]
def METHOD_NAME(cls):
    cls.bot_reference = ZoneHParserBot
    cls.default_input_message = ACCEPTED_REPORT
    cls.sysconfig = {'feedname': 'Compromised-Website'}
[ 0, 1227 ]
def METHOD_NAME(
    info_list: list[_OptimizationHistoryInfo],
    target_name: str,
) -> "Axes":
    # Set up the graph style.
    plt.style.use("ggplot")  # Use ggplot style sheet for similar outputs to plotly.
    _, ax = plt.subplots()
    ax.set_title("Optimization History Plot")
    ax.set_xlabel("Trial")
    ax.set_ylabel(target_name)
    cmap = plt.get_cmap("tab10")  # Use tab10 colormap for similar outputs to plotly.

    for i, (trial_numbers, values_info, best_values_info) in enumerate(info_list):
        if values_info.stds is not None:
            if (
                _ValueState.Infeasible in values_info.states
                or _ValueState.Incomplete in values_info.states
            ):
                _logger.warning(
                    "Your study contains infeasible trials. "
                    "In optimization history plot, "
                    "error bars are calculated for only feasible trial values."
                )
            feasible_trial_numbers = trial_numbers
            feasible_trial_values = values_info.values
            plt.errorbar(
                x=feasible_trial_numbers,
                y=feasible_trial_values,
                yerr=values_info.stds,
                capsize=5,
                fmt="o",
                color="tab:blue",
            )
            infeasible_trial_numbers: list[int] = []
            infeasible_trial_values: list[float] = []
        else:
            feasible_trial_numbers = [
                n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Feasible
            ]
            infeasible_trial_numbers = [
                n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Infeasible
            ]
            feasible_trial_values = []
            for num in feasible_trial_numbers:
                feasible_trial_values.append(values_info.values[num])
            infeasible_trial_values = []
            for num in infeasible_trial_numbers:
                infeasible_trial_values.append(values_info.values[num])
        ax.scatter(
            x=feasible_trial_numbers,
            y=feasible_trial_values,
            color=cmap(0) if len(info_list) == 1 else cmap(2 * i),
            alpha=1,
            label=values_info.label_name,
        )

        if best_values_info is not None:
            ax.plot(
                trial_numbers,
                best_values_info.values,
                color=cmap(3) if len(info_list) == 1 else cmap(2 * i + 1),
                alpha=0.5,
                label=best_values_info.label_name,
            )
            if best_values_info.stds is not None:
                lower = np.array(best_values_info.values) - np.array(best_values_info.stds)
                upper = np.array(best_values_info.values) + np.array(best_values_info.stds)
                ax.fill_between(
                    x=trial_numbers,
                    y1=lower,
                    y2=upper,
                    color="tab:red",
                    alpha=0.4,
                )
            ax.legend()
        ax.scatter(
            x=infeasible_trial_numbers,
            y=infeasible_trial_values,
            color="#cccccc",
        )
    plt.legend(bbox_to_anchor=(1.05, 1.0), loc="upper left")
    return ax
[ 19, 2123, 351, 1288 ]
def METHOD_NAME(self): return self.IdString
[ 19, 147, 144 ]
def METHOD_NAME(ctx, z): return isinstance(z, complex)
[ 137, 2587, 44 ]
def METHOD_NAME(schema, rootschema, props=None):
    items = select_items_from_schema(schema, props)
    return build_schema_table(items, rootschema)
[ 123, 135, 410 ]
def METHOD_NAME(elem, loader=None):
    if loader is None:
        loader = default_loader
    # look for xinclude elements
    i = 0
    while i < len(elem):
        e = elem[i]
        if e.tag == XINCLUDE_INCLUDE:
            # process xinclude directive
            href = e.get("href")
            parse = e.get("parse", "xml")
            if parse == "xml":
                node = loader(href, parse)
                if node is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                node = copy.copy(node)
                if e.tail:
                    node.tail = (node.tail or "") + e.tail
                elem[i] = node
            elif parse == "text":
                text = loader(href, parse, e.get("encoding"))
                if text is None:
                    raise FatalIncludeError(
                        "cannot load %r as %r" % (href, parse)
                    )
                if i:
                    node = elem[i-1]
                    node.tail = (node.tail or "") + text + (e.tail or "")
                else:
                    elem.text = (elem.text or "") + text + (e.tail or "")
                del elem[i]
                continue
            else:
                raise FatalIncludeError(
                    "unknown parse type in xi:include tag (%r)" % parse
                )
        elif e.tag == XINCLUDE_FALLBACK:
            raise FatalIncludeError(
                "xi:fallback tag must be child of xi:include (%r)" % e.tag
            )
        else:
            METHOD_NAME(e, loader)
        i = i + 1
[ 1872 ]
def METHOD_NAME(): """Total location events raises an error for a bad direction.""" with pytest.raises(ValueError): TotalLocationEvents( "2016-01-01", "2016-01-04", spatial_unit=make_spatial_unit("versioned-site"), interval="min", direction="BAD_DIRECTION", )
[ 9, 1068, 4065, 45, 168 ]
def METHOD_NAME(self):
    # type: () -> None
    self._client.METHOD_NAME()
[ 1462 ]
def METHOD_NAME(self, vault_uri, **kwargs):
    from azure.keyvault.keys import KeyClient
    credential = self.get_credential(KeyClient)
    return self.create_client_from_credential(
        KeyClient, credential=credential, vault_url=vault_uri, **kwargs
    )
[ 129, 59, 340 ]
def METHOD_NAME(self, path, commit_hash):
    def checkout_existing(display_error):
        # Deinit fails if no submodules, so ignore its failure
        self._run_git(['-c', 'protocol.file.allow=always',
                       'submodule', 'deinit', '-f', '.'],
                      cwd=path, display_error=False, valid_return_codes=None)
        self._run_git(['checkout', '-f', commit_hash],
                      cwd=path, display_error=display_error)
        self._run_git(['clean', '-fdx'],
                      cwd=path, display_error=display_error)
        self._run_git(['-c', 'protocol.file.allow=always',
                       'submodule', 'update', '--init', '--recursive'],
                      cwd=path, display_error=display_error)

    if os.path.isdir(path):
        try:
            checkout_existing(display_error=False)
        except util.ProcessError:
            # Remove and try to re-clone
            util.long_path_rmtree(path)

    if not os.path.isdir(path):
        self._run_git(['clone', '--shared', '--recursive', self._path, path],
                      cwd=None)
        checkout_existing(display_error=True)
[ 2170 ]
def METHOD_NAME(email_service_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEmailServiceResult:
    """
    Get the EmailService and its properties.

    :param str email_service_name: The name of the EmailService resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['emailServiceName'] = email_service_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:communication/v20230401preview:getEmailService', __args__, opts=opts, typ=GetEmailServiceResult).value

    return AwaitableGetEmailServiceResult(
        data_location=pulumi.get(__ret__, 'data_location'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 487, 549 ]
def METHOD_NAME(self) -> bool:
    conf = self.col.decks.config_dict_for_deck_id(self.current_deck_id())
    return conf["timer"]
[ 427, 697, 2401 ]
def METHOD_NAME(self):
    lib = self._get_userlibrary('kw1', 'Embedded ${arg}', 'kw2')
    assert_equal(len(lib.handlers), 3)
    assert_true('kw1' in lib.handlers)
    assert_true('kw 2' in lib.handlers)
    self._lib_has_embedded_arg_keyword(lib)
[ 9, 4564, 2537, 1646, 1576, 61, 2314 ]
def METHOD_NAME(self, index): return (index + self.MAX_FAN_PER_DRAWER - 1) / self.MAX_FAN_PER_DRAWER
[ 197, 1337, 724, 24, 1450, 724 ]
def METHOD_NAME(futures, lithops_executor):
    if mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS):
        try:
            path = os.path.realpath(mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS))
            job_id = futures[0].job_id
            plots_file_name = '{}_{}'.format(lithops_executor.executor_id, job_id)
            lithops_executor.plot(fs=futures, dst=os.path.join(path, plots_file_name))
            stats = {fut.call_id: fut.stats for fut in futures}
            stats_file_name = '{}_{}_stats.json'.format(lithops_executor.executor_id, job_id)
            with open(os.path.join(path, stats_file_name), 'w') as stats_file:
                stats_json = json.dumps(stats, indent=4)
                stats_file.write(stats_json)
        except Exception as e:
            logger.error('Error while exporting execution results: {}\n{}'.format(e, traceback.format_exc()))
[ 294, 2046, 2051 ]
def METHOD_NAME(config):
    """
    process optim configs for hybrid parallel
    """
    config["Optimizer"]["multi_precision"] = config["Engine"]["mix_precision"]["enable"]

    nranks = dist.get_world_size()
    dp_degree = config["Distributed"]["dp_degree"]
    sharding_degree = config["Distributed"]["sharding"]["sharding_degree"]
    if config["Optimizer"].get("tensor_fusion", None):
        assert (
            nranks == dp_degree * sharding_degree
        ), "tensor_fusion only support single card train or data/sharding parallel train"

    if config["Optimizer"]["lr"]["decay_steps"] is None:
        config["Optimizer"]["lr"]["decay_steps"] = config["Engine"]["max_steps"]
    config["Optimizer"]["lr"]["decay_steps"] *= config["Global"]["global_batch_size"]
[ 356, 13051, 736 ]
def METHOD_NAME(self): """Simple Convnet model.""" model = tf.keras.Sequential() model.add( tf.keras.layers.Conv2D( 32, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1), ) ) model.add(tf.keras.layers.Conv2D(64, (3, 3), activation="relu")) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation="relu")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(10, activation="softmax")) return model
[ 1697, 7222 ]
def METHOD_NAME(self):
    config = ClientConfig()
    assert config.dump() == {}
    assert config.get("foo") is None
    assert config.get("foo", default="bar") == "bar"
[ 9, 235 ]
def METHOD_NAME(self):
    calibration_script = os.path.join(self.args.intelai_models, self.args.precision,
                                      "generate_calibration_data.py")
    script_args_list = [
        "input_graph", "data_location",
        "batch_size",
        "num_inter_threads", "num_intra_threads"]
    cmd_prefix = self.get_command_prefix(self.args.socket_id) + \
        self.python_exe + " " + calibration_script
    cmd = self.add_args_to_command(cmd_prefix, script_args_list)
    self.run_command(cmd)
[ 22, 2216 ]
def METHOD_NAME(dev, target): _run_tests(dev, target, op_name="cumprod", gt_func=np.cumprod)
[ 9, 14044 ]
def METHOD_NAME(self) -> Optional[str]: """ The resource location. """ return pulumi.get(self, "location")
[ 708 ]
def METHOD_NAME(mt: 'hl.MatrixTable', no_props=False):
    assert mt.rows().collect() == [
        hl.Struct(row_idx=0, **unless(no_props, {'foo': 'a'})),
        hl.Struct(row_idx=1, **unless(no_props, {'foo': 'b'})),
    ]
[ 638, 855, 1346 ]
def METHOD_NAME(self): self.assertEqual(self.harvester_class.ratings(), 4.67)
[ 9, 4622 ]
def METHOD_NAME(self, obj: Any) -> bytes: ...
[ 947, 1479 ]
def METHOD_NAME(load, fnd):
    """
    Return a chunk from a file based on the data received
    """
    return _gitfs().METHOD_NAME(load, fnd)
[ 3124, 171 ]
def METHOD_NAME(self):
    # Check that setting the optional (non-ellipse) args works.
    crs = Mercator(
        longitude_of_projection_origin=27,
        standard_parallel=157.4,
        false_easting=13,
        false_northing=12,
    )
    self.assertEqualAndKind(crs.longitude_of_projection_origin, 27.0)
    self.assertEqualAndKind(crs.standard_parallel, 157.4)
    self.assertEqualAndKind(crs.false_easting, 13.0)
    self.assertEqualAndKind(crs.false_northing, 12.0)
[ 9, 0, 665, 335 ]
def METHOD_NAME(xy_of_hex):
    """Test scipy Voronoi names are mapped to landlab-style names."""
    voronoi = Voronoi(xy_of_hex)
    delaunay = Delaunay(xy_of_hex)
    graph = VoronoiDelaunay(xy_of_hex)
    voronoi.regions, voronoi.point_region = VoronoiDelaunay._remove_empty_regions(
        voronoi.regions, voronoi.point_region
    )

    assert np.all(graph.x_of_node == approx(voronoi.points[:, 0]))
    assert np.all(graph.y_of_node == approx(voronoi.points[:, 1]))
    assert np.all(graph.x_of_corner == approx(voronoi.vertices[:, 0]))
    assert np.all(graph.y_of_corner == approx(voronoi.vertices[:, 1]))
    assert np.all(graph.nodes_at_link == voronoi.ridge_points)

    assert tuple(graph.n_corners_at_cell) == tuple(
        len(region) for region in voronoi.regions
    )
    for cell, corners in enumerate(graph.corners_at_cell):
        assert np.all(corners[: graph.n_corners_at_cell[cell]] == voronoi.regions[cell])
        assert np.all(corners[graph.n_corners_at_cell[cell] :] == -1)
    assert np.all(graph.corners_at_face == voronoi.ridge_vertices)
    assert np.all(graph.nodes_at_face == voronoi.ridge_points)
    assert np.all(graph.cell_at_node == voronoi.point_region)

    assert np.all(graph.nodes_at_patch == delaunay.simplices)
[ 9, 6071, 156, 445 ]
def METHOD_NAME(self): """ function to start the draw ROI function """ # If there is a ROI Selected if self.list_of_ROIs.currentItem() is not None: roi = self.list_of_ROIs.currentItem() self.roi_name = str(roi.text()) # Call function on UIDrawWindow so it has selected ROI self.signal_roi_name.emit(self.roi_name) self.close()
[ 69, 1472, 65, 2859 ]
def METHOD_NAME(self) -> Iterator[None]:
[ 129 ]
def METHOD_NAME(self) -> bool: return self.global_rank == 0
[ 137, 285, 313 ]
def METHOD_NAME(self) -> Optional[str]: """ The type of identity that last modified the resource. """ return pulumi.get(self, "last_modified_by_type")
[ 679, 680, 604, 44 ]
def METHOD_NAME(
    ivp: problems.InitialValueProblem,
    locations: Union[Sequence, np.ndarray],
    ode_information_operator: information_operators.InformationOperator,
    approx_strategy: Optional[approx_strategies.ApproximationStrategy] = None,
    ode_measurement_variance: Optional[FloatLike] = 0.0,
    exclude_initial_condition=False,
):
    """Transform an initial value problem into a regression problem.

    Parameters
    ----------
    ivp
        Initial value problem to be transformed.
    locations
        Locations of the time-grid-points.
    ode_information_operator
        ODE information operator to use.
    approx_strategy
        Approximation strategy to use. Optional. Default is `EK1()`.
    ode_measurement_variance
        Artificial ODE measurement noise. Optional. Default is 0.0.
    exclude_initial_condition
        Whether to exclude the initial condition from the regression problem.
        Optional. Default is False, in which case the returned measurement model
        list consist of [`initcond_mm`, `ode_mm`, ..., `ode_mm`].

    Returns
    -------
    problems.TimeSeriesRegressionProblem
        Time-series regression problem.
    """
    # Construct data and solution
    N = len(locations)
    data = np.zeros((N, ivp.dimension))
    if ivp.solution is not None:
        solution = np.stack([ivp.solution(t) for t in locations])
    else:
        solution = None

    ode_information_operator.incorporate_ode(ivp)

    # Construct measurement models
    measmod_initial_condition, measmod_ode = _construct_measurement_models(
        ivp, ode_information_operator, approx_strategy, ode_measurement_variance
    )
    if exclude_initial_condition:
        measmod_list = [measmod_ode] * N
    else:
        measmod_list = [measmod_initial_condition] + [measmod_ode] * (N - 1)

    # Return regression problem
    return problems.TimeSeriesRegressionProblem(
        locations=locations,
        observations=data,
        measurement_models=measmod_list,
        solution=solution,
    )
[ -1, 24, 1399, 3095 ]
def METHOD_NAME(self): r"""Does the dataset contain temporal graphs Returns ------- bool """ return True
[ 137, 6346 ]
def METHOD_NAME():
    date = "2001-01-01"
    response = GoogleAds.parse_single_result(SAMPLE_SCHEMA, MockedDateSegment(date))
    assert response == response
[ 9, 214, 97, 1571 ]
def METHOD_NAME(
    self, api: _PageApi, max_results: int | None, limit: int | None
) -> Generator[_HubSpotResult, None, None]:
    after = None
    count = 0
    # HubSpot returns a 400 HTTP error when trying to fetch more than 100 results
    if limit is not None:
        limit = min(limit, 100)
    while True:
        page = self._fetch_page(api, after=after, limit=limit)
        for result in page.results:
            if max_results is not None and count >= max_results:
                return
            yield result
            count += 1
        if (after := page.next_page_after()) is None:
            return
[ 1571, 640 ]
def METHOD_NAME(self, segment, states):
    # Split a full word (segment) into subwords (units)
    return self.spm['src'].EncodeAsPieces(segment)
[ 4373, 24, 1878 ]
def METHOD_NAME(cls, original_object: Any, pr_id: int) -> "PullRequest":
    pull_request = original_object.get_pr(pr_id)
    pull_request._status = PRStatus.closed
    return pull_request
[ 1933, 1462 ]
def METHOD_NAME(path): """ Make sure the directory path ends with a slash. >>> assert path_prettify('/foo/bar') == '/foo/bar/' >>> assert path_prettify('/foo/bar//') == '/foo/bar/' :param path: The path to process. :type path: str :return: Returns the prettified path. :rtype: str """ assert isinstance(path, str) return '{}/'.format(path.rstrip('/'))
[ 157, 2191 ]
def METHOD_NAME(self): """Return a list of surveys for MAGPIS""" return self.surveys
[ 245, 9058 ]
def METHOD_NAME(self):
[ 9, 73, 24, 35, 105 ]
def METHOD_NAME(self): """Check for errors after having set a property and log them. Called if :code:`check_set_errors=True` is set for that property. :return: List of error entries. """ response = self.read() return [] if response == "" else [response]
[ 250, 0, 1096 ]
def METHOD_NAME(s, *p):
    path = s
    for t in p:
        if (not path) or isabs(t):
            path = t
            continue
        if t[:1] == ':':
            t = t[1:]
        if ':' not in path:
            path = ':' + path
        if path[-1:] != ':':
            path = path + ':'
        path = path + t
    return path
[ 2831 ]
def METHOD_NAME(self):
    self.image1.load_image()
    self.image2.load_image()
    self.image3.load_image()
    self.assertTrue(self.image3.set_image_import(self.image2))
    self.assertFalse(self.image3.set_image_import(self.image1))
[ 9, 0, 660, 512 ]
def METHOD_NAME(self) -> str: """ Resource type. """ return pulumi.get(self, "type")
[ 44 ]
def METHOD_NAME(tmp_path, mocker, capsys, backend, status_runner, container_type):
    # given
    unsynced_container = prepare_metadata_container(container_type=container_type, path=tmp_path, last_ack_version=1)
    synced_container = prepare_metadata_container(container_type=container_type, path=tmp_path, last_ack_version=3)
    get_container_impl = generate_get_metadata_container(registered_containers=(unsynced_container, synced_container))

    # and
    mocker.patch.object(backend, "get_metadata_container", get_container_impl)
    mocker.patch.object(Operation, "from_dict")

    # when
    status_runner.synchronization_status(tmp_path)

    # then
    captured = capsys.readouterr()
    assert captured.out.splitlines() == [
        "Unsynchronized objects:",
        f"- {get_qualified_name(unsynced_container)}",
        "",
        "Please run with the `neptune sync --help` to see example commands.",
    ]
[ 9, 245, 2954 ]
def METHOD_NAME(self, ip):
    if len(ip.split(".")) == 4:
        return True
    else:
        return False
[ 137, 1213, 8046 ]
def METHOD_NAME(self):
    self.run_location_tests([
        ["Superbunny Cave - Top", False, []],
        ["Superbunny Cave - Top", True, ['Pegasus Boots']],

        ["Superbunny Cave - Bottom", False, []],
        ["Superbunny Cave - Bottom", True, ['Pegasus Boots']],

        ["Hookshot Cave - Bottom Right", False, []],
        ["Hookshot Cave - Bottom Right", False, [], ['Hookshot', 'Pegasus Boots']],
        ["Hookshot Cave - Bottom Right", False, [], ['Progressive Glove', 'Pegasus Boots', 'Magic Mirror']],
        ["Hookshot Cave - Bottom Right", True, ['Pegasus Boots']],

        ["Hookshot Cave - Bottom Left", False, []],
        ["Hookshot Cave - Bottom Left", False, [], ['Hookshot']],
        ["Hookshot Cave - Bottom Left", False, [], ['Progressive Glove', 'Pegasus Boots', 'Magic Mirror']],
        ["Hookshot Cave - Bottom Left", True, ['Pegasus Boots', 'Hookshot']],

        ["Hookshot Cave - Top Left", False, []],
        ["Hookshot Cave - Top Left", False, [], ['Hookshot']],
        ["Hookshot Cave - Top Left", False, [], ['Progressive Glove', 'Pegasus Boots', 'Magic Mirror']],
        ["Hookshot Cave - Top Left", True, ['Pegasus Boots', 'Hookshot']],

        ["Hookshot Cave - Top Right", False, []],
        ["Hookshot Cave - Top Right", False, [], ['Hookshot']],
        ["Hookshot Cave - Top Right", False, [], ['Progressive Glove', 'Pegasus Boots', 'Magic Mirror']],
        ["Hookshot Cave - Top Right", True, ['Pegasus Boots', 'Hookshot']],
    ])
[ 9, 14745, 6022, 3021, 570, -1 ]
def METHOD_NAME(goal):
    txinfo, err = goal.app_create(goal.account, goal.assemble(TEAL))
    assert not err, f"err: {err}"
    appid = txinfo["application-index"]

    creator_min_balance = int.from_bytes(b64decode(txinfo["logs"][2]), byteorder="big")
    return appid, creator_min_balance
[ 129, 5703, 1835, 3101, 991 ]
def METHOD_NAME(status_tree):
    status_tree.setdefault("children", {})
    status_tree.pop("created_time", "")

    started_time = status_tree.pop("started_time", None)
    archived_time = status_tree.pop("archived_time", None)

    if "elapsed_time" not in status_tree:
        status_tree["elapsed_time"] = calculate_elapsed_time(started_time, archived_time)

    status_tree["start_time"] = format_datetime(started_time) if started_time else None
    status_tree["finish_time"] = format_datetime(archived_time) if archived_time else None
[ 275, 452, 104 ]
def METHOD_NAME(self, *args, **kwargs) -> IterDataPipe:
    res = self.deterministic_fn(*args, **kwargs)  # type: ignore
    if not isinstance(res, bool):
        raise TypeError(
            "deterministic_fn of `non_deterministic` decorator is required "
            "to return a boolean value, but {} is found".format(type(res))
        )
    global _determinism
    if _determinism and res:
        raise TypeError(
            "{} is non-deterministic with the inputs, but you set "
            "'guaranteed_datapipes_determinism'. You can turn off determinism "
            "for this DataPipe if that is acceptable for your application".format(
                self.cls.__name__
            )
        )  # type: ignore
    return self.cls(*args, **kwargs)  # type: ignore
[ 4665, 291, 667 ]
def METHOD_NAME(self, image: ndarray, camera: str) -> str:
    r = self._get("image/signed_urls")
    presigned_urls = r.json()
    if not r.ok:
        raise Exception("Unable to get signed urls")

    # resize and submit original
    files = {"file": get_jpg_bytes(image, 1920, 85)}
    data = presigned_urls["original"]["fields"]
    data["content-type"] = "image/jpeg"
    r = requests.post(presigned_urls["original"]["url"], files=files, data=data)
    if not r.ok:
        logger.error(f"Failed to upload original: {r.status_code} {r.text}")
        raise Exception(r.text)

    # resize and submit annotate
    files = {"file": get_jpg_bytes(image, 640, 70)}
    data = presigned_urls["annotate"]["fields"]
    data["content-type"] = "image/jpeg"
    r = requests.post(presigned_urls["annotate"]["url"], files=files, data=data)
    if not r.ok:
        logger.error(f"Failed to upload annotate: {r.status_code} {r.text}")
        raise Exception(r.text)

    # resize and submit thumbnail
    files = {"file": get_jpg_bytes(image, 200, 70)}
    data = presigned_urls["thumbnail"]["fields"]
    data["content-type"] = "image/jpeg"
    r = requests.post(presigned_urls["thumbnail"]["url"], files=files, data=data)
    if not r.ok:
        logger.error(f"Failed to upload thumbnail: {r.status_code} {r.text}")
        raise Exception(r.text)

    # create image
    r = self._post(
        "image/create", {"id": presigned_urls["imageId"], "camera": camera}
    )
    if not r.ok:
        raise Exception(r.text)

    # return image id
    return str(presigned_urls.get("imageId"))
[ 172, 660 ]
def METHOD_NAME(stream):
    import cStringIO
    return cStringIO.StringIO(stream.read())
[ 1645, 948 ]
def METHOD_NAME(self, ProtocolList): self.ProtocolList = ProtocolList
[ 0, 234, 245 ]
def METHOD_NAME(self, position: Union[pygame.math.Vector2, Tuple[int, int], Tuple[float, float]]):
    """
    Method to directly set the relative rect position of an element.

    :param position: The new position to set.
    """
[ 0, 1821, 195 ]
def METHOD_NAME(self, session): pass
[ 69, 2333 ]
def METHOD_NAME(self):
[ 1911, 5490, 947, 695, 2779 ]
def METHOD_NAME(msg: MsgInSignTx) -> None:
    data_length = msg.data_length  # local_cache_attribute

    if data_length > 0:
        if not msg.data_initial_chunk:
            raise DataError("Data length provided, but no initial chunk")
        # Our encoding only supports transactions up to 2^24 bytes. To
        # prevent exceeding the limit we use a stricter limit on data length.
        if data_length > 16_000_000:
            raise DataError("Data length exceeds limit")
        if len(msg.data_initial_chunk) > data_length:
            raise DataError("Invalid size of initial chunk")

    if len(msg.to) not in (0, 40, 42):
        raise DataError("Invalid recipient address")

    if not msg.to and data_length == 0:
        # sending transaction to address 0 (contract creation) without a data field
        raise DataError("Contract creation without data")

    if msg.chain_id == 0:
        raise DataError("Chain ID out of bounds")
[ 250, 67, 342 ]
def METHOD_NAME(lr, x):
    """Classify an observation into a class."""
    probs = calculate(lr, x)
    if probs[0] > probs[1]:
        return 0
    return 1
[ 6144 ]
def METHOD_NAME():
    example(
        resource_name='PXI1Slot2/0',
        options={'simulate': True, 'driver_setup': {'Model': '4190', 'BoardType': 'PXIe', }, },
        lcr_frequency=10.0e3,
        lcr_impedance_range=100.0,
        cable_length=nidcpower.CableLength.NI_STANDARD_2M,
        lcr_voltage_rms=700.0e-3,
        lcr_dc_bias_source=nidcpower.LCRDCBiasSource.OFF,
        lcr_dc_bias_voltage_level=0.0,
        lcr_measurement_time=nidcpower.LCRMeasurementTime.MEDIUM,
        lcr_custom_measurement_time=10.0e-3,
        lcr_source_delay_mode=nidcpower.LCRSourceDelayMode.AUTOMATIC,
        source_delay=16.66e-3,
    )
[ 9, 1441 ]
async def METHOD_NAME(self, req):
    # based on https://github.com/oetiker/aio-reverse-proxy/blob/master/paraview-proxy.py
    ws_server = aiohttp.web.WebSocketResponse()
    await ws_server.prepare(req)

    async with self.session.ws_connect(
        self.next_url + "/ws", headers=req.headers
    ) as ws_client:

        async def ws_forward(ws_from, ws_to):
            async for msg in ws_from:
                if ws_to.closed:
                    await ws_to.close(code=ws_to.close_code, message=msg.extra)
                    return
                if msg.type == aiohttp.WSMsgType.TEXT:
                    await ws_to.send_str(msg.data)
                elif msg.type == aiohttp.WSMsgType.BINARY:
                    await ws_to.send_bytes(msg.data)
                elif msg.type == aiohttp.WSMsgType.PING:
                    await ws_to.ping()
                elif msg.type == aiohttp.WSMsgType.PONG:
                    await ws_to.pong()
                else:
                    raise ValueError(f'unexpected message type: {msg}')

        # keep forwarding websocket data in both directions
        await asyncio.wait(
            [
                ws_forward(ws_server, ws_client),
                ws_forward(ws_client, ws_server)
            ],
            return_when=asyncio.FIRST_COMPLETED)

        return ws_server
[ 368, 1519 ]
def METHOD_NAME(self):
    auth = self.server.auth()
    session = self.client.session
    session[auth._token_key] = 'token'
    session[auth._state_key] = 'state'
    session[auth._user_key] = 'user'
    session.save()
    url = reverse('ci:bitbucket:sign_out', args=[self.server.name])
    response = self.client.get(url)
    self.assertEqual(response.status_code, 302)  # redirect
    # make sure the session variables are gone
    self.assertNotIn(auth._token_key, self.client.session)
    self.assertNotIn(auth._state_key, self.client.session)
    self.assertNotIn(auth._user_key, self.client.session)

    data = {'source_url': reverse('ci:main')}
    response = self.client.get(url, data)
    self.assertEqual(response.status_code, 302)  # redirect
[ 9, 2452, 1737 ]
def METHOD_NAME(pricing_client):
    response = pricing_client.describe_services(FormatVersion='aws_v1')
    service_codes = map(lambda service: service['ServiceCode'], response['Services'])
    return service_codes
[ 19, 874, 549, 1114 ]
def METHOD_NAME(self, bitstring: str) -> bool:
    """Evaluate the expression on a bitstring.

    This evaluation is done classically.

    Args:
        bitstring: The bitstring for which to evaluate.

    Returns:
        bool: result of the evaluation.
    """
    from tweedledum import BitVec  # pylint: disable=import-error

    bits = []
    for bit in bitstring:
        bits.append(BitVec(1, bit))
    return bool(self._tweedledum_bool_expression.METHOD_NAME(*bits))
[ 1792 ]
def METHOD_NAME(project):
    """Test defining indirect inputs."""
    path_1 = "/some/absolute/path"
    path_2 = "relative/path"
    path_3 = "a/path with white-spaces"

    input_1 = Input("input-1", path_1)

    with Project() as project:
        input_2 = Input("input-2", path_2)
        input_3 = Input("input-3", path_3)

    assert Path(path_1) == input_1.path
    assert Path(path_2) == input_2.path
    assert Path(path_3) == input_3.path

    content = get_indirect_inputs_path(project.path).read_text()
    assert {path_1, path_2, path_3} == set(yaml.safe_load(content).values())
    assert {input_1.name, input_2.name, input_3.name} == set(yaml.safe_load(content).keys())
[ 9, 2500, 1461 ]
def METHOD_NAME(reviewers):
    messages = []
    for role, reviewers_ in group_reviewers(reviewers).items():
        message = ', '.join(str(reviewer) for reviewer in reviewers_)
        if role:
            message += _(' as {role}').format(role=str(role))
        message += '.'
        messages.append(message)
    return messages
[ 7471, 277 ]
def METHOD_NAME(self):
    if self._cmake:
        return self._cmake
    self._cmake = CMake(self)
    self._cmake.definitions["BUILD_PLUGIN"] = False
    self._cmake.definitions["BUILD_SHARED"] = self.options.shared
    self._cmake.definitions["BUILD_STATIC"] = not self.options.shared
    self._cmake.definitions["BUILD_TESTING"] = False
    self._cmake.configure(build_folder=self._build_subfolder)
    return self._cmake
[ 111, 334 ]
def METHOD_NAME(args):
    options = parser.parse_args(args)

    with codecs.open(options.spec, "r", encoding="utf-8") as spec_file:
        spec_yaml = yaml.safe_load(spec_file)
        if not isinstance(spec_yaml, list):
            raise ValueError("expected a list of micro-kernels in the spec")

        tests = """\
[ 57 ]
def METHOD_NAME(self, ev):
    ev.stopPropagation()
    self.score.tabs[-2].add_close_button()
    self.score.tabs[-2].select()
    del self.score.tabs[-1]
    del self.score.bars[-1]
    self.remove()
[ 1462 ]
def METHOD_NAME(operational_insights_resource_provider: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                settings_name: Optional[str] = None,
                workspace_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEyesOnResult:
    """
    Gets a setting.

    :param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str settings_name: The setting name. Supports - Anomalies, EyesOn, EntityAnalytics, Ueba
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
    __args__['resourceGroupName'] = resource_group_name
    __args__['settingsName'] = settings_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20210301preview:getEyesOn', __args__, opts=opts, typ=GetEyesOnResult).value

    return AwaitableGetEyesOnResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        is_enabled=pulumi.get(__ret__, 'is_enabled'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        type=pulumi.get(__ret__, 'type'))
[ 19, 11976, 69 ]
def METHOD_NAME():
    plugin = catkin_tools.CatkinToolsPlugin(part_name="my-part", options=lambda: None)
    assert plugin.get_build_packages() == {
        "python3-rosdep",
        "python3-catkin-tools",
    }
[ 9, 19, 56, 2975 ]
def METHOD_NAME(self) -> list: """ Returns the list of underlay links for this device """ underlay_links = [] underlay_links.extend(self._uplinks) if self.shared_utils.fabric_sflow_uplinks is not None: for uplink in underlay_links: uplink.update({"sflow": {"enable": self.shared_utils.fabric_sflow_uplinks}}) for peer in self._avd_peers: peer_facts = self.shared_utils.get_peer_facts(peer, required=True) for uplink in peer_facts["uplinks"]: if uplink["peer"] == self.shared_utils.hostname: link = { "interface": uplink["peer_interface"], "peer": peer, "peer_interface": uplink["interface"], "peer_type": get(peer_facts, "type"), "peer_is_deployed": peer_facts["is_deployed"], "peer_bgp_as": get(peer_facts, "bgp_as"), "type": get(uplink, "type", required=True), "speed": get(uplink, "speed"), "ip_address": get(uplink, "peer_ip_address"), "peer_ip_address": get(uplink, "ip_address"), "channel_group_id": get(uplink, "peer_channel_group_id"), "peer_channel_group_id": get(uplink, "channel_group_id"), "channel_description": get(uplink, "peer_channel_description"), "vlans": get(uplink, "vlans"), "native_vlan": get(uplink, "native_vlan"), "trunk_groups": get(uplink, "peer_trunk_groups"), "bfd": get(uplink, "bfd"), "ptp": get(uplink, "ptp"), "mac_security": get(uplink, "mac_security"), "short_esi": get(uplink, "peer_short_esi"), "underlay_multicast": get(uplink, "underlay_multicast"), "ipv6_enable": get(uplink, "ipv6_enable"), "sflow": {"enable": self.shared_utils.fabric_sflow_downlinks}, "structured_config": get(uplink, "structured_config"), } underlay_links.append(strip_empties_from_dict(link)) return natural_sort(underlay_links, "interface")
[ 5606, 1127 ]
def METHOD_NAME(env, source, expect):
    t = env.from_string(source)
    result = t.render(value=expect)
    assert isinstance(result, bool)
    assert result is expect
[ 9, 5294 ]
def METHOD_NAME(self):
    mf = dft.UKS(h4)
    mf.grids.level = 4
    mf.xc = 'lda,vwn'
    mf.conv_tol = 1e-14
    mf.kernel()
    hobj = mf.Hessian()
    e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
    e2 += hobj.hess_nuc(h4)
    e2ref = finite_partial_diff(mf)
    self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 6)
[ 9, 4516, 2443, 5018, 2351, 5019, 5020 ]
def METHOD_NAME(self):
    pair_table = []
    spk_paths = set()
    with open(self.meta_data, "r") as f:
        usage_list = f.readlines()
    for pair in usage_list:
        list_pair = pair.split()
        pair_1 = os.path.join(self.root, list_pair[1])
        pair_2 = os.path.join(self.root, list_pair[2])
        spk_paths.add(pair_1)
        spk_paths.add(pair_2)
        one_pair = [list_pair[0], pair_1, pair_2]
        pair_table.append(one_pair)
    return {
        "spk_paths": list(spk_paths),
        "total_spk_num": None,
        "pair_table": pair_table
    }
[ 3613 ]
def METHOD_NAME(self, buff):
    inits = self.params
    # Make a controller object
    code = (
        "%(name)s = hardware.eyetracker.EyetrackerControl(\n"
    )
    buff.writeIndentedLines(code % inits)
    buff.setIndentLevel(1, relative=True)
    code = (
        "tracker=eyetracker,\n"
        "actionType=%(actionType)s\n"
    )
    buff.writeIndentedLines(code % inits)
    buff.setIndentLevel(-1, relative=True)
    code = (
        ")"
    )
    buff.writeIndentedLines(code % inits)
[ 77, 176, 544 ]
def METHOD_NAME(path):
    files = os.listdir(path)
    for file in files:
        if path != '.':
            file = f'{path}/{file}'
        if os.path.isdir(file):
            METHOD_NAME(file)
        elif os.path.isfile(file):
            process_file(file)
[ 19, 1537 ]
def METHOD_NAME(tmp_path):
    from muse.examples import copy_model
    from muse.readers.csv import read_initial_assets

    copy_model("default", tmp_path / "default")
    copy_model("trade", tmp_path / "trade")

    def path(x, y):
        return (
            tmp_path / x / "model" / "technodata" / "gas" / f"Existing{y.title()}.csv"
        )

    assets = read_initial_assets(path("default", "capacity"))
    assert set(assets.dims) == {"year", "region", "asset"}

    assets = read_initial_assets(path("trade", "trade"))
    assert set(assets.dims) == {"year", "region", "asset", "dst_region"}
[ 9, 2471, 3407 ]
def METHOD_NAME(self, device): """Return multiple vars.""" n_wires = 2 dev = device(n_wires) obs1 = qml.Projector([0], wires=0) obs2 = qml.PauliZ(wires=1) func = qubit_ansatz def circuit(x): func(x) return qml.var(obs1), qml.var(obs2) qnode = qml.QNode(circuit, dev, diff_method=None) res = qnode(0.5) assert isinstance(res, tuple) assert len(res) == 2 assert isinstance(res[0], np.ndarray) assert res[0].shape == () assert isinstance(res[1], np.ndarray) assert res[1].shape == ()
[ 9, 107, 486 ]
def METHOD_NAME():
    bad_file = os.path.join('./', 'CLISShio.txtX')
    bad_yeardata_path = os.path.join('./', 'Data', 'yeardata')

    with pytest.raises(IOError):
        CyShioTime(bad_file)

    with pytest.raises(IOError):
        shio = CyShioTime(shio_file)
        shio.set_shio_yeardata_path(bad_yeardata_path)
[ 9, 504 ]
def METHOD_NAME(instance, func_name, data):
    try:
        func = getattr(instance, func_name)
    except AttributeError as exc:
        raise InvalidPathError(f'Function {func_name} not found') from exc
    if isinstance(data, dict):
        return func(**data)
    if data is not None:
        return func(data)
    return func()
[ 128, 89, 717 ]
def METHOD_NAME(): """Test that named numpy methods are the same as the numpy function.""" x = np.random.randn(100) res_a = algo.bootstrap(x, func="mean", seed=0) res_b = algo.bootstrap(x, func=np.mean, seed=0) assert np.array_equal(res_a, res_b) res_a = algo.bootstrap(x, func="std", seed=0) res_b = algo.bootstrap(x, func=np.std, seed=0) assert np.array_equal(res_a, res_b) with pytest.raises(AttributeError): algo.bootstrap(x, func="not_a_method_name")
[ 9, 904, 144, 717 ]
def METHOD_NAME(self):
    pricing_types = (
        PRICING_EPHEMERAL,
        PRICING_HARD,
        PRICING_SPECIAL,
        PRICING_VM,
    )

    # Count nodes by type
    count_by_type = {t: 0 for t in pricing_types}
    for node in self.nodes:
        count_by_type[node.pricing_type] += 1

    # Yield metrics
    for pricing_type, count in count_by_type.items():
        yield {
            "name": self.metric,
            "group": self.group,
            "set": count,
            "labels": {
                "type": pricing_type.lower(),
            },
        }
[ 567 ]
def METHOD_NAME(self, tmpdir, data_node, repo):
    repository = repo()
    repository.base_path = tmpdir

    # Create 5 entities with version 1.0 and 5 entities with version 2.0
    for i in range(10):
        data_node.id = DataNodeId(f"data_node-{i}")
        data_node._version = f"{(i+1) // 5}.0"
        repository._save(data_node)

    objs = repository._load_all()
    assert len(objs) == 10
    repository._delete_by("version", "1.0")
    assert len(repository._load_all()) == 5
[ 9, 34, 604 ]
def METHOD_NAME(data):
    try:
        response = urlquick.post(URL + 'documents', data=data)
        if 'key' in response.json():
            result = URL + response.json()['key']
            return True, result
        if 'message' in response.json():
            return False, "Unable to upload log file: " + response.json()['message']
        Script.log('error: %s' % response.text)
        return False, "Unable to upload log file"
    except Exception:
        return False, "Unable to retrieve the paste url"
[ 72, 390 ]
def METHOD_NAME(self, name: str) -> None:
    events = self._module_basename('qapi-events', name)
    types = self._module_basename('qapi-types', name)
    visit = self._module_basename('qapi-visit', name)
    self._genc.add(mcgen('''
[ 3287, 21, 298 ]
def METHOD_NAME(value, pattern):
    if next(iter(re.finditer(pattern, value)), None):
        return '"{}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
    return value
[ 3565, 217, 1992 ]
def METHOD_NAME() -> Architecture:
    machine = platform.machine().lower()
    if machine == "x86_64" or machine == "amd64":
        return Architecture.X86_64
    elif machine == "aarch64" or machine == "arm64":
        return Architecture.ARM64
    else:
        return Architecture.Other
[ 1056, 4290 ]
def METHOD_NAME(meta, fname):
    """
    Write metadata files in ISCE format (.vrt and .xml files)
    """
    import isce
    import isceobj

    # create isce object for xml file
    img = isceobj.createDemImage()
    img.setFilename(os.path.abspath(fname))
    img.setWidth(meta.width)
    img.setLength(meta.length)
    img.setAccessMode('READ')
    img.bands = 1
    img.dataType = 'SHORT'
    img.scheme = 'BIP'
    img.reference = 'WGS84'
    img.firstLatitude = meta.north + meta.lat_step / 2.
    img.firstLongitude = meta.west + meta.lon_step / 2.
    img.deltaLatitude = meta.lat_step
    img.deltaLongitude = meta.lon_step

    # write to xml file
    xml_file = fname + '.xml'
    img.dump(xml_file)
    return xml_file
[ 77, -1, 773 ]
async def METHOD_NAME(crl_file_name: Path) -> x509.CertificateRevocationList:
    """Load a single crl from file

    Args:
        crl_file_name (Path): file to load

    Returns:
        x509.CertificateRevocationList: Return loaded CRL
    """
    content = await get_content(crl_file_name)
    if crl_file_name.suffix.lower() == '.der':
        return x509.load_der_x509_crl(content)
    return x509.load_pem_x509_crl(content)
[ 557, 8709 ]
def METHOD_NAME(data, indices, indptr, weight, bias=None):  # pylint: disable=invalid-name
    """The default implementation of csrmm in topi.

    Parameters
    ----------
    data : tvm.te.Tensor
        1-D with shape [nonzeros]

    indices : tvm.te.Tensor
        1-D with shape [nonzeros]

    indptr : tvm.te.Tensor
        1-D with shape [m+1]

    weight : tvm.te.Tensor
        2-D with shape [k, n]

    bias : tvm.te.Tensor, optional
        1-D with shape [m]

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [m, n]
    """
    assert (
        len(data.shape) == 1
        and len(indices.shape) == 1
        and len(indptr.shape) == 1
        and len(weight.shape) == 2
    ), "only support 2-dim csrmm"
    assert isinstance(
        weight, te.tensor.Tensor
    ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`"
    assert (
        data.dtype == weight.dtype
    ), f"Data and weight must have the same dtype, but they have {data.dtype} and {weight.dtype}"
    if bias is not None:
        assert len(bias.shape) == 1
    M = simplify(indptr.shape[0] - 1)
    _, N = weight.shape

    def csrmm_default_ir(data, indices, indptr, weight, out):
        """define ir for csrmm"""
        irb = tvm.tir.ir_builder.create()
        data_ptr = irb.buffer_ptr(data)
        indices_ptr = irb.buffer_ptr(indices)
        indptr_ptr = irb.buffer_ptr(indptr)
        weight_ptr = irb.buffer_ptr(weight)
        out_ptr = irb.buffer_ptr(out)
        M = simplify(indptr.shape[0] - 1)
        _, N = weight.shape
        with irb.for_range(0, N, kind="vectorize", name="n") as n:
            with irb.for_range(0, M, kind="parallel", name="row") as row:
                dot = irb.allocate(data.dtype, (1,), name="dot", scope="local")
                out_ptr[row * N + n] = cast(0, data.dtype)
                dot[0] = cast(0, data.dtype)
                row_start = indptr_ptr[row]
                row_end = indptr_ptr[row + 1]
                row_elems = row_end - row_start
                with irb.for_range(0, row_elems, name="idx") as idx:
                    elem = row_start + idx
                    dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem] * N + n]
                out_ptr[row * N + n] += dot[0]
        return irb.get()

    oshape = (M, N)
    matmul = te.extern(
        oshape,
        [data, indices, indptr, weight],
        lambda ins, outs: csrmm_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        tag="csrmm",
        dtype=data.dtype,
        name="out",
    )
    if bias is not None:
        matmul = te.compute(oshape, lambda i, j: matmul[i, j] + bias[i], tag=tag.BROADCAST)
    return matmul
[ 15479, 235 ]
def METHOD_NAME(commcare_user):
    data = {"user_id": commcare_user.get_id, "time": int(time.time())}
    return b64_aes_encrypt(json.dumps(data))
[ 2196, 598, 3056, 100 ]
def METHOD_NAME(self): pass
[ 618 ]
def METHOD_NAME(self):
    stdout, stderr, retc = lib.base.coe(lib.shell.shell_exec(self.check + ' --test=stdout/EXAMPLE03,,0'))
    self.assertRegex(stdout, r'Everything is ok.')
    self.assertEqual(stderr, '')
    self.assertEqual(retc, STATE_OK)
[ 9, 217, 250, 420, 4248, -1 ]
def METHOD_NAME(self, files):
    """
    Return the number of files failed
    """
    self.acquired.commit()
    self.available.commit()
    self.failed.commit()
    self.completed.commit()

    for i in files:
        # Check each set, instead of elif, just in case something has
        # got out of synch
        if i in self.available.files:
            self.available.files.remove(i)
        if i in self.completed.files:
            self.completed.files.remove(i)
        if i in self.acquired.files:
            self.acquired.files.remove(i)
        self.failed.addFile(i)
[ 180, 1537 ]
def METHOD_NAME(client, phase_factory, idea_factory):
    phase, module, project, idea = setup_phase(
        phase_factory, idea_factory, phases.RatingPhase
    )
    url = reverse(
        "meinberlin_ideas:idea-delete",
        kwargs={"pk": idea.pk, "year": idea.created.year},
    )
    with freeze_phase(phase):
        count = models.Idea.objects.all().count()
        assert count == 1
        moderator = idea.module.project.moderators.first()
        client.login(username=moderator.email, password="password")
        response = client.get(url)
        assert_template_response(response, "meinberlin_ideas/idea_confirm_delete.html")
        response = client.post(url)
        assert redirect_target(response) == "project-detail"
        assert response.status_code == 302
        count = models.Idea.objects.all().count()
        assert count == 0
[ 9, 4070, 1046, 34, 623, 909, 3200 ]
def METHOD_NAME(actions, state):
    del state
    return actions
[ 784, 734, -1 ]