Dataset schema — text: string (length 15 to 7.82k); ids: sequence (length 1 to 7)
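Each row below pairs one masked Python function (the text column, with the method name replaced by METHOD_NAME) with a short list of integer ids. As a minimal, hypothetical sketch of how rows with this schema could be loaded and inspected — assuming the corpus is stored as JSON Lines and that the file name method_name_corpus.jsonl stands in for the real source:

from datasets import load_dataset  # Hugging Face `datasets` library

# Hypothetical file name; any JSON Lines file with "text" and "ids" fields works.
dataset = load_dataset("json", data_files="method_name_corpus.jsonl", split="train")

for row in dataset.select(range(3)):
    # "text" holds the function source with its name masked as METHOD_NAME;
    # "ids" holds the short integer sequence paired with that snippet.
    print(row["ids"], row["text"].splitlines()[0])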
def METHOD_NAME(self, val): self.value_mapper = val
[ 0, 36, 3782 ]
def METHOD_NAME():
    runner = CliRunner()
    with runner.isolated_filesystem():
        result = runner.invoke(
            from_example_command,
            [
                "--name",
                "my_dagster_project",
                "--example",
                "assets_dbt_python",
                "--version",
                "1.3.11",
            ],
        )
        assert result.exit_code == 0
        assert os.path.exists("my_dagster_project")
        assert os.path.exists("my_dagster_project/assets_dbt_python")
        assert os.path.exists("my_dagster_project/assets_dbt_python_tests")
        # ensure we filter out tox.ini because it's used in our own CI
        assert not os.path.exists("my_dagster_project/tox.ini")
[ 9, 280, 1441, 462, 8969, 7909 ]
def METHOD_NAME(self, runnables):
    for runnable in runnables:
        self._check_can_run(runnable)
[ 250, 1046, 22, 75 ]
def METHOD_NAME(self, data_model):
    """
    Override to implement pre execute task logic

    Args:
        data_model: sub class of mxcubecore.model.procedure_model
                    Data is validated by the data_model object

    Returns:

    """
    pass
[ 709, 750 ]
def METHOD_NAME(file, filters):
    _envs = envs.load_yaml(file)
    all_flattens = {}

    def fatten_env_namespace(namespace_nests, local_envs):
        for k, v in local_envs.items():
            if isinstance(v, dict):
                nests = copy.deepcopy(namespace_nests)
                nests.append(k)
                fatten_env_namespace(nests, v)
            elif (k == "dataset" or k == "phase" or
                  k == "runner") and isinstance(v, list):
                for i in v:
                    if i.get("name") is None:
                        raise ValueError("name must be in dataset list. ", v)
                    nests = copy.deepcopy(namespace_nests)
                    nests.append(k)
                    nests.append(i["name"])
                    fatten_env_namespace(nests, i)
            else:
                global_k = ".".join(namespace_nests + [k])
                all_flattens[global_k] = v

    fatten_env_namespace([], _envs)

    ret = {}
    for k, v in all_flattens.items():
        for f in filters:
            if k.startswith(f):
                ret[k] = v

    return ret
[ 19, 75, -1, 280, 406 ]
def METHOD_NAME(self): pass
[ 709, 710 ]
def METHOD_NAME(
    mock_workspace_scope: OperationScope,
    mock_operation_config: OperationConfig,
    mock_aml_services_2022_02_01_preview: Mock,
    mock_machinelearning_client: Mock,
) -> ComponentOperations:
    yield ComponentOperations(
        operation_scope=mock_workspace_scope,
        operation_config=mock_operation_config,
        service_client=mock_aml_services_2022_02_01_preview,
        all_operations=mock_machinelearning_client._operation_container,
    )
[ 248, 1007, 2206 ]
def METHOD_NAME(self): """Runs the data collection.""" resources = self._retrieve() all_violations = self._find_violations(resources) self._output_results(all_violations)
[ 22 ]
def METHOD_NAME(hoft, rangekwargs, outunit):
    spec = astro.range_spectrogram(
        hoft,
        0.5,
        fftlength=0.25,
        overlap=0.125,
        method="median",
        nproc=2,
        **rangekwargs,
    )
    assert isinstance(spec, Spectrogram)
    assert spec.shape[0] == 2
    assert spec.unit == outunit
    assert spec.f0 == spec.df
    assert spec.dt == 0.5 * units.second
    assert spec.df == 4 * units.Hertz
[ 9, 661, 4291 ]
def METHOD_NAME(scope, text): return text
[ 1010, 791, 12386, 1178, 1952 ]
def METHOD_NAME(self):
[ 707 ]
def METHOD_NAME(self, text):
    text = text.replace('\\', '\\[u005C]'). \
        replace('.', '\\[char46]'). \
        replace('\'', '\\[u0027]'). \
        replace('`', '\\[u0060]'). \
        replace('~', '\\[u007E]')
    copy = text
    for char in copy:
        if len(char) != len(char.encode()):
            uni = char.encode('unicode_escape') \
                .decode()[1:] \
                .replace('x', 'u00') \
                .upper()
            text = text.replace(char, '\\[u' + uni[1:] + ']')
    return text
[ 4748, 2107 ]
def METHOD_NAME(self): self.cpp_info.set_property("cmake_find_mode", "both") self.cpp_info.set_property("cmake_file_name", "Fontconfig") self.cpp_info.set_property("cmake_target_name", "Fontconfig::Fontconfig") self.cpp_info.set_property("pkg_config_name", "fontconfig") self.cpp_info.libs = ["fontconfig"] if self.settings.os in ("Linux", "FreeBSD"): self.cpp_info.system_libs.extend(["m", "pthread"]) fontconfig_file = os.path.join(self.package_folder, "bin", "etc", "fonts", "fonts.conf") self.runenv_info.prepend_path("FONTCONFIG_FILE", fontconfig_file) fontconfig_path = os.path.join(self.package_folder, "bin", "etc", "fonts") self.runenv_info.prepend_path("FONTCONFIG_PATH", fontconfig_path) # TODO: to remove in conan v2 self.cpp_info.names["cmake_find_package"] = "Fontconfig" self.cpp_info.names["cmake_find_package_multi"] = "Fontconfig" self.env_info.FONTCONFIG_FILE = fontconfig_file self.env_info.FONTCONFIG_PATH = fontconfig_path
[ 360, 100 ]
def METHOD_NAME(self):
    dups = self._findDuplicates()
    for addr, syms in dups.items():
        for sym in syms:
            included = [other for other in syms
                        if other in self._symbolsAbsolute
                        and other.endswith("_" + sym)]
            for inc in included:
                self.log.debug("Deleting absolute symbol " + inc)
                del self._symbolsAbsolute[inc]
[ 950 ]
def METHOD_NAME(self): """Return self.user if set, otherwise return self.request.user""" if self.user is not None: return self.user return self.request.user
[ 19, 21 ]
def METHOD_NAME(apps, schema_editor):
    Sample = apps.get_model("seqr", "Sample")
    IgvSample = apps.get_model("seqr", "IgvSample")
    db_alias = schema_editor.connection.alias
    alignment_samples = IgvSample.objects.using(db_alias).all()
    if alignment_samples:
        print('Transferring {} alignment samples'.format(len(alignment_samples)))
        Sample.objects.bulk_create([
            Sample(
                dataset_type='ALIGN',
                is_active=True,
                guid=sample.guid,
                dataset_file_path=sample.file_path,
                last_modified_date=sample.last_modified_date,
                individual=sample.individual,
                created_date=sample.created_date,
                created_by=sample.created_by,
            ) for sample in alignment_samples])
        print('Deleting old sample models')
        alignment_samples.delete()
[ 132, -1, 379, 24, 5508, 700 ]
def METHOD_NAME(self, session): pass
[ 69, 1072 ]
def METHOD_NAME(self): return [Tags.CONTINUOUS]
[ 146, 114 ]
def METHOD_NAME(obj, phobostype=None):
    """Returns the name of an object relevant to Phobos.
    An optional *phobostype* parameter can be provided for objects with
    multiple uses, such as link objects (which also contain joint and
    motor information).

    If no *phobostype* is provided, the phobostype of the object is used.
    The object name itself is returned, stripped of namespaces.

    Args:
        obj(bpy.types.Object): object for which the name is requested
        phobostype: phobostype if relevant (e.g. 'motor') (Default value = None)

    Returns:
        : str -- The object's name
    """
    if obj is None:
        return None
    nametype = phobostype if phobostype else obj.phobostype
    return stripNamespaceFromName(obj.name)
[ 19, 279, 156 ]
def METHOD_NAME(self):
    order = mock.Mock()
    order.info = {"hello": "world"}
    with self.assertRaises(ControlError):
        self.control._on_error(order, "test")
    order.violation.assert_called_with("Order has violated: None Error: test")
[ 9, 69, 168 ]
def METHOD_NAME():
    with pytest.raises(ValueError):
        ContinuousSearchSpace(lower_bounds=jnp.array([]), upper_bounds=jnp.array([]))
[ 9, 2684, 1070, 173, 35, 634 ]
def METHOD_NAME(self):
    def imageformats_deps():
        components = []
        components.append("zlib::zlib")
        if self.options.with_jpeg == "libjpeg":
            components.append("libjpeg::libjpeg")
        elif self.options.with_jpeg == "libjpeg-turbo":
            components.append("libjpeg-turbo::jpeg")
        elif self.options.with_jpeg == "mozjpeg":
            components.append("mozjpeg::libjpeg")
        if self.options.with_jpeg2000:
            components.append("openjpeg::openjpeg")
        if self.options.with_png:
            components.append("libpng::libpng")
        if self.options.with_webp:
            components.append("libwebp::libwebp")
        if self.options.with_openexr or self.options.with_tiff:
            components.append("openexr::openexr")
        if self.options.with_raw:
            components.append("libraw::libraw")
        if self.options.with_jxr:
            components.append("jxrlib::jxrlib")
        if self.options.with_tiff:
            components.append("libtiff::libtiff")
        return components

    self.cpp_info.components["FreeImage"].libs = ["freeimage"]
    self.cpp_info.components["FreeImage"].requires = imageformats_deps()
    self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
    self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]

    if not self.options.shared:
        self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
[ 360, 100 ]
def METHOD_NAME(self):
[ 9, 654, 1032, 3212, 45, 1032, 168 ]
def METHOD_NAME():
    start_date = arrow.get("2014-09-30")
    end_date = arrow.get("2014-10-01")
    datespan = {
        "start_date": start_date.format("YYYY-MM-DD"),
        "end_date": end_date.format("YYYY-MM-DD"),
    }
    ts = parse_as_when(datespan)
    assert isinstance(ts, DateSpan)
    assert ts.start == start_date
    assert ts.end == end_date
    assert ts.spanning
    assert ts.all_day
    assert not ts.is_time
    assert ts.is_date
    assert ts.delta == timedelta(days=1)
[ 9, 1646, 18192 ]
def METHOD_NAME(self, arg): self.state.list_artifacts()
[ 74, 5277 ]
def METHOD_NAME(self): """Make a database connection.""" raise NotImplementedError
[ 707 ]
def METHOD_NAME(self, container_id: str, container_type: ContainerType, path: List[str]) -> IntAttribute: raise NeptuneOfflineModeFetchException
[ 19, 962, 309 ]
def METHOD_NAME(self): """The index of current shape to be drawn. Returns ------- Int The index as length of shapes attribute - 1. """ return len(self.shapes) - 1
[ 1056, 555 ]
def METHOD_NAME(self):
    import matplotlib as _mpl
    _mpl.use('Agg')
    import numpy as np
    from brian2 import prefs
    from brian2.utils.filetools import ensure_directory_of_file
    prefs.codegen.target = self.codegen_target
    prefs.core.default_float_dtype = self.dtype
    # Move to the file's directory for the run, so that it can do relative
    # imports and load files (e.g. figure style files)
    curdir = os.getcwd()
    os.chdir(os.path.dirname(self.filename))
    sys.path.append(os.path.dirname(self.filename))
    try:
        runpy.run_path(self.filename, run_name='__main__')
        if self.codegen_target == 'cython' and self.dtype == np.float64:
            for fignum in _mpl.pyplot.get_fignums():
                fname = os.path.relpath(self.filename, self.example_dir)
                fname = fname.replace('/', '.').replace('\\', '.')
                fname = fname.replace('.py', '.%d.png' % fignum)
                fname = os.path.abspath(
                    os.path.join(self.example_dir,
                                 '../docs_sphinx/resources/examples_images/',
                                 fname))
                ensure_directory_of_file(fname)
                _mpl.pyplot.figure(fignum).savefig(fname)
    finally:
        _mpl.pyplot.close('all')
        os.chdir(curdir)
        sys.path.remove(os.path.dirname(self.filename))
        device.reinit()
        set_device('runtime')
[ 2596 ]
def METHOD_NAME(coookie_widget):
    coookie_widget.setup({
        "opt_1": "test",
        "opt_2": "{{ cookiecutter.opt_1 }}",
    })
    ows = coookie_widget._widgets
    assert ows["opt_2"][1].get_value() == ows["opt_1"][1].get_value()
[ 9, 16544, 706, 338 ]
def METHOD_NAME(self): return self._data_bounds
[ 19, 365, 634 ]
def METHOD_NAME(op: Operator):
    r"""Expectation value of the supplied observable.

    **Example:**

    .. code-block:: python3

        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev)
        def circuit(x):
            qml.RX(x, wires=0)
            qml.Hadamard(wires=1)
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliY(0))

    Executing this QNode:

    >>> circuit(0.5)
    -0.4794255386042029

    Args:
        op (Observable): a quantum observable object

    Returns:
        ExpectationMP: measurement process instance
    """
    if not op.is_hermitian:
        warnings.warn(f"{op.name} might not be hermitian.")
    return ExpectationMP(obs=op)
[ 9082 ]
def METHOD_NAME(*args, **kwargs):
    if len(args) == 1 and len(kwargs) == 0:
        return _torch_reduce_all(fn)(args[0])
    return _torch_reduce_dim(fn)(*args, **kwargs)
[ 332, 667 ]
def METHOD_NAME(simple_protein):
    """
    Protein-like molecule with cystein bridges removed using
    :func:`tune_cystein_bridges.remove_cystein_bridge_edges`.
    """
    graph = copy.deepcopy(simple_protein)
    tune_cystein_bridges.remove_cystein_bridge_edges(graph)
    return graph
[ 53, 8707, 15653 ]
def METHOD_NAME(next_link=None):
    request = prepare_request(next_link)

    pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    return pipeline_response
[ 19, 243 ]
def METHOD_NAME(component_config: HttpCounterSetup): return HttpCounter(device_config.id, component_config, device_config.configuration.url)
[ 129, 2469, 1007 ]
def METHOD_NAME( self ) :
[ 9, 4161, 130, 1272, 623, 198, 7580 ]
def METHOD_NAME(): """ >>> no_as() enter hello exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'> """ with ContextManager("value"): print("hello")
[ 654, 947 ]
def METHOD_NAME(Str):
    Str = Str.rstrip(" \n")
    Str = Str.replace("\t", " ")
    Str += "\n"
    return Str
[ 356, 341, 5859 ]
def METHOD_NAME(im, to_bgr=False):
    im = np.swapaxes(im, 1, 2)
    im = np.swapaxes(im, 1, 0)
    if to_bgr:
        im = im[[2, 1, 0], :, :]
    return im
[ 2755 ]
def METHOD_NAME(self):
    if self.config.Campaign_AmbushEvade:
        return self._handle_ambush_evade()
    else:
        return self._handle_ambush_attack()
[ 276, 11792 ]
def METHOD_NAME(
    cls, exception: Union[BaseException, Type[BaseException]]
) -> List[str]:
    """For an Exception, return a list of all its base classes that
    inherit from Exception.

    Parameters
    ----------
    exception:
        The exception or exception class whose ancestors should be retrieved

    Returns
    -------
    A list of all base classes (and their base classes, etc.) that inherit
    from Exception. They will be in no particular order.
    """
    if isinstance(exception, BaseException):
        exception_type = type(exception)
    else:
        exception_type = exception

    ancestors = []
    to_traverse = [exception_type]
    self_classpath = f"{exception_type.__module__}.{exception_type.__name__}"

    while len(to_traverse) > 0:
        class_ = to_traverse.pop()
        for base in class_.__bases__:
            if not issubclass(base, Exception):
                # only interested in exception classes
                continue
            classpath = f"{base.__module__}.{base.__name__}"
            if classpath not in ancestors and self_classpath != classpath:
                ancestors.append(classpath)
                to_traverse.append(base)

    return ancestors
[ 3930, 280, 442 ]
def METHOD_NAME(size, source):
    for i in range(0, len(source), size):
        yield source[i:i + size]
[ 831 ]
def METHOD_NAME(self, args): """ Set the bucket Length. Will trigger a recalculation of buckets. Can take a while. Usage : resetBucketLength <typeName> <DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName> should exist and inherit the base type """ try: argList = args.split() if argList: typeName = argList[0].strip() else: gLogger.error("No type name specified") return # Try to import the type result = self.objectLoader.loadObject(f"DIRAC.AccountingSystem.Client.Types.{typeName}") if not result["OK"]: return result typeClass = result["Value"] gLogger.info(f"Loaded type {typeClass.__name__}") typeDef = typeClass().getDefinition() acClient = DataStoreClient() retVal = acClient.setBucketsLength(typeDef[0], typeDef[3]) if retVal["OK"]: gLogger.info("Type registered successfully") else: gLogger.error(f"Error: {retVal['Message']}") except Exception: self.showTraceback()
[ 74, 656, 2538, 799 ]
def METHOD_NAME(self, params):
    dest = params["dest_project"]

    # Make sure destination path exists.
    os.makedirs(os.path.join("/media", str(dest)), exist_ok=True)

    # Retrieve media that will be cloned.
    response_data = []
    original_medias = get_media_queryset(self.kwargs["project"], params)

    # If there are too many Media to create at once, raise an exception.
    if original_medias.count() > self.MAX_NUM_MEDIA:
        raise Exception(
            "Maximum number of media that can be cloned in one request is "
            f"{self.MAX_NUM_MEDIA}. Try paginating request with start, stop, "
            "or after parameters."
        )

    # If given media type is not part of destination project, raise an exception.
    if params["dest_type"] == -1:
        type_obj = MediaType.objects.filter(project=dest)[0]
    else:
        type_obj = MediaType.objects.get(pk=params["dest_type"])
        if type_obj.project.pk != dest:
            raise Exception("Destination media type is not part of destination project!")

    # Look for destination section, if given.
    section = None
    if params.get("dest_section"):
        sections = Section.objects.filter(project=dest, name__iexact=params["dest_section"])
        if sections.count() == 0:
            section = Section.objects.create(
                project=Project.objects.get(pk=dest),
                name=params["dest_section"],
                tator_user_sections=str(uuid1()),
            )
        else:
            section = sections[0]

    objs = self._media_obj_generator(original_medias, dest, params["dest_type"], section)
    medias = bulk_create_from_generator(objs, Media)

    # Update resources.
    for media in medias:
        if media.media_files:
            for key in [
                "streaming",
                "archival",
                "audio",
                "image",
                "thumbnail",
                "thumbnail_gif",
                "attachment",
            ]:
                for f in media.media_files.get(key, []):
                    Resource.add_resource(f["path"], media)
                    if key == "streaming":
                        Resource.add_resource(f["segment_info"], media)

    # Return created IDs.
    ids = [media.id for media in medias]
    return {"message": f"Successfully cloned {len(ids)} medias!", "id": ids}
[ 72 ]
def METHOD_NAME(self):
    # For legacy compatibility
    pass
[ 3977 ]
def METHOD_NAME(self):
[ 38, 1866, 1201 ]
def METHOD_NAME(self): copy(self, "COPYING.LIB", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) if self.settings.os == "Windows": cmake = CMake(self) cmake.install() else: autotools = Autotools(self) autotools.install() rmdir(self, os.path.join(self.package_folder, "lib", "locale")) if self.options.get_safe("shared"): rm(self, "*.a", os.path.join(self.package_folder, "lib")) rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) rmdir(self, os.path.join(self.package_folder, "share"))
[ 360 ]
def METHOD_NAME(self): """ Return the parameters of a :class:`.Distribution` object. To update the parameters of a :class:`.JointIndependent` or a :class:`.JointCopula` distribution, each parameter is assigned a unique string identifier as :code:`key_index` - where :code:`key` is the parameter name and :code:`index` the index of the marginal (e.g., location parameter of the 2nd marginal is identified as :code:`loc_1`). :return: Parameters of the distribution. """ return self.parameters
[ 19, 386 ]
def METHOD_NAME(self, num, lang, gender=None, category=None):
    suffixes = self.get_suffixes(num, lang, gender=gender, category=category)
    if not suffixes:
        return None
    return random.choice(suffixes)
[ 19, 4064 ]
def METHOD_NAME(self, resolution: Resolution) -> str:
    """
    Returns the message to publish.
    """
    external_url = get_server_setting(ServerSettingsVar.SEMATIC_DASHBOARD_URL)
    root_run = get_run(run_id=resolution.root_id)

    # TODO: in the future we might either have a url-aware server-side component build
    # this url for us (e.g. have resolutions.py build a path to the specific
    # resolution panel), or have a url which only contains the resolution root id
    # redirect to the full url path
    resolution_url = _RESOLUTION_URL_TEMPLATE.format(
        external_url=external_url,
        resolution_id=resolution.root_id,
    )

    message = _MESSAGE_TEMPLATE.format(
        resolution_name=root_run.name or root_run.function_path,
        resolution_url=resolution_url,
        short_resolution_id=resolution.root_id[0:6],
    )

    return message
[ 19, 277 ]
def METHOD_NAME(self):
    logger.debug(
        f"[cyan]Starting dashboard extraction from generic module {self.module_json_data.get('name')}[/cyan]"
    )
    self.create_output_dirs()
    self.create_dashboards()
    self.create_module()
    return 0
[ 265, 763 ]
def METHOD_NAME(test_name, token, expected_header_value):
    """
    Should match passed in token, no matter how many times token is retrieved.
    """
    token_provider = InterpolatedStringTokenProvider(config=config, api_token=token, parameters=parameters)
    token_auth = BearerAuthenticator(token_provider, config, parameters=parameters)
    header1 = token_auth.get_auth_header()
    header2 = token_auth.get_auth_header()

    prepared_request = requests.PreparedRequest()
    prepared_request.headers = {}
    token_auth(prepared_request)

    assert {"Authorization": expected_header_value} == prepared_request.headers
    assert {"Authorization": expected_header_value} == header1
    assert {"Authorization": expected_header_value} == header2
[ 9, 7070, 466, 11429 ]
def METHOD_NAME(self): """ L{MonkeyPatcher} is a context manager that applies its patches on entry and restore original values on exit. """ self.monkeyPatcher.addPatch(self.testObject, "foo", "patched value") with self.monkeyPatcher: self.assertEqual(self.testObject.foo, "patched value") self.assertEqual(self.testObject.foo, self.originalObject.foo)
[ 9, 198, 722 ]
def METHOD_NAME(mock_get_db):
    with patch('superagi.helper.auth.db') as mock_auth_db:
        response = client.get(
            "/models_controller/verify_end_point?model_api_key=mock_key&end_point=mock_point&model_provider=mock_provider"
        )
        assert response.status_code == 200
[ 9, 1162, 1798, 1669, 1434 ]
def METHOD_NAME():
    # init away from the data, crash with a sensible warning
    ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
    msg = "No point was within bandwidth=0.1"
    with pytest.raises(ValueError, match=msg):
        ms.fit(
            X,
        )
[ 9, -1, 75, 9969 ]
def METHOD_NAME(l_table: Table, r_table: Table, on: Union[str, Sequence[str]] = None,
                joins: Union[str, Sequence[str]] = None) -> Table:
    """The left_outer_join function creates a new table containing rows that have matching values in both tables.
    If there are multiple matches between a row from the left table and rows from the right table, all matching
    combinations will be included. Additionally, non-matching rows from the left table will also be included in
    the new table. If no columns to match (on) are specified, then every combination of left and right table rows
    is included.

    Args:
        l_table (Table): the left table
        r_table (Table): the right table
        on (Union[str, Sequence[str]]): the column(s) to match, can be a common name or an equal expression,
            i.e. "col_a = col_b" for different column names
        joins (Union[str, Sequence[str]], optional): the column(s) to be added from the right table to the result
            table, can be renaming expressions, i.e. "new_col = col"; default is None, meaning all the columns
            from the right table

    Returns:
        a new Table

    Raises:
        DHError
    """
    try:
        on = to_sequence(on)
        joins = to_sequence(joins)
        with auto_locking_ctx(l_table, r_table):
            if joins:
                return Table(j_table=_JOuterJoinTools.leftOuterJoin(
                    l_table.j_table, r_table.j_table, ",".join(on), ",".join(joins)))
            else:
                return Table(j_table=_JOuterJoinTools.leftOuterJoin(
                    l_table.j_table, r_table.j_table, ",".join(on)))
    except Exception as e:
        raise DHError(e, message="failed to perform left-outer-join on tables.") from e
[ 879, 3945, 2831 ]
def METHOD_NAME(self):
    ns = Namespace(
        role=None,
        scope="fake-scope",
        system_assigned=None,
        user_assigned=None)
    with self.assertRaises(InvalidArgumentValueError) as context:
        validate_app_identity_assign_or_warning(ns)
    self.assertTrue("Parameter \"role\" and \"scope\" should be used together." in str(context.exception))
[ 9, 913, 61, 1018, 130, 1304, 14347 ]
def METHOD_NAME(data, width, height, depth, version):
    row_size = width * depth // 8
    with io.BytesIO(data) as fp:
        rows = [rle_impl.encode(fp.read(row_size)) for _ in range(height)]
    bytes_counts = array.array(('H', 'I')[version - 1], map(len, rows))
    encoded = b''.join(rows)
    with io.BytesIO() as fp:
        write_be_array(fp, bytes_counts)
        fp.write(encoded)
        result = fp.getvalue()
    return result
[ 421, 9285 ]
def METHOD_NAME(local_name): assert QName(None, None, local_name), 'qname bool'
[ 9, 863, 2019 ]
def METHOD_NAME(job): _handle_job_event(job)
[ 202, 5780 ]
def METHOD_NAME(self, username, password):
    user, created = User.objects.get_or_create(username=username)
    user.set_password(password)
    self._add_permissions_to_user(user, save=False)
    user.save()
    return user
[ 129, 21 ]
def METHOD_NAME(cls, cfg): """Validate arguments :param cfg: project configuration :type cfg: dict """ schema_yml = """ measures: list(include('measure-spec'), required=False) reporting_measures: list(include('measure-spec'), required=False) timeseries_csv_export: map(required=False) --- measure-spec: measure_dir_name: str(required=True) arguments: map(required=False) """ workflow_generator_args = cfg['workflow_generator']['args'] schema_yml = re.sub(r'^ {8}', '', schema_yml, flags=re.MULTILINE) schema = yamale.make_schema(content=schema_yml, parser='ruamel') data = yamale.make_data(content=json.dumps(workflow_generator_args), parser='ruamel') return yamale.METHOD_NAME(schema, data, strict=True)
[ 187 ]
def METHOD_NAME(self): return self.gw.ver
[ 281 ]
def METHOD_NAME(self, timeout: Optional[float] = None):
    # Called by HttpProtocol at the end of connection_task
    # If we've upgraded to websocket, we do our own closing
    if self.websocket is not None:
        # Note, we don't want to use websocket.close()
        # That is used for user's application code to send a
        # websocket close packet. This is different.
        self.websocket.end_connection(1001)
    else:
        super().METHOD_NAME()
[ 1462 ]
def METHOD_NAME(self, **options):
    if not self.check_memory():
        raise Exception(
            "This instance has less than the recommended memory. Try running the import from a larger instance."
        )

    url = options["url"]
    self.stdout.write("Downloading data from %s ..." % (url))
    tmp = tempfile.NamedTemporaryFile()
    urllib.request.urlretrieve(url, tmp.name)
    tempdir = unzip(tmp.name)
    data_path = os.path.join(tempdir, "Data")

    try:
        self.stdout.write("Creating temp table..")
        self.create_temp_table()

        # import ONSPD into the temp table
        cmd = LocalImporter()
        cmd.table_name = self.temp_table_name
        cmd.path = data_path
        cmd.header = HEADERS["aug2022"]
        cmd.import_onspd()

        # grab the index CREATE statements from the old table before
        # we drop it. This will ensure we create the indexes on the
        # new table with the exact names django expects them to have
        # (e.g: uk_geo_utils_onspd_pcds_9d376544_uniq )
        # so we can still run migrations and stuff on it
        indexes = self.get_index_statements()

        self.stdout.write("Building indexes..")
        for index in indexes:
            self.cursor.execute(index["temp_index_create_statement"])

        # drop the old table, swap in the new one and rename the indexes
        # do this bit in a transaction so if it fails
        # we don't leave ourselves in an inconsistent state
        with transaction.atomic():
            self.stdout.write("Swapping tables..")
            self.swap_tables()

            self.stdout.write("Renaming indexes..")
            for index in indexes:
                self.cursor.execute(index["index_rename_statement"])
    finally:
        self.cursor.execute(
            "DROP TABLE IF EXISTS %s;" % (self.temp_table_name)
        )

    self.stdout.write("Cleaning up temp files..")
    self.cleanup(tempdir)
    self.stdout.write("...done")
[ 276 ]
def METHOD_NAME(self, color, alpha=1.0):
    if color is None:
        self.brush = None
    else:
        self.brush = self.cr.get_brush(color, alpha=alpha)
[ 0, 1917 ]
def METHOD_NAME():
    aead.register()
    mac.register()
    daead.register()
    prf.register()
    testing_servers.start('key_version')
[ 0, 1, 298 ]
def METHOD_NAME():
    # Tests the exponential cutoff
    ks = np.geomspace(1E-2, 15., 128)
    t = ccl.nl_pt.PTNumberCountsTracer(b1=1.0)

    ptc1 = ccl.nl_pt.LagrangianPTCalculator(cosmo=COSMO)
    pk2d1 = ptc1.get_biased_pk2d(t, tracer2=t)
    pk1 = pk2d1(ks, 1.0, cosmo=COSMO)

    ptc2 = ccl.nl_pt.LagrangianPTCalculator(k_cutoff=10., n_exp_cutoff=2.,
                                            cosmo=COSMO)
    pk2d2 = ptc2.get_biased_pk2d(t, tracer2=t)
    pk2 = pk2d2(ks, 1.0, cosmo=COSMO)

    expcut = np.exp(-(ks/10.)**2)
    assert np.allclose(pk1*expcut, pk2, atol=0, rtol=1E-3)
[ 9, 6454, 2627, 4833 ]
def METHOD_NAME(self, squeeze_dims, inplace, gc, dc):
    shape = [
        1 if dim in squeeze_dims else np.random.randint(1, 5)
        for dim in range(4)
    ]
    X = np.random.rand(*shape).astype(np.float32)
    op = core.CreateOperator(
        "Squeeze", "X", "X" if inplace else "Y", dims=squeeze_dims
    )
    self.assertDeviceChecks(dc, op, [X], [0])
[ 9, 3822 ]
def METHOD_NAME(step): """ Step to create a contextElement if doesnt exist, and add the attributes data defined before add a context element with the before attrs and the followin entity properties | entity_id | entity_type | is_pattern(optional) | :param step: :return: """ check_world_attribute_is_not_none(['attributes_creation']) if not isinstance(world.context_elements, ContextElements): world.context_elements = ContextElements() for line in step.hashes: if "is_pattern" in line: world.context_elements.add_context_element(line['entity_id'], line['entity_type'], world.attributes_creation, line['is_pattern']) else: world.context_elements.add_context_element(line['entity_id'], line['entity_type'], world.attributes_creation)
[ 238, 385, 198, 669, 41, 983, 1553 ]
def METHOD_NAME(
    circuit: 'cirq.AbstractCircuit', pauli_string: 'cirq.PauliString'
) -> 'cirq.AbstractCircuit':
    """A circuit measuring the given observable at the end of the given circuit."""
    assert pauli_string
    return circuit.from_moments(
        *circuit,
        pauli_string.to_z_basis_ops(),
        ops.measure(*sorted(pauli_string.keys()), key='out'),
    )
[ 1708, 222, 1299, 144, 6529 ]
def METHOD_NAME(q_large):
    tprs_full = dmtbx.triplet_generator(
        miller_set=q_large,
        discard_weights=True)
    tprs = dmtbx.triplet_generator(
        miller_set=q_large,
        amplitudes=q_large.data(),
        max_relations_per_reflection=0,
        discard_weights=True)
    assert tprs.n_relations().all_eq(tprs_full.n_relations())
    for n in (1, 10, 100, 1000):
        tprs = dmtbx.triplet_generator(
            miller_set=q_large,
            amplitudes=q_large.data(),
            max_relations_per_reflection=n,
            discard_weights=True)
        assert (tprs.n_relations() >= n).all_eq(tprs.n_relations() == n)
    n = 3
    tprs = dmtbx.triplet_generator(
        miller_set=q_large,
        amplitudes=q_large.data(),
        max_relations_per_reflection=n,
        discard_weights=True)
    n_rel_full = tprs_full.n_relations()
    n_rel = tprs.n_relations()
    amp = q_large.data()
    for ih in range(q_large.indices().size()):
        if (n_rel[ih] == n_rel_full[ih]):
            continue
        aa_full = flex.double()
        for relation in tprs_full.relations_for(ih):
            aa_full.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa = flex.double()
        for relation in tprs.relations_for(ih):
            aa.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa_full = aa_full.select(flex.sort_permutation(data=aa_full, reverse=True))
        assert approx_equal(aa_full[:n], aa)
[ 3446, 5419 ]
def METHOD_NAME(context, ipam, prefixlen):
    ifname = context.new_ifname
    ipaddr = getattr(context, ipam)
    ipaddr_fail = getattr(context, ipam)
    (
        context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up')
        .add_ip(address=ipaddr, prefixlen=prefixlen)
        .commit()
    )
    assert interface_exists(context.netns, ifname=ifname)
    assert address_exists(context.netns, ifname=ifname, address=ipaddr)
    with pytest.raises(KeyError):
        (context.ndb.interfaces[ifname].del_ip(address=ipaddr_fail).commit())
    assert interface_exists(context.netns, ifname=ifname)
    assert address_exists(context.netns, ifname=ifname, address=ipaddr)
[ 9, 1269, 1213, 590, 180 ]
def METHOD_NAME(self, speed=ETHER_SPEED_MBIT_1000):
    """
    get pause time for given link speed in seconds

    :param speed: select link speed to get the pause time for, must be ETHER_SPEED_MBIT_[10,100,1000]  # noqa: E501
    :return: pause time in seconds
    :raises MACControlInvalidSpeedException: on invalid speed selector
    """
    try:
        return self.pause_time * {
            ETHER_SPEED_MBIT_10: (0.0000001 * 512),
            ETHER_SPEED_MBIT_100: (0.00000001 * 512),
            ETHER_SPEED_MBIT_1000: (0.000000001 * 512 * 2)
        }[speed]
    except KeyError:
        raise MACControlInvalidSpeedException('Invalid speed selector given. '  # noqa: E501
                                              'Must be one of ETHER_SPEED_MBIT_[10,100,1000]')  # noqa: E501
[ 19, 2009, 104 ]
def METHOD_NAME(self):
[ 9, 1024, 1089, 2200, 1563 ]
def METHOD_NAME(self, person_titles: list[str], page: int = 1, per_page: int = 25,
                num_of_employees: list[int] = [], person_location: str = "",
                organization_domains: str = "") -> str:
    """
    Execute the Apollo search tool.

    Args:
        person_titles : The titles of the people to search for.
        page : The page of results to retrieve.
        num_of_employees : The number of employees to filter by in format [start_range, end_range]. It is optional.
        person_location : Region country/state/city filter to search for. It is optional.
        organization_domains : The organization domains to search within.

    Returns:
        People data from the Apollo search.
    """
    people_data = self.apollo_search_results(page, per_page, person_titles, num_of_employees,
                                             person_location, organization_domains)
    logger.info(people_data)
    people_list = []
    if 'people' in people_data and len(people_data['people']) > 0:
        for person in people_data['people']:
            people_list.append({
                'first_name': person['first_name'],
                'last_name': person['last_name'],
                'name': person['name'],
                'linkedin_url': person['linkedin_url'],
                'email': person['email'],
                'headline': person['headline'],
                'title': person['title'],
            })
    return people_list
[ 750 ]
def METHOD_NAME(self) -> int: ...
[ 15924 ]
def METHOD_NAME(): """Checking post.italicAngle value.""" check = CheckTester(opentype_profile, "com.google.fonts/check/italic_angle") ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf")) # italic-angle, style, fail_message test_cases = [ [1, "Italic", WARN, "positive"], [0, "Regular", PASS, None], # This must PASS as it is a non-italic [-21, "ThinItalic", WARN, "over-20-degrees"], [-30, "ThinItalic", WARN, "over-20-degrees"], [-31, "ThinItalic", WARN, "over-30-degrees"], [-91, "ThinItalic", FAIL, "over-90-degrees"], [0, "Italic", FAIL, "zero-italic"], [-1, "ExtraBold", FAIL, "non-zero-upright"], ] for value, style, expected_result, expected_msg in test_cases: ttFont["post"].italicAngle = value if expected_result != PASS: assert_results_contain( check(ttFont, {"style": style}), expected_result, expected_msg, f"with italic-angle:{value} style:{style}...", ) else: assert_PASS( check(ttFont, {"style": style}), f"with italic-angle:{value} style:{style}...", ) # Cairo, check left and right-leaning explicitly ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.rightslanted.ttf")) assert_PASS(check(ttFont, {"style": "Italic"})) ttFont["post"].italicAngle *= -1 assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "positive") ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.leftslanted.ttf")) assert_PASS(check(ttFont, {"style": "Italic"})) ttFont["post"].italicAngle *= -1 assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "negative") ttFont = TTFont(TEST_FILE("cairo/CairoPlay-Italic.rightslanted.ttf")) assert_PASS(check(ttFont, {"style": "Italic"})) ttFont["glyf"]["I"].endPtsOfContours = [] ttFont["glyf"]["I"].coordinates = [] ttFont["glyf"]["I"].flags = [] ttFont["glyf"]["I"].numberOfContours = 0 assert_results_contain(check(ttFont, {"style": "Italic"}), WARN, "empty-glyphs")
[ 9, 250, 15122, 3057 ]
def METHOD_NAME(self):
    res = self._master_client.ready_for_ps_relaunch()
    self.assertTrue(res.success)
[ 9, 1338, 43, 3617, 8364 ]
def METHOD_NAME(model: type[Model]) -> set[tuple[str, str]]: ...
[ 19, 252, 379, 2779 ]
async def METHOD_NAME(self, image_bytes):
    file_abspath = self.normalize_path(self.context.request.url)
    if not self.validate_path(file_abspath):
        logger.warning(
            "[RESULT_STORAGE] unable to write outside root path: %s",
            file_abspath,
        )
        return
    temp_abspath = f"{file_abspath}.{str(uuid4()).replace('-', '')}"
    file_dir_abspath = dirname(file_abspath)
    logger.debug(
        "[RESULT_STORAGE] putting at %s (%s)",
        file_abspath,
        file_dir_abspath,
    )
    self.ensure_dir(file_dir_abspath)
    with open(temp_abspath, "wb") as _file:
        _file.write(image_bytes)
    move(temp_abspath, file_abspath)
[ 1276 ]
async def METHOD_NAME(self, scrim: Scrim, slot: AssignedSlot):
    await scrim.refresh_slotlist_message()
    await ScrimsSlotManager.refresh_guild_message(scrim.guild_id, scrim.id)
    with suppress(AttributeError, discord.HTTPException):
        await scrim.slotlist_channel.send(
            f"{slot.team_name} ({slot.owner.mention}) -> Claimed Slot {slot.num}"
        )
[ 18216, 2556 ]
def METHOD_NAME(file_name): """Delete a file if it exist. Cleanup after tests.""" if os.path.exists(file_name): os.remove(file_name)
[ 34, 171 ]
def METHOD_NAME(self): export_conandata_patches(self)
[ 294, 505 ]
def METHOD_NAME():
    with pytest.raises(ValueError):
        slackapi_from_slack_workspace({}, SecretReader(), "foo")
[ 9, 729, 1976, 45 ]
def METHOD_NAME(self, test_env):
    t = test_env.from_string(
        '{% import "module" as m %}{{ m.test() }}', globals={"foo": 42}
    )
    assert t.render() == "[42|23]"
    t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
    assert t.render() == "[|23]"
[ 9, 512, 41, 1779 ]
def METHOD_NAME(notify_db, notify_db_session, sample_invited_user):
    from_db = get_invited_user_by_id(sample_invited_user.id)
    assert from_db == sample_invited_user
[ 9, 19, 2487, 21, 604, 147 ]
def METHOD_NAME(client, data_type):
    single_class = {"class": "UuidTest", "properties": [{"dataType": [data_type], "name": "heat"}]}
    client.schema.create_class(single_class)
    created_class = client.schema.get("uuidTest")
    assert created_class["class"] == "UuidTest"
    client.schema.delete_class("UuidTest")
[ 9, 4977, 5431 ]
def METHOD_NAME() -> None: """Check whether directory development contain do not support syntax or not. * It should not ref document from other document in `docs` directory """ pattern = re.compile("(\\(\\.\\.[\\w./-]+\\.md\\))") dev_files_path = get_files_recurse(dev_en_dir) | get_files_recurse(dev_zh_dir) get_files_recurse(dev_en_dir) for path in dev_files_path: content = path.read_text() find = pattern.findall(content) assert ( not find ), f"File {str(path)} contain temporary not support syntax: {find}."
[ 828, 1207 ]
def METHOD_NAME(filename: unicode, mode: int = ..., device: int = ...) -> None: ...
[ -1 ]
def METHOD_NAME(self):
[ 9, 1341, 137, 3704, 3705, 2351, 3188 ]
def METHOD_NAME(): """Verify if we can pickle Table object""" table = Table() assert pickle.loads(pickle.dumps(table)) == table
[ 9, 217, 410, 279, 1046, 673, 12390 ]
def METHOD_NAME() -> Repo | None: """Git repository object. Returns: Repo: Repo object of content repo if exists else retun None. References: 1. GitPython - https://github.com/gitpython-developers/GitPython Notes: 1. Should be called when cwd inside content repository. """ try: if content_path := os.getenv("DEMISTO_SDK_CONTENT_PATH"): repo = Repo(content_path) logger.debug(f"Using content path: {content_path}") else: repo = Repo(Path.cwd(), search_parent_directories=True) except InvalidGitRepositoryError: logger.debug("Git repo was not found.") repo = None return repo
[ 1493 ]
def METHOD_NAME(obj, level=0):
    if obj is None:
        ret = 'null'
    elif isinstance(obj, str):
        ret = '"' + obj.replace('"', r'\"') + '"'
    elif isinstance(obj, list):
        elts = [METHOD_NAME(elt, level + 1) for elt in obj]
        ret = '[' + ', '.join(elts) + ']'
    elif isinstance(obj, dict):
        elts = ['"%s": %s' % (key.replace('"', r'\"'), METHOD_NAME(obj[key], level + 1))
                for key in sorted(obj.keys())]
        ret = '{' + ', '.join(elts) + '}'
    else:
        assert False  # not implemented
    if level == 1:
        ret = '\n' + ret
    return ret
[ 24, 763 ]
def METHOD_NAME(client, io_commands):
    for command in io_commands:
        client.exec_command(sudo=True, cmd=command, long_running=True)
[ 22, 249, 2458 ]
def METHOD_NAME():
    parser = GenewaysActionParser(data_folder)
    actions = parser.actions
    assert len(actions) == 3

    action0 = actions[0]
    action1 = actions[1]
    action2 = actions[2]

    assert action0.hiid == '1'
    assert action0.up == '2'
    assert action0.dn == '1'
    assert action0.actiontype == 'phosphorylate'
    assert action0.action_count == '1'
    assert action0.actionmention_count == '2'
    assert action0.plo == 'P'
    assert action0.max_score == '0.77'
    assert action0.max_prec == '0.88'
    assert len(action0.action_mentions) == 2

    assert action1.hiid == '2'
    assert action1.up == '3'
    assert action1.dn == '4'
    assert action1.actiontype == 'bind'
    assert action1.action_count == '1'
    assert action1.actionmention_count == '1'
    assert action1.plo == 'P'
    assert action1.max_score == '0.12'
    assert action1.max_prec == '0.34'
    assert len(action1.action_mentions) == 1

    assert action2.hiid == '3'
    assert action2.up == '5'
    assert action2.dn == '6'
    assert action2.actiontype == 'bind'
    assert action2.action_count == '1'
    assert action2.actionmention_count == '1'
    assert action2.plo == 'P'
    assert action2.max_score == '0.16'
    assert action2.max_prec == '0.17'
    assert len(action2.action_mentions) == 1
[ 9, 14351, 1006, 1319 ]
def METHOD_NAME(self):
    infile = self._create_infile('{"key":"💩"}')
    outfile = os_helper.TESTFN + '.out'
    self.addCleanup(os.remove, outfile)
    assert_python_ok('-m', 'json.tool', infile, outfile)
    with open(outfile, "rb") as f:
        lines = f.read().splitlines()
    # asserting an ascii encoded output file
    expected = [b'{', rb'    "key": "\ud83d\udca9"', b"}"]
    self.assertEqual(lines, expected)
[ 9, 602, 4428, 235 ]
def METHOD_NAME(self, x: torch.tensor) -> torch.tensor: return (x - 1.0e04) / 3.0e4
[ 104 ]
def METHOD_NAME(dvc):
    props = {"x": "1"}
    stage = create_stage(PipelineStage, dvc, plots=["plot_file"], **kwargs)
    stage.outs[0].plot = props
    assert to_pipeline_file(stage)["something"][stage.PARAM_PLOTS] == [
        {"plot_file": props}
    ]
[ 9, 1288, 3048 ]