text stringlengths 15–7.82k | ids sequencelengths 1–7 |
---|---|
def METHOD_NAME(line):
"""Test sub header wrong."""
assert config._sub_header_re.match(line) is None | [
9,
1066,
572,
2647,
909
] |
def METHOD_NAME(self, event):
selected_rows = {i.row() for i in self.selectedIndexes()}
breakpoints = [self.breakpoint_mgr.breakpoints[r] for r in selected_rows]
menu = QMenu("", self)
if len(breakpoints):
if len(breakpoints) == 1:
menu.addAction("Edit breakpoint", lambda: self.edit_breakpoint(breakpoints[0]))
menu.addAction(
"Remove breakpoint" + ("s" if len(breakpoints) > 1 else ""),
lambda: self.remove_breakpoints(breakpoints),
)
menu.addSeparator()
menu.addAction("New breakpoint", self.new_breakpoint)
menu.exec_(event.globalPos()) | [
198,
2470,
417
] |
def METHOD_NAME(self, idempotency_key=None, **params):
return self._request(
"get",
"/v1/sources/{source}/source_transactions".format(
source=util.sanitize_id(self.get("id"))
),
idempotency_key=idempotency_key,
params=params,
) | [
245,
1458,
1465
] |
def METHOD_NAME(self):
self.tube_settings[Tags.STRUCTURE_START_MM] = [0.5, 0, 0.5]
self.tube_settings[Tags.STRUCTURE_END_MM] = [0.5, 5, 0.5]
self.tube_settings[Tags.STRUCTURE_RADIUS_MM] = 0.4
ts = CircularTubularStructure(self.global_settings, self.tube_settings)
assert 0 < ts.geometrical_volume[0, 0, 0] < 1
assert 0 < ts.geometrical_volume[0, 4, 0] < 1 | [
9,
16237,
3811,
2351,
2276,
1516,
206
] |
def METHOD_NAME(fname, info, data_name="data"):
"""Load continuous (raw) data from a FieldTrip preprocessing structure.
This function expects to find single trial raw data (FT_DATATYPE_RAW) in
the structure data_name is pointing at.
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : path-like
Path and filename of the ``.mat`` file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
data_name : str
Name of heading dict/variable name under which the data was originally
saved in MATLAB.
Returns
-------
raw : instance of RawArray
A Raw Object containing the loaded data.
See :class:`mne.io.Raw` for documentation of attributes and methods.
See Also
--------
mne.io.Raw : Documentation of attributes and methods of RawArray.
"""
read_mat = _import_pymatreader_funcs("FieldTrip I/O")
fname = _check_fname(fname, overwrite="read", must_exist=True)
ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name])
# load data and set ft_struct to the heading dictionary
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data = np.array(ft_struct["trial"]) # create the main data array
if data.ndim > 2:
data = np.squeeze(data)
if data.ndim == 1:
data = data[np.newaxis, ...]
if data.ndim != 2:
raise RuntimeError(
"The data you are trying to load does not seem to " "be raw data"
)
raw = RawArray(data, info) # create an MNE RawArray
return raw | [
203,
772,
-1
] |
def METHOD_NAME(self, request):
"""
Extract relevant information from request to build a ClientValidationJWT
:param PreparedRequest request: request we will extract information from.
:return: ValidationPayload
"""
parsed = urlparse(request.url)
path = parsed.path
query_string = parsed.query or ""
return ValidationPayload(
method=request.method,
path=path,
query_string=query_string,
all_headers=request.headers,
signed_headers=ValidationClient.__SIGNED_HEADERS,
body=request.body or "",
) | [
56,
437,
288
] |
def METHOD_NAME(self, request: Dict) -> Dict:
return self._rm_client.create_simulation_application(**request) | [
579,
202,
377
] |
def METHOD_NAME():
df1 = GeoDataFrame({"col1": [1, 2], "geometry": s1}, crs=None)
df2 = GeoDataFrame({"col1": [1, 2], "geometry": s1}, crs={})
assert_geodataframe_equal(df1, df2) | [
9,
654,
8901
] |
def METHOD_NAME(match):
return '#' + match.group() | [
826,
4635
] |
def METHOD_NAME(self):
object_name = 'Crab'
mission = 'integral_rev3_scw'
heasarc = Heasarc()
with self.isdc_context:
table = heasarc.query_object(
object_name,
mission=mission,
radius='1 degree',
time="2020-09-01 .. 2020-12-01",
resultmax=10,
good_isgri=">1000",
cache=False
)
assert len(table) > 0 | [
9,
343,
335
] |
def METHOD_NAME(rconfig_mock, bridged):
rconfig_mock.networks = {
TESTNET1: create_network_config(
'nic', IFACE0, bridged=bridged, vlan=VLAN101
)
}
networks = {
TESTNET1: create_network_config(
'nic', IFACE0, bridged=bridged, vlan=VLAN102
)
}
state = nmstate.generate_state(networks=networks, bondings={})
vlan102_state = create_vlan_iface_state(IFACE0, VLAN102)
disable_iface_ip(vlan102_state)
base_nic_state = create_ethernet_iface_state(IFACE0)
disable_iface_ip(base_nic_state)
remove_vlan101_state = {
nmstate.Interface.NAME: f'{IFACE0}.{VLAN101}',
nmstate.Interface.STATE: nmstate.InterfaceState.ABSENT,
}
expected_state = {
nmstate.Interface.KEY: [
vlan102_state,
remove_vlan101_state,
base_nic_state,
]
}
if bridged:
iface_bridge_state = create_bridge_iface_state(
TESTNET1,
f'{IFACE0}.{VLAN102}',
options=generate_bridge_options(stp_enabled=False),
)
disable_iface_ip(iface_bridge_state)
expected_state[nmstate.Interface.KEY].append(iface_bridge_state)
sort_by_name(expected_state[nmstate.Interface.KEY])
assert expected_state == state | [
9,
2004,
1228,
4838,
147
] |
def METHOD_NAME(patch_inspect=True):
"""
Main entry point for patching the ``collections.abc`` and ``inspect``
standard library modules.
"""
PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator
PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine
PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable
if patch_inspect:
import inspect
PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable | [
1575
] |
def METHOD_NAME(kafka_broker):
return kafka_broker.host + ':' + str(kafka_broker.port) | [
19,
707,
3
] |
def METHOD_NAME(dist_path, version, appimage_filename):
"""
Creates an AppImage file from the build artefacts created so far.
"""
copy_metadata_files(dist_path, version)
try:
os.remove(os.path.join(dist_path, appimage_filename)) # Ensure any old file is removed, if it exists.
except FileNotFoundError:
pass # If it didn't exist, that's even better.
generate_appimage(dist_path, appimage_filename)
sign_appimage(dist_path, appimage_filename) | [
56,
-1
] |
def METHOD_NAME(self):
if not self.success_url_name:
raise ImproperlyConfigured(
"Subclasses of wagtail.admin.views.generic.base.BaseOperationView must provide a "
"success_url_name attribute or a get_success_url method"
)
if self.next_url:
return self.next_url
return reverse(self.success_url_name, args=[quote(self.object.pk)]) | [
19,
1434,
274
] |
def METHOD_NAME(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values()) | [
6330,
2241
] |
def METHOD_NAME(self):
self.resource.get_by.return_value = [NETWORK_SET]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=NetworkSetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(network_set=NETWORK_SET)
) | [
9,
427,
130,
86,
1646,
365,
137
] |
def METHOD_NAME(state: State, blurpool_instance: BlurPool):
assert blurpool_instance.match(Event.INIT, state) == True | [
9,
10942,
668,
417
] |
def METHOD_NAME(self):
'''
The interface object for the
:doc:`Tenable Identity Exposure Lockout Policy APIs <lockout_policy>`.
'''
return LockoutPolicyAPI(self) | [
15059,
54
] |
def METHOD_NAME(val, validator_aok):
coerce_val = validator_aok.validate_coerce(val)
if isinstance(val, np.ndarray):
assert np.array_equal(coerce_val, val)
elif isinstance(val, list):
assert validator_aok.present(coerce_val) == tuple(val)
else:
assert coerce_val == val | [
9,
2290,
9463,
988,
227
] |
def METHOD_NAME(self):
spec = self.spec
# hsa-rocr-dev wants the directory containing the header files, but
# libelf adds an extra path (include/libelf) compared to elfutils
libelf_include = os.path.dirname(
find_headers("libelf", spec["elf"].prefix.include, recursive=True)[0]
)
args = [
self.define("LIBELF_INCLUDE_DIRS", libelf_include),
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
]
if self.spec.satisfies("@3.7.0:"):
args.append(self.define_from_variant("IMAGE_SUPPORT", "image"))
# device libs is bundled with llvm-amdgpu (default) or standalone
if self.spec.satisfies("^rocm-device-libs"):
bitcode_dir = spec["rocm-device-libs"].prefix.amdgcn.bitcode
else:
bitcode_dir = spec["llvm-amdgpu"].prefix.amdgcn.bitcode
args.append(self.define("BITCODE_DIR", bitcode_dir))
return args | [
334,
335
] |
def METHOD_NAME(self):
self.assertTrue(self.song.can_change_images) | [
9,
1046,
194,
3669
] |
def METHOD_NAME():
zone = np.asarray(["z1", "z2", "z2", "z1"])
assert_near(P.single.owner[zone], [100, 200, 200, 100]) | [
9,
69,
3802
] |
def METHOD_NAME(config, json_parsing=False):
admin_client = namespace["admin_client"]
cluster = namespace["cluster"]
projects = admin_client.list_project(name="System",
clusterId=cluster.id).data
assert len(projects) == 1
project = projects[0]
name = random_test_name("project-fluentd")
return admin_client.METHOD_NAME(name=name,
projectId=project.id,
fluentForwarderConfig=config,
enableJSONParsing=json_parsing,
outputFlushInterval=5
) | [
129,
155,
663
] |
def METHOD_NAME(self):
# If we run the normalization for the first time during the runtime, we have to gather the activity from DB
self.total_activity = self.total_activity or orm.sum(g.votes for g in db.ChannelMetadata)
channel_count = orm.count(db.ChannelMetadata.select(lambda g: g.status != LEGACY_ENTRY))
if not channel_count:
return
if self.total_activity > 0.0:
self.rescale(self.total_activity / channel_count)
self.bump_amount = 1.0 | [
1137
] |
def METHOD_NAME(self, flag): | [
9,
256,
40,
584
] |
def METHOD_NAME(db, client, username, password):
client.login(username=username, password=password)
url = reverse('upload')
response = client.post(url)
assert response.status_code == status_map['upload_post_empty'][username]
if not password:
assert response.url.startswith('/account/login/'), response.content | [
9,
172,
72,
35
] |
def METHOD_NAME(upgrader, pce_fcc_other_experiment):
value = upgrader.upgrade('functional_characterization_experiment', pce_fcc_other_experiment, current_version='5', target_version='6')
assert value['schema_version'] == '6'
assert value['plasmids_library_type'] == 'elements cloning'
assert value['notes'] == 'The plasmids_library_type of this pooled clone sequencing experiment needs to be checked as it was automatically upgraded by ENCD-5303.' | [
9,
4167,
2037,
2355,
738,
822,
24
] |
def METHOD_NAME(self):
data = {2: set([11]),
9: set([11, 8]),
10: set([11, 3]),
11: set([7, 5]),
8: set([7, 3, 8]), # includes something self-referential
}
orig = data.copy()
results = list(toposort(data))
assert data == orig | [
9,
362,
130,
680
] |
def METHOD_NAME(self):
rospy.logwarn("[PX4-SITL] Launching")
start_time = rospy.get_time()
args = ["./PX4-Autopilot/build/px4_sitl_default/bin/px4-commander",
"--instance", "0", "check"]
while rospy.get_time() - start_time < TIMEOUT:
process = spawn_process(args, insert_vglrun=False)
with process.stdout:
for line in iter(process.stdout.readline, ''):
if ("Prearm check: OK" in line):
return True
rospy.sleep(2)
return False | [
-1
] |
def METHOD_NAME(self, name: str, mode: int = 0o777) -> None: ... | [
6403
] |
def METHOD_NAME(self):
self.verbose = 1
self.tests_dir = "tests"
self.filter = None | [
15,
1881
] |
def METHOD_NAME(
environment_api_key: "EnvironmentAPIKey",
) -> None:
# Given
expected_client_api_key = environment_api_key.environment.api_key
expected_created_at = environment_api_key.created_at.isoformat()
expected_key = environment_api_key.key
# When
result = dynamodb.map_environment_api_key_to_environment_api_key_document(
environment_api_key,
)
# Then
assert result == {
"active": True,
"client_api_key": expected_client_api_key,
"created_at": expected_created_at,
"expires_at": None,
"id": Decimal(environment_api_key.pk),
"key": expected_key,
"name": "Test API Key",
} | [
9,
422,
1027,
58,
59,
24,
1027
] |
def METHOD_NAME(self):
"""get symbol points as line and test coords"""
symbol = self.map.symbolset.getSymbol(1)
assert symbol.name == 'circle'
line = symbol.getPoints()
assert line.numpoints == 1, line.numpoints
pt = self.getPointFromLine(line, 0)
self.assertPointsEqual(pt, mapscript.pointObj(1.0, 1.0)) | [
9,
19,
182
] |
def METHOD_NAME(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None, combine_size=None):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the tensor elements. The standard deviation is computed for the
flattened tensor by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened tensor.
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For tensors of
integer type the default is float64, for tensors of float types it is
the same as the array type.
out : Tensor, optional
Alternative output tensor in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input tensor.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`Tensor`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
combine_size: int, optional
The number of chunks to combine.
Returns
-------
standard_deviation : Tensor, see dtype parameter above.
If `out` is None, return a new tensor containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1, 2], [3, 4]])
>>> mt.std(a).execute()
1.1180339887498949
>>> mt.std(a, axis=0).execute()
array([ 1., 1.])
>>> mt.std(a, axis=1).execute()
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = mt.zeros((2, 512*512), dtype=mt.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> mt.std(a).execute()
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> mt.std(a, dtype=mt.float64).execute()
0.44999999925494177
"""
ret = sqrt(
var(
a,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
combine_size=combine_size,
)
)
if dtype is not None and ret.dtype != dtype:
ret = ret.astype(dtype)
return ret | [
1396
] |
def METHOD_NAME(self, request, *args, **kwargs):
jamf_instance = get_object_or_404(JamfInstance, pk=kwargs["pk"])
api_client = APIClient(**jamf_instance.serialize(decrypt_password=True))
jamf_instance_base_url = jamf_instance.base_url()
try:
setup_msg = api_client.setup()
except APIClientError:
msg = "Could not setup webhooks on {}.".format(jamf_instance_base_url)
messages.warning(request, msg)
logger.exception(msg)
else:
msg = "{}: {}".format(jamf_instance_base_url, setup_msg)
messages.info(request, msg)
logger.info(msg)
return redirect(jamf_instance) | [
19
] |
def METHOD_NAME(self):
"""
Test cluster SQL statement
"""
query = urllib.parse.quote("select count(*), min(indexid), max(indexid), avg(indexid) from txtai where text='This is a test'")
self.assertEqual(
self.client.get(f"search?query={query}").json(), [{"count(*)": 28, "min(indexid)": 0, "max(indexid)": 14, "avg(indexid)": 6.5}]
)
query = urllib.parse.quote("select count(*), text txt from txtai group by txt order by count(*) desc")
self.assertEqual(
self.client.get(f"search?query={query}").json(),
[{"count(*)": 28, "txt": "And another test"}, {"count(*)": 24, "txt": "This is a test"}],
)
query = urllib.parse.quote("select count(*), text from txtai group by text order by count(*) asc")
self.assertEqual(
self.client.get(f"search?query={query}").json(),
[{"count(*)": 24, "text": "This is a test"}, {"count(*)": 28, "text": "And another test"}],
)
query = urllib.parse.quote("select count(*) from txtai group by id order by count(*)")
self.assertEqual(self.client.get(f"search?query={query}").json(), [{"count(*)": 52}]) | [
9,
1621
] |
def METHOD_NAME(self, node: ASTComparisonOperator) -> str:
return self._expression_printer.METHOD_NAME(node) | [
38,
713,
837
] |
def METHOD_NAME(dtype, tIndex, fitResults, fitError=None, startParams=None, slicesUsed=None, resultCode=-1, **kwargs):
dtype = np.dtype(dtype)
if fitError is None:
fitError = -5e3 + 0 * fitResults
if startParams is None:
startParams = -5e3 + 0 * fitResults
slicesUsed = fmtSlicesUsed(slicesUsed)
ns = locals()
ns.update(kwargs)
res = np.zeros(1, dtype=dtype)
for n in dtype.names:
res[n] = _tuplify(ns[n])
return res | [
1699,
2187
] |
def METHOD_NAME(
self, resource_group_name: str, account_name: str, target_region: str, filter: str, **kwargs: Any
) -> AsyncIterable["_models.PercentileMetric"]:
"""Retrieves the metrics determined by the given filter for the given account target region. This
url is only for PBS and Replication Latency data.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param target_region: Target region to which data is written. Cosmos DB region, with spaces
between words and each word capitalized. Required.
:type target_region: str
:param filter: An OData filter expression that describes a subset of metrics to return. The
parameters that can be filtered are name.value (name of the metric, can have an or of multiple
names), startTime, endTime, and timeGrain. The supported operator is eq. Required.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PercentileMetric or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.PercentileMetric]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.PercentileMetricListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_metrics_request(
resource_group_name=resource_group_name,
account_name=account_name,
target_region=target_region,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PercentileMetricListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) | [
245,
1097
] |
def METHOD_NAME():
"""
There's a small optimization for literals to avoid creating unnecessary temps
>>> optimize_literals1()
10
"""
x = 5
return (x := 10) | [
5107,
-1
] |
def METHOD_NAME(ftylist_or_function=(), **kws):
"""vectorize(ftylist_or_function=(), target='cpu', identity=None, **kws)
A decorator that creates a NumPy ufunc object using Numba compiled
code. When no arguments or only keyword arguments are given,
vectorize will return a Numba dynamic ufunc (DUFunc) object, where
compilation/specialization may occur at call-time.
Args
-----
ftylist_or_function: function or iterable
When the first argument is a function, signatures are dealt
with at call-time.
When the first argument is an iterable of type signatures,
which are either function type object or a string describing
the function type, signatures are finalized at decoration
time.
Keyword Args
------------
target: str
A string for code generation target. Default to "cpu".
identity: int, str, or None
The identity (or unit) value for the element-wise function
being implemented. Allowed values are None (the default), 0, 1,
and "reorderable".
cache: bool
Turns on caching.
Returns
--------
A NumPy universal function
Examples
-------
@vectorize(['float32(float32, float32)',
'float64(float64, float64)'], identity=0)
def sum(a, b):
return a + b
@vectorize
def sum(a, b):
return a + b
@vectorize(identity=1)
def mul(a, b):
return a * b
"""
if isinstance(ftylist_or_function, str):
# Common user mistake
ftylist = [ftylist_or_function]
elif inspect.isfunction(ftylist_or_function):
return dufunc.DUFunc(ftylist_or_function, **kws)
elif ftylist_or_function is not None:
ftylist = ftylist_or_function
def wrap(func):
vec = Vectorize(func, **kws)
for sig in ftylist:
vec.add(sig)
if len(ftylist) > 0:
vec.disable_compile()
return vec.build_ufunc()
return wrap | [
9768
] |
def METHOD_NAME(cls, data, scales, **params):
if params["breaks"] is not None:
breaks = np.asarray(params["breaks"])
if hasattr(scales.x, "transform"):
breaks = scales.x.transform(breaks)
elif params["binwidth"] is not None:
breaks = breaks_from_binwidth(
scales.x.dimension(),
params["binwidth"],
params["center"],
params["boundary"],
)
else:
breaks = breaks_from_bins(
scales.x.dimension(),
params["bins"],
params["center"],
params["boundary"],
)
new_data = assign_bins(
data["x"],
breaks,
data.get("weight"),
params["pad"],
params["closed"],
)
return new_data | [
226,
846
] |
def METHOD_NAME(monkeypatch, capsys) -> None:
class MyObject(Generic):
def action(self, pose: Pose, *, an: None | str = None) -> None:
pass
action.__action__ = ActionMetadata() # type: ignore
# @action tries to read from stdin
sio = io.StringIO()
sio.fileno = lambda: 0 # type: ignore # fake whatever fileno
monkeypatch.setattr("sys.stdin", sio)
obj_id = "123"
pose = Pose(Position(0, 0, 0), Orientation(1, 0, 0, 0))
setattr(pose, AP_ID_ATTR, "pose") # set pose id (simulate pose declaration in scene json)
my_obj = MyObject(obj_id, "")
my_obj.action(pose)
assert action._executed_action is None
out_before, _ = capsys.readouterr()
assert not out_before
patch_object_actions(MyObject)
setattr(MyObject, ACTION_NAME_ID_MAPPING_ATTR, {"name": "id"}) # this simulates what patch_with_action_mapping does
my_obj.action(pose, an="name")
assert action._executed_action is None
out_after, _ = capsys.readouterr()
arr = out_after.strip().split("\n")
assert len(arr) == 2
before_evt = ActionStateBefore.from_json(arr[0])
after_evt = ActionStateAfter.from_json(arr[1])
assert before_evt.data.action_id == "id"
assert after_evt.data.action_id == "id"
assert before_evt.data.action_point_ids is not None
assert "pose" in before_evt.data.action_point_ids
with pytest.raises(Arcor2Exception):
my_obj.action(pose)
assert action._executed_action is None
with pytest.raises(Arcor2Exception):
my_obj.action(pose, an="unknown_action_name")
assert action._executed_action is None | [
9,
1575,
279,
1116
] |
def METHOD_NAME():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"privacydeclaration",
sa.Column("id", sa.String(length=255), nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("name", sa.String(), nullable=True),
sa.Column("egress", sa.ARRAY(sa.String()), nullable=True),
sa.Column("ingress", sa.ARRAY(sa.String()), nullable=True),
sa.Column("data_use", sa.String(), nullable=False),
sa.Column("data_categories", sa.ARRAY(sa.String()), nullable=True),
sa.Column("data_qualifier", sa.String(), nullable=True),
sa.Column("data_subjects", sa.ARRAY(sa.String()), nullable=True),
sa.Column("dataset_references", sa.ARRAY(sa.String()), nullable=True),
sa.Column("system_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(
["system_id"],
["ctl_systems.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_privacydeclaration_data_use"),
"privacydeclaration",
["data_use"],
unique=False,
)
op.create_index(
op.f("ix_privacydeclaration_id"), "privacydeclaration", ["id"], unique=False
)
op.create_index(
op.f("ix_privacydeclaration_name"), "privacydeclaration", ["name"], unique=False
)
op.create_index(
op.f("ix_privacydeclaration_system_id"),
"privacydeclaration",
["system_id"],
unique=False,
)
# Data migration
bind = op.get_bind()
existing_declarations = bind.execute(
text("SELECT id, privacy_declarations FROM ctl_systems;")
)
for row in existing_declarations:
system_id = row["id"]
old_privacy_declarations = row["privacy_declarations"]
for privacy_declaration in old_privacy_declarations:
new_privacy_declaration_id: str = "pri_" + str(uuid.uuid4())
new_data = {
**privacy_declaration,
"system_id": system_id,
"id": new_privacy_declaration_id,
}
insert_privacy_declarations_query = text(
"INSERT INTO privacydeclaration (id, name, data_categories, data_qualifier, data_subjects, dataset_references, egress, ingress, system_id, data_use) "
"VALUES (:id, :name, :data_categories, :data_qualifier, :data_subjects, :dataset_references, :egress, :ingress, :system_id, :data_use)"
)
bind.execute(
insert_privacy_declarations_query,
new_data,
)
op.drop_column("ctl_systems", "privacy_declarations")
# ### end Alembic commands ### | [
738
] |
def METHOD_NAME(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions) | [
232,
2758
] |
def METHOD_NAME(geo_type, late_value):
"""
Export a dataset to S3 as a UTF-8 CSV file.
We add single quotes to FIPS codes so Excel doesn't strip leading zeros.
geo_types are County, MetroArea or State.
late_values are percent_30_60 or percent_90.
Non-Metro areas are added to the MetroArea CSV.
Each CSV is to start with a National row for comparison.
CSVs are posted at
https://files.consumerfinance.gov/data/mortgage-performance/downloads/
The script also stores URLs and file sizes for use in page footnotes.
"""
date_list = FIPS.short_dates
thru_date = FIPS.dates[-1]
thru_month = thru_date[:-3]
geo_dict = {
"County": {
"queryset": CountyMortgageData.objects.filter(county__valid=True),
"headings": ["RegionType", "State", "Name", "FIPSCode"],
"fips_list": sorted(
[county.fips for county in County.objects.filter(valid=True)]
),
},
"MetroArea": {
"queryset": MSAMortgageData.objects.filter(msa__valid=True),
"headings": ["RegionType", "Name", "CBSACode"],
"fips_list": sorted(
[metro.fips for metro in MetroArea.objects.filter(valid=True)]
),
},
"NonMetroArea": {
"queryset": NonMSAMortgageData.objects.filter(
state__non_msa_valid=True
),
"headings": ["RegionType", "Name", "CBSACode"],
"fips_list": sorted(
[
"{}-non".format(state.fips)
for state in State.objects.filter(non_msa_valid=True)
]
),
},
"State": {
"queryset": StateMortgageData.objects.all(),
"headings": ["RegionType", "Name", "FIPSCode"],
"fips_list": sorted(
[
state.fips
for state in State.objects.exclude(
fips__in=STATES_TO_IGNORE
)
]
),
},
}
slug = "{}Mortgages{}DaysLate-thru-{}".format(
geo_type, LATE_VALUE_TITLE[late_value], thru_month
)
_map = geo_dict.get(geo_type)
fips_list = _map["fips_list"]
csvfile = StringIO()
writer = csv.writer(csvfile)
writer.writerow(_map["headings"] + date_list)
nation_starter = [NATION_STARTER[heading] for heading in _map["headings"]]
nation_ender = FIPS.nation_row[late_value]
writer.writerow(nation_starter + nation_ender)
for fips in fips_list:
records = _map["queryset"].filter(fips=fips)
record_starter = row_starter(geo_type, records.first())
record_ender = [
round_pct(getattr(record, late_value)) for record in records
]
writer.writerow(record_starter + record_ender)
if geo_type == "MetroArea":
non_map = geo_dict["NonMetroArea"]
for fips in non_map["fips_list"]:
records = non_map["queryset"].filter(fips=fips)
record_starter = row_starter("NonMetroArea", records.first())
record_ender = [
round_pct(getattr(record, late_value)) for record in records
]
writer.writerow(record_starter + record_ender)
bake_csv_to_s3(
slug, csvfile, sub_bucket="{}/downloads".format(MORTGAGE_SUB_BUCKET)
)
logger.info("Baked {} to S3".format(slug))
csvfile.seek(0, 2)
bytecount = csvfile.tell()
csv_size = format_file_size(bytecount)
save_metadata(csv_size, slug, thru_month, late_value, geo_type) | [
294,
15277,
732
] |
def METHOD_NAME(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir) | [
1260,
276
] |
def METHOD_NAME(self):
"""
Returns True if the zvol exists and the pool imported
"""
if not self.has_it():
return False
return self.is_up_prop() | [
137,
1
] |
def METHOD_NAME(self, parser):
"""Add command line options. Derived classes should override this
method to add more options."""
parser.add_option("--debug", dest="debug",
action="store_true",
help="display diagnostic information while running")
parser.add_option("-y", dest="noprompt",
action="store_true",
help="answer yes to every prompt") | [
238,
1881
] |
def METHOD_NAME(self) -> Optional[APIModule]:
if self.api.has_service("code"):
res = self.api.services.METHOD_NAME
# the order is important here
# its also important that patching only happens once
if not self.__api_patched:
self._request_code_execution = res.request_code_execution
self.__api_patched = True
res.request_code_execution = self.request_code_execution
return res
return None | [
544
] |
def METHOD_NAME():
verify_lstm(
"llvm",
tvm.cpu(0),
1,
1,
1,
1,
0,
True,
True,
False,
False,
"IFGO",
)
verify_lstm(
"llvm",
tvm.cpu(0),
8,
4,
8,
16,
0,
True,
False,
False,
False,
"IFGO",
) | [
9,
1376
] |
def METHOD_NAME(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this table trace .
row
If there is a layout grid, use the domain for this row
in the grid for this table trace .
x
Sets the horizontal domain of this table trace (in plot
fraction).
y
Sets the vertical domain of this table trace (in plot
fraction).
""" | [
1302,
1303
] |
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, parsed_indicator):
if parsed_indicator.is_legacy:
indicator = getattr(self, parsed_indicator.category)
setattr(self, 'legacy_{}'.format(parsed_indicator.category), True)
if parsed_indicator.date_range:
date_range = parsed_indicator.date_range
if isinstance(indicator, ByTypeWithTotal):
indicator.totals.date_ranges.add(date_range)
else:
indicator.date_ranges.add(date_range)
elif parsed_indicator.category == const.CUSTOM_FORM:
self.custom_form.append(
TypedIndicator(
enabled=True,
date_range=parsed_indicator.date_range,
type=parsed_indicator.type
)
)
elif parsed_indicator.category == const.FORMS_SUBMITTED:
self.forms_submitted.enabled = True
if parsed_indicator.date_range:
self.forms_submitted.date_ranges.add(parsed_indicator.date_range)
else:
indicator = getattr(self, parsed_indicator.category)
if parsed_indicator.type:
indicator = indicator.get_or_add_for_type(parsed_indicator.type)
else:
indicator = indicator.totals
indicator.enabled = True
if parsed_indicator.date_range:
indicator.date_ranges.add(parsed_indicator.date_range) | [
0,
662
] |
def METHOD_NAME(
__key: _KeyType, __value_name: str | None, __reserved: Any, __type: int, __value: str | int
) -> None: ... # reserved is ignored | [
0,
99,
2258
] |
def METHOD_NAME(self):
"""Ebuilds with matching DIST_VERSION and package version."""
for PVR in ("1.7.0-r0", "1.7.0", "1.7.0-r100"):
self.assertNoReport(self.mk_check(), self.mk_pkg(PVR, "1.007")) | [
9,
3626
] |
def METHOD_NAME():
global _listening
_listening = False | [
5787,
158
] |
def METHOD_NAME(self, property_id=None):
# A * (rho * t + nsm)
if property_id is None:
property_id = self.property_id
n = len(property_id)
i = arange(n)
else:
self.model.log.debug('property_id = %r' % property_id)
n = len(property_id)
i = searchsorted(self.property_id, property_id)
mpa = zeros(n, dtype='float64')
rho = self.model.materials.get_density_by_material_id(self.material_id)
mpa = rho * self.thickness[i] + self.nsm[i]
return mpa | [
19,
2858,
2735,
690
] |
def METHOD_NAME(self, event):
if event.text == ' ':
if self.timer.running:
self.timer.stop()
else:
self.timer.start() | [
69,
59,
2971
] |
def METHOD_NAME(self, *args, **kwargs):
if not self._is_built:
self._build()
return super().METHOD_NAME(*args, **kwargs) | [
1100
] |
def METHOD_NAME(func):
return _inner_wrapper(func, CLOSE_PERMIT) | [
139,
1018,
1462,
6310
] |
def METHOD_NAME(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args | [
4693,
2857
] |
def METHOD_NAME(repo):
"""
Given
- A pack with 2 scripts
- An ID set including only one script
When
- running the create-content-artifacts command.
Then
- Verify that only the script in the ID set is exported to the pack artifacts.
"""
pack = repo.create_pack("Joey")
pack.pack_metadata.write_json(
{
"name": "Joey",
}
)
script1 = pack.create_script("HowYouDoing")
script2 = pack.create_script("ShareFood")
repo.id_set.write_json(
{
"Packs": {
"Joey": {
"ContentItems": {
"scripts": [
"HowYouDoing",
],
},
},
},
}
)
dir_path = repo.make_dir()
with ChangeCWD(repo.path):
runner = CliRunner()
result = runner.invoke(
main,
[
ARTIFACTS_CMD,
"-a",
dir_path,
"--no-zip",
"-fbi",
"-idp",
repo.id_set.path,
"-p",
"Joey",
],
)
assert result.exit_code == 0
scripts_folder_path = Path(dir_path) / "content_packs" / pack.name / "Scripts"
assert (scripts_folder_path / f"script-{script1.name}.yml").exists()
assert not (scripts_folder_path / f"script-{script2.name}.yml").exists() | [
9,
129,
8557,
41,
527,
604,
147
] |
def METHOD_NAME(self, samples: Samples, last_samples: Samples):
data: List[Union[float, List[Optional[Union[float, bool]]]]] = []
timestamp = time.time()
self.next_timestamp = timestamp
for metricinfo in self.metrics:
value = samples[metricinfo.desc.name]
if metricinfo.desc.instanced:
old_value = last_samples[metricinfo.desc.name]
assert isinstance(value, dict)
assert isinstance(old_value, dict)
# If we have less or more keys the data changed, send a meta message.
if value.keys() != old_value.keys():
self.need_meta = True
if metricinfo.derive == 'rate':
instances: List[Optional[Union[float, bool]]] = []
for key, val in value.items():
instances.append(self.calculate_sample_rate(val, old_value.get(key)))
data.append(instances)
else:
data.append(list(value.values()))
else:
old_value = last_samples.get(metricinfo.desc.name)
assert not isinstance(value, dict)
assert not isinstance(old_value, dict)
if metricinfo.derive == 'rate':
data.append(self.calculate_sample_rate(value, old_value))
else:
data.append(value)
if self.need_meta:
self.send_meta(samples, timestamp)
self.last_timestamp = self.next_timestamp
self.send_data(json.dumps([data]).encode()) | [
353,
682
] |
def METHOD_NAME(self, s): | [
7432
] |
def METHOD_NAME(vm):
"""Compiles the MKL FFT interfaces.
Args:
vm: Virtual Machine to compile on.
"""
vm.Install('build_tools')
mpi_lib = 'openmpi'
make_options = ('PRECISION=MKL_DOUBLE '
'interface=ilp64 '
f'mpi={mpi_lib} '
'compiler=gnu')
for interface in ('fftw2xc', 'fftw2xf', 'fftw3xc', 'fftw3xf'):
cmd = (f'cd /opt/intel/mkl/interfaces/{interface} && '
f'sudo make libintel64 {make_options}')
vm.RemoteCommand(cmd) | [
296,
703
] |
def METHOD_NAME(self, project):
run_dbt(["seed"])
run_dbt(["run"]) | [
484,
61,
22
] |
def METHOD_NAME():
mode = get_default_mode()
mode = mode.including("specialize", "local_mul_s_d")
for sp_format in sparse.sparse_formats:
inputs = [getattr(pytensor.sparse, sp_format + "_matrix")(), matrix()]
f = pytensor.function(inputs, sparse.mul_s_d(*inputs), mode=mode)
assert not any(
isinstance(node.op, sparse.MulSD) for node in f.maker.fgraph.toposort()
) | [
9,
125,
1998,
1305,
227
] |
def METHOD_NAME(
self, unknown_type_sample_path, unknown_type_template_path, generic_converter
):
"""Test getting state dict."""
generic_converter.parse(unknown_type_sample_path)
assert generic_converter.get_state_dict() == {
"expression_prefix": "o~",
"has_leading": {"/lala//iela": "/", "/lala/iela": "/", "lala/la": "/"},
"regex": "([\\/]?[\\w|\\/|-]+)~([\\+]?.*\\)|\\-|\\>[A-Za-z_]\\w*)",
"template": open(unknown_type_template_path).read(),
} | [
9,
19,
551,
553
] |
def METHOD_NAME(self):
""" An occurence test of returned and removed element from container """
uid, d = self.sessions.make()
self.sessions.pop(uid)
self.assertNotIn(uid, self.sessions) | [
2580,
130,
954,
217,
760
] |
def METHOD_NAME(self, msg, *args):
if len(args) > 0:
self._logs.append(msg % args)
else:
self._logs.append(msg) | [
390
] |
def METHOD_NAME(self):
lti = Client(
client_key=OTHER_LTI_CONSUMER_KEY,
client_secret=OTHER_LTI_CONSUMER_SECRET,
signature_type=SIGNATURE_TYPE_BODY,
)
(uri, _headers, body) = lti.sign(
uri=self.url_prefix + LTI_TPA_LOGIN_URL, http_method='POST',
headers={'Content-Type': FORM_ENCODED},
body={
'user_id': LTI_USER_ID,
'custom_tpa_next': '/account/finish_auth/?course_id=my_course_id&enrollment_action=enroll',
}
)
with self.settings(SOCIAL_AUTH_LTI_CONSUMER_SECRETS={OTHER_LTI_CONSUMER_KEY: OTHER_LTI_CONSUMER_SECRET}):
login_response = self.client.post(path=uri, content_type=FORM_ENCODED, data=body)
# The user should be redirected to the registration form
assert login_response.status_code == 302
assert login_response['Location'].endswith(reverse('signin_user'))
register_response = self.client.get(login_response['Location'])
self.assertContains(
register_response,
'"currentProvider": "Tool Consumer with Secret in Settings"',
)
self.assertContains(register_response, '"errorMessage": null') | [
9,
1046,
557,
3553,
444,
280,
817
] |
def METHOD_NAME(scores, classes, boxes, raw_cls_masks,
im_h, im_w, im_scale_y=None, im_scale_x=None, im_scale=None,
full_image_masks=True, encode_masks=False,
confidence_threshold=0.0, segmentoly_type=False):
no_detections = (np.empty((0, ), dtype=np.float32), np.empty((0, ), dtype=np.float32),
np.empty((0, 4), dtype=np.float32), [])
if scores is None:
return no_detections
scale = im_scale
if scale is None:
assert (im_scale_x is not None) and (im_scale_y is not None)
scale = [im_scale_x, im_scale_y, im_scale_x, im_scale_y]
confidence_filter = scores > confidence_threshold
scores = scores[confidence_filter]
classes = classes[confidence_filter]
boxes = boxes[confidence_filter]
if raw_cls_masks is not None:
raw_cls_masks = [segm for segm, is_valid in zip(raw_cls_masks, confidence_filter) if is_valid]
if len(scores) == 0:
return no_detections
boxes = boxes / scale
classes = classes.astype(np.uint32)
if raw_cls_masks is not None:
masks = []
for box, cls, raw_mask in zip(boxes, classes, raw_cls_masks):
raw_cls_mask = raw_mask[cls, ...] if segmentoly_type else raw_mask
mask = segm_postprocess(box, raw_cls_mask, im_h, im_w, full_image_masks, encode_masks)
masks.append(mask)
else:
masks = None
return scores, classes, boxes, masks | [
1710
] |
def METHOD_NAME(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOfficeATPDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230801preview:getOfficeATPDataConnector', __args__, opts=opts, typ=GetOfficeATPDataConnectorResult).value
return AwaitableGetOfficeATPDataConnectorResult(
data_types=pulumi.get(__ret__, 'data_types'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type')) | [
19,
1902,
14698,
365,
4059
] |
def METHOD_NAME(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute function for v7e-m DSP instructions of conv1d on NWC layout."""
if isinstance(strides, (tuple, list)):
strides = strides[0]
if isinstance(dilation, (tuple, list)):
dilation = dilation[0]
batch_size, data_width, in_channels = data.shape
kernel_size, out_channels, _ = kernel.shape
# Compute the output shape
dilated_kernel_size = (kernel_size - 1) * dilation + 1
pad_left, pad_right = get_pad_tuple1d(padding, (dilated_kernel_size,))
out_channels = simplify(out_channels)
out_width = simplify((data_width - dilated_kernel_size + pad_left + pad_right) // strides + 1)
# Apply padding
pad_before = [0, pad_left, 0]
pad_after = [0, pad_right, 0]
padded_data = pad(data, pad_before, pad_after, name="padded_data")
# Compute graph
rc = te.reduce_axis((0, in_channels), name="rc")
rw = te.reduce_axis((0, kernel_size), name="rw")
conv = te.compute(
(batch_size, out_width, out_channels),
lambda b, w, c: te.sum(
padded_data[b, w * strides + rw * dilation, rc].astype(out_dtype)
* kernel[rw, c, rc].astype(out_dtype),
axis=[rw, rc],
),
name="conv1d",
tag="conv1d_nwc",
)
###########################
# Config Space Definition #
###########################
n, ow, co = (
cfg.axis(batch_size.value),
cfg.axis(out_width.value),
cfg.axis(out_channels.value),
)
kw, ci = (
cfg.reduce_axis(kernel_size.value),
cfg.reduce_axis(in_channels.value),
)
owo, owi = cfg.define_split("tile_ow", ow, policy="factors", num_outputs=2)
cio, cii = cfg.define_split(
"tile_ci",
ci,
policy="factors",
num_outputs=2,
# TODO: check case with in_channels.value % 4 != 0 with AutoTVM
filter=None if cfg.is_fallback else lambda x: x.size[-1] % 4 == 0,
)
coo, coi = cfg.define_split("tile_co", co, policy="factors", num_outputs=2)
cfg.define_reorder(
"reorder_0_simd",
[n, owo, owi, coo, coi, kw, cio, cii],
policy="candidate",
candidate=[
[n, kw, owo, coo, cio, owi, coi, cii],
[n, kw, coo, owo, cio, owi, coi, cii],
[n, kw, owo, coo, cio, owi, coi, cii],
[n, kw, coo, owo, cio, owi, coi, cii],
],
)
cfg.define_knob("auto_unroll_max_step", [0, 2, 4, 8, 16, 32])
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
cfg.fallback_split("tile_ow", [-1, out_width.value])
cfg.fallback_split("tile_ci", [-1, in_channels.value])
cfg.fallback_split("tile_co", [-1, out_channels.value])
return conv | [
3518,
8240,
8241,
226
] |
def METHOD_NAME(request, conference_slug, proposal_slug):
return proposal_vote(request, conference_slug, proposal_slug, None) | [
4229,
9811,
188
] |
def METHOD_NAME(self, enabled):
self._is_running = enabled
self._update_state() | [
86,
146
] |
def METHOD_NAME(self): | [
9,
340,
9250,
97,
636
] |
def METHOD_NAME(self):
obj = self.base_factory()
self.submission_factory(**{self.relation_to_app: obj})
base_qs = RoundsAndLabs.objects.with_progress()
fetched_obj = base_qs.active().first()
self.assertEqual(fetched_obj, obj)
self.assertFalse(base_qs.inactive().exists()) | [
9,
923
] |
def METHOD_NAME(t):
"""This is for debugging purposes. Normally prefer to pass it around."""
global TIMING
log.info("Setting global profiler in process %r", psutil.Process())
TIMING = t | [
176,
285,
7275
] |
def METHOD_NAME(self,
path_to_src=None,
generator=None,
coverage_output_type=None,
debug=False,
valgrind=False):
"""
Create Makefiles and prepare targets with CMake.
Keyword arguments:
path_to_src - Path to source directory
generator - Type of Makefiles to generate
coverage_output_type - Generate HTML, XML or both reports
debug - Target debug or release build
"""
if generator is None:
generator = DEFAULT_CMAKE_GENERATORS.get(self.make_program,
"Unix Makefiles")
cmake = get_cmake_tool()
if cmake is None:
logging.error(
"No CMake found in Path. Install all the required tools.")
sys.exit(1)
args = [cmake,
"-G",
generator,
"-DBUILD_TESTING=ON"
"-DCMAKE_MAKE_PROGRAM=%s" % self.make_program,
"-DCMAKE_CXX_COMPILER=%s" % get_cxx_tool(),
"-DCMAKE_C_COMPILER=%s" % get_c_tool()]
if debug:
args.append("-DCMAKE_BUILD_TYPE=Debug")
if coverage_output_type:
args.append("-DCOVERAGE:STRING=%s" % coverage_output_type)
if valgrind:
valgrind = get_valgrind_tool()
if valgrind is None:
logging.error(
"No Valgrind found in Path. Install all the required tools.\n")
sys.exit(1)
args.append("-DVALGRIND=1")
args.append("-DMEMORYCHECK_COMMAND_OPTIONS=\"--track-origins=yes\" \"--leak-check=full\" \"--show-reachable=yes\" \"--error-exitcode=1\"")
else:
args.append("-DVALGRIND=0")
if path_to_src is not None:
args.append(path_to_src)
execute_program(args,
"CMake failed to run successfully. See error message.") | [
129,
16290
] |
def METHOD_NAME():
if not os.path.exists(LARGE_TXT_FILE):
with open(LARGE_TXT_FILE, "w") as file:
file.truncate(10 ** 8) # ~ 100MB
return LARGE_TXT_FILE | [
129,
1953,
310,
171
] |
def METHOD_NAME():
timeout = 2
socket.setdefaulttimeout(timeout)
for host in __check_internet_hosts__:
try:
for var in ("HTTP_PROXY", "HTTPS_PROXY"):
if not os.getenv(var) and not os.getenv(var.lower()):
continue
requests.get("http://%s" % host, allow_redirects=False, timeout=timeout)
return True
# try to resolve `host` for both AF_INET and AF_INET6, and then try to connect
# to all possible addresses (IPv4 and IPv6) in turn until a connection succeeds:
s = socket.create_connection((host, 80))
s.close()
return True
except: # pylint: disable=bare-except
pass
return False | [
2813,
69
] |
def METHOD_NAME():
pyblish.register_host("flame")
pyblish.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info("OpenPype Flame plug-ins registered ...")
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
log.info("OpenPype Flame host installed ...") | [
428
] |
def METHOD_NAME(module):
# tools must be installed
if module.get_bin_path('systemctl'):
# check if /sbin/init is a symlink to systemd
# on SUSE, /sbin/init may be missing if systemd-sysvinit package is not installed.
if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
return True
return False | [
137,
4658,
3627,
8024
] |
def METHOD_NAME():
global net
net = BitcoinSimnet | [
0,
-1
] |
def METHOD_NAME(self):
path = "Phylip/one.dat"
with open(path) as handle:
phylip_text = handle.read()
path = "Phylip/three.dat"
with open(path) as handle:
phylip_text3 = handle.read()
path = "Phylip/four.dat"
with open(path) as handle:
phylip_text4 = handle.read()
handle = StringIO(phylip_text4 + "\n" + phylip_text4)
self.assertEqual(len(list(PhylipIterator(handle))), 2)
handle = StringIO(phylip_text3 + "\n" + phylip_text4 + "\n\n\n" + phylip_text)
self.assertEqual(len(list(PhylipIterator(handle))), 3) | [
9,
8587
] |
def METHOD_NAME(self):
"""
Retrieves the name of the component
Returns:
A string containing the name of the component
"""
return self.name | [
19,
156
] |
def METHOD_NAME(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.attention.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.attention.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.attention.v_proj.weight
self.vb = None | [
0,
1010,
4407,
585
] |
def METHOD_NAME(arg):
arg_type, arg_name, arg_default, arg_desc = arg
def k(*xs):
if arg_type_post == ' or None':
return None
elif len(xs) == 1:
return xs[0]
else:
return xs
def m(f):
def w(x):
if arg_type_post == ' or None':
return f(x) if x else None
else:
return f(x) if x else f()
return w
arg_type_post = ''
if arg_name == 'SRC' or arg_name == 'DST': return
if arg_type.startswith("optional "):
arg_type = arg_type[len("optional "):]
arg_type_post = ' or None'
if arg_type == "vec3f":
arg_value = repr(tuple(map(m(float), arg_default.split(","))) if arg_default else k(0, 0, 0))
elif arg_type == "vec3i":
arg_value = repr(tuple(map(m(int), arg_default.split(","))) if arg_default else k(0, 0, 0))
elif arg_type == "vec2f":
arg_value = repr(tuple(map(m(float), arg_default.split(","))) if arg_default else k(0, 0))
elif arg_type == "vec2i":
arg_value = repr(tuple(map(m(int), arg_default.split(","))) if arg_default else k(0, 0))
elif arg_type == "vec4f":
arg_value = repr(tuple(map(m(float), arg_default.split(","))) if arg_default else k(0, 0, 0, 0))
elif arg_type == "vec4i":
arg_value = repr(tuple(map(m(int), arg_default.split(","))) if arg_default else k(0, 0, 0, 0))
elif arg_type == "float":
arg_value = repr(m(float)(arg_default))
elif arg_type == "int":
arg_value = repr(m(int)(arg_default))
elif arg_type == "bool":
arg_value = repr(m(bool)(arg_default))
elif arg_type in ("string", "multiline_string", "readpath", "writepath"):
if arg_type == "readpath":
arg_type = "str, readable path"
elif arg_type == "writepath":
arg_type = "str, writeable path"
elif arg_type == "multiline_string":
arg_type = "str, may have multiple lines"
else:
arg_type = "str"
arg_value = repr(arg_default if arg_default else f'<{arg_name}>')
elif arg_type in ("list", "ListObject"):
arg_type = "list"
arg_value = repr(list())
elif arg_type in ("dict", "DictObject"):
arg_type = "dict"
arg_value = repr(dict())
elif arg_type in ("prim", "primitive", "PrimitiveObject"):
arg_type = "ze.ZenoPrimitiveObject"
arg_value = f"<{arg_name}>"
elif arg_type in ("numeric", "NumericObject"):
arg_type = "any numeric types including int float vec3f vec3i"
arg_value = str(m(float)(arg_default) if ',' in arg_default else tuple(map(m(float), arg_default.split(","))))
elif arg_type in ("zany", "IObject"):
arg_type = "any type"
arg_value = f"<{arg_name}>"
elif arg_type.startswith("enum "):
options = arg_type[len("enum "):].split()
if options and arg_default not in options:
arg_default = options[0]
arg_value = repr(arg_default)
arg_type = f"options are: {' '.join(options)}"
else:
# if arg_type != "":
# print(f"warning: unknown type {arg_type}")
if arg_type == "" and not arg_type_post:
arg_default = repr(None)
arg_value = arg_default or f'<{arg_name}>'
p(f" {arg_name}={arg_value}," + ("" if not arg_type and not arg_type_post and not arg_desc else f" # {arg_type}{arg_type_post}{', ' + arg_desc if arg_desc and (arg_type or arg_type_post) else ''}")) | [
-1
] |
def METHOD_NAME(inactive=False, **kw):
"""Fetch all labels by a catalog query
"""
catalog = SETUP_CATALOG
query = {
"portal_type": "Label",
"is_active": True,
"sort_on": "title",
}
# Allow to update the query with the keywords
query.update(kw)
if inactive:
del query["is_active"]
return search(query, catalog) | [
539,
415
] |
def METHOD_NAME(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[0.0, 5.0, 4.0]])
self._gradient_input_test_assert(net, net.relu, inp, 1, [[1.0, 1.0, 1.0]]) | [
9,
53,
1789,
362,
10097
] |
def METHOD_NAME(log_file, verbosity, append):
level = {0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO}.get(verbosity, logging.DEBUG)
if log_file is not None:
handler = logging.FileHandler(log_file, mode=(append and 'a' or 'w'))
else:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(threadName)s::%(levelname)s::%(asctime)s::%(module)s::"
+ "%(lineno)d::%(name)s::(%(funcName)s) %(message)s"))
root_logger = logging.getLogger('')
root_logger.setLevel(level)
root_logger.addHandler(handler) | [
102,
663
] |
def METHOD_NAME(self, node):
return [u for u, v in self.graph.in_edges(node)] | [
19,
2189
] |
def METHOD_NAME(case_factory, event, attendees):
event_case_id = event._case_id.hex
event_group_id = event.event_id.hex
attendee_case_type = get_attendee_case_type(event.domain)
attendee_case_ids = (c.case_id for c in attendees)
case_structures = []
for case_id in attendee_case_ids:
event_host = CaseStructure(case_id=event_case_id)
attendee_host = CaseStructure(case_id=case_id)
case_structures.append(CaseStructure(
indices=[
CaseIndex(
relationship='extension',
identifier='event-host',
related_structure=event_host,
related_type=EVENT_CASE_TYPE,
),
CaseIndex(
relationship='extension',
identifier='attendee-host',
related_structure=attendee_host,
related_type=attendee_case_type,
),
],
attrs={
'case_type': EVENT_ATTENDEE_CASE_TYPE,
'owner_id': event_group_id,
'create': True,
},
))
case_factory.create_or_update_cases(case_structures) | [
0,
391,
1229
] |
def METHOD_NAME(self):
pass | [
709,
710
] |
def METHOD_NAME(tmpdir):
tmpdir = str(tmpdir)
dirstructure = [
'/cal1/public/',
'/cal1/private/',
'/cal2/public/',
'/cal3/public/',
'/cal3/work/',
'/cal3/home/',
'/cal4/cfgcolor/',
'/cal4/dircolor/',
'/cal4/cfgcolor_again/',
'/cal4/cfgcolor_once_more/',
'/singlecollection/',
]
for one in dirstructure:
os.makedirs(tmpdir + one)
filestructure = [
('/cal1/public/displayname', 'my calendar'),
('/cal1/public/color', 'dark blue'),
('/cal1/private/displayname', 'my private calendar'),
('/cal1/private/color', '#FF00FF'),
('/cal4/dircolor/color', 'dark blue'),
]
for filename, content in filestructure:
with open(tmpdir + filename, 'w') as metafile:
metafile.write(content)
return tmpdir | [
-1
] |
def METHOD_NAME(self, PanelItem, ItemsNumber):
log.debug("VFS.FreeVirtualFindData({0}, {1})".format(PanelItem, ItemsNumber)) | [
3712,
162,
416,
365
] |
def METHOD_NAME():
global _objects
t = TestRequired()
_objects['test_required_t'] = t
t.put()
return t | [
9,
984
] |