text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self, channel_index):
self.channel_index = channel_index | [
0,
307,
724
] |
def METHOD_NAME(self):
ai = self.assertIncomplete
ai("(a **")
ai("(a,b,")
ai("(a,b,(")
ai("(a,b,(")
ai("a = (")
ai("a = {")
ai("b + {")
ai("if 9==3:\n pass\nelse:")
ai("if 9==3:\n pass\nelse:\n")
ai("if 9==3:\n pass\nelse:\n pass")
ai("if 1:")
ai("if 1:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:")
ai("if 1:\n pass\n if 1:\n pass\n else:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:\n pass")
ai("def x():")
ai("def x():\n")
ai("def x():\n\n")
ai("def x():\n pass")
ai("def x():\n pass\n ")
ai("def x():\n pass\n ")
ai("\n\ndef x():\n pass")
ai("a = 9+ \\")
ai("a = 'a\\")
ai("a = '''xy")
ai("","eval")
ai("\n","eval")
ai("(","eval")
ai("(\n\n\n","eval")
ai("(9+","eval")
ai("9+ \\","eval")
ai("lambda z: \\","eval")
ai("if True:\n if True:\n if True: \n")
ai("@a(")
ai("@a(b")
ai("@a(b,")
ai("@a(b,c")
ai("@a(b,c,")
ai("from a import (")
ai("from a import (b")
ai("from a import (b,")
ai("from a import (b,c")
ai("from a import (b,c,")
ai("[");
ai("[a");
ai("[a,");
ai("[a,b");
ai("[a,b,");
ai("{");
ai("{a");
ai("{a:");
ai("{a:b");
ai("{a:b,");
ai("{a:b,c");
ai("{a:b,c:");
ai("{a:b,c:d");
ai("{a:b,c:d,");
ai("a(")
ai("a(b")
ai("a(b,")
ai("a(b,c")
ai("a(b,c,")
ai("a[")
ai("a[b")
ai("a[b,")
ai("a[b:")
ai("a[b:c")
ai("a[b:c:")
ai("a[b:c:d")
ai("def a(")
ai("def a(b")
ai("def a(b,")
ai("def a(b,c")
ai("def a(b,c,")
ai("(")
ai("(a")
ai("(a,")
ai("(a,b")
ai("(a,b,")
ai("if a:\n pass\nelif b:")
ai("if a:\n pass\nelif b:\n pass\nelse:")
ai("while a:")
ai("while a:\n pass\nelse:")
ai("for a in b:")
ai("for a in b:\n pass\nelse:")
ai("try:")
ai("try:\n pass\nexcept:")
ai("try:\n pass\nfinally:")
ai("try:\n pass\nexcept:\n pass\nfinally:")
ai("with a:")
ai("with a as b:")
ai("class a:")
ai("class a(")
ai("class a(b")
ai("class a(b,")
ai("class a():")
ai("[x for")
ai("[x for x in")
ai("[x for x in (")
ai("(x for")
ai("(x for x in")
ai("(x for x in (") | [
9,
6600
] |
def METHOD_NAME(self):
resolving_time = 10
truth_time = np.array([self.intervals['time'][0],
self.intervals['time'][-1]])
truth_endtime = np.array([self.intervals['time'][-2],
self.intervals['time'][-1]]) + resolving_time
self._test_coincidence(resolving_time=resolving_time,
coincidence=1,
pre_trigger=0,
n_concidences_truth=2,
times_truth=truth_time,
endtime_truth=truth_endtime
)
pre_trigger = 2
self._test_coincidence(resolving_time=resolving_time,
coincidence=1,
pre_trigger=pre_trigger,
n_concidences_truth=2,
times_truth=truth_time - pre_trigger,
endtime_truth=truth_endtime
) | [
9,
529,
-1
] |
def METHOD_NAME(ctx, _, value):
"""Validate the uniqueness of the label of the code.
The exact uniqueness criterion depends on the type of the code, whether it is "local" or "remote". For the former,
the `label` itself should be unique, whereas for the latter it is the full label, i.e., `label@computer.label`.
.. note:: For this to work in the case of the remote code, the computer parameter already needs to have been parsed.
In interactive mode, this means that the computer parameter needs to be defined after the label parameter in the
command definition. For non-interactive mode, the parsing order will always be determined by the order the
parameters are specified by the caller and so this validator may get called before the computer is parsed. For
that reason, this validator should also be called in the command itself, to ensure it has both the label and
computer parameter available.
"""
from aiida.common import exceptions
from aiida.orm import load_code
computer = ctx.params.get('computer', None)
on_computer = ctx.params.get('on_computer', None)
if on_computer is False:
try:
load_code(value)
except exceptions.NotExistent:
pass
except exceptions.MultipleObjectsError:
raise click.BadParameter(f'multiple copies of the remote code `{value}` already exist.')
else:
raise click.BadParameter(f'the code `{value}` already exists.')
if computer is not None:
full_label = f'{value}@{computer.label}'
try:
load_code(full_label)
except exceptions.NotExistent:
pass
except exceptions.MultipleObjectsError:
raise click.BadParameter(f'multiple copies of the local code `{full_label}` already exist.')
else:
raise click.BadParameter(f'the code `{full_label}` already exists.')
return value | [
187,
636,
594
] |
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict()) | [
24,
3
] |
def METHOD_NAME(self, lcfs: BluemiraWire) -> PhysicalComponent:
"""
Build the x-z components of the plasma.
Parameters
----------
lcfs:
LCFS wire
"""
face = BluemiraFace(lcfs, self.name)
component = PhysicalComponent(self.LCFS, face)
apply_component_display_options(
component, color=BLUE_PALETTE["PL"], transparency=0.3
)
return component | [
56,
14595
] |
def METHOD_NAME(x):
if isinstance(x, torch.Tensor) and not hasattr(x, '_node'):
x = MetaProxy(x, placeholder=True, name='weight')
return x if not hasattr(x, '_node') else x._node | [
19,
1716
] |
def METHOD_NAME(self) -> None:
test_connection_fn = get_test_connection_fn(self.service_connection)
test_connection_fn(self.metadata, self.connection_obj, self.service_connection) | [
9,
550
] |
def METHOD_NAME(self, **kwargs: Any) -> AsyncIterable["_models.ResourceProviderOperation"]:
"""Returns list of all operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceProviderOperation or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.scvmm.models.ResourceProviderOperation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-06-05-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ResourceProviderOperationList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data) | [
245
] |
async def METHOD_NAME(self, node_id=None, params=None, headers=None):
"""
Returns information about hot threads on each node in the cluster.
:arg node_id: A comma-separated list of node IDs or names to
limit the returned information; use `_local` to return information from
the node you're connecting to, leave empty to get information from all
nodes
:arg doc_type: The type to sample (default: cpu) Valid choices:
cpu, wait, block
:arg ignore_idle_threads: Don't show threads that are in known-
idle places, such as waiting on a socket select or pulling from an empty
task queue (default: true)
:arg interval: The interval for the second sampling of threads
:arg snapshots: Number of samples of thread stacktrace (default:
10)
:arg threads: Specify the number of threads to provide
information for (default: 3)
:arg timeout: Explicit operation timeout
"""
# type is a reserved word so it cannot be used, use doc_type instead
if "doc_type" in params:
params["type"] = params.pop("doc_type")
return await self.transport.perform_request(
"GET",
_make_path("_nodes", node_id, "hot_threads"),
params=params,
headers=headers,
) | [
3269,
1573
] |
def METHOD_NAME(cls, cfg: CfgNode):
# registry not used here, since embedding losses are currently local
# and are not used anywhere else
return cls._EMBED_LOSS_REGISTRY[cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME](cfg) | [
129,
347,
1572
] |
def METHOD_NAME(args):
data_dir = args.data_dir
reader_path, reader_file = os.path.split(args.reader_file)
reader_file, extension = os.path.splitext(reader_file)
batchsize = args.batchsize
place = args.place
file_list = [os.path.join(data_dir, x) for x in os.listdir(data_dir)]
sys.path.append(reader_path)
#sys.path.append(os.path.abspath("."))
reader_class = import_module(reader_file)
config = {"runner.inference": True}
dataset = reader_class.RecDataset(file_list, config=config)
loader = DataLoader(
dataset, batch_size=batchsize, places=place, drop_last=True)
return loader | [
129,
365,
467
] |
def METHOD_NAME(self):
self.close()
self.__init__() | [
1141
] |
def METHOD_NAME(self):
topic = f"{self.DEVICE_NAME}/{self.DEVICE_TYPE}"
config = {
"topicFilter": topic,
"converter": {
"type": "json",
"deviceNameTopicExpression": "(.*?)(?=/.*)",
"deviceTypeTopicExpression": f"(?<={self.DEVICE_NAME}/)(.*)",
"timeout": 60000,
"attributes": "*",
"timeseries": "*"
}
}
data = {
"Temperature": randint(0, 256),
"Pressure": randint(0, 256)
}
return topic, config, data | [
19,
398,
1170,
9,
365
] |
def METHOD_NAME(self):
export_conandata_patches(self) | [
294,
505
] |
def METHOD_NAME(G):
nodes = torch.nonzero(G.out_degrees(), as_tuple=False).squeeze(-1)
return nodes | [
416,
2261,
480
] |
def METHOD_NAME(self):
self.filename = tempfile.NamedTemporaryFile().name
self.status_file = action_runner.StatusFile(self.filename) | [
102,
452,
171
] |
def METHOD_NAME(drifted_data):
# Arrange
train_ds, test_ds = drifted_data
train_ds = Dataset(train_ds.data.drop(columns=['numeric_with_drift', 'categorical_with_drift']),
label=train_ds.label_name)
test_ds = Dataset(test_ds.data.drop(columns=['numeric_with_drift', 'categorical_with_drift']),
label=test_ds.label_name)
check = MultivariateDrift()
# Act
result = check.run(train_ds, test_ds)
# Assert
assert_that(result.value, has_entries({
'domain_classifier_auc': close_to(0.5, 0.03),
'domain_classifier_drift_score': close_to(0, 0.01),
'domain_classifier_feature_importance': has_entries(
{'categorical_without_drift': close_to(0.81, 0.001),
'numeric_without_drift': close_to(0.2, 0.02)}
),
})) | [
9,
654,
9231
] |
def METHOD_NAME(cls, model: tf.keras.Model):
"""
This function instantiates the feature extraction model for per layer outputs
:param model: Keras model.
:return: Intermediate keras model for feature extraction
"""
outputs = [layer.output for layer in model.layers]
intermediate_model = tf.keras.models.Model(inputs=model.inputs, outputs=outputs)
intermediate_model.trainable = False
return intermediate_model | [
19,
5086,
578
] |
def METHOD_NAME(self):
"""
Set the current for all modules to zero.
Includes dacs that are not controlled by this instrument (this is
intentional).
"""
# First set all "parameters" to zero.
# this ensures that the safe slow rampdown is used and that the
# correct values are known to the instrument.
for ch in self.channel_map:
self.set(ch, 0)
# "brute-set" all sources in known modules to zero, this is because
# this is also a safety method that should ensure we are in an all
# zero state.
for s in self.current_sources.values():
for dac in range(4):
s.set_current(dac, 0.0) | [
0,
3454,
313
] |
def METHOD_NAME(self, event):
"""
Handles all hover leave events.
:param event: QGraphicsSceneHoverEvent instance
"""
# locked objects don't need cursors
if not self.locked():
self._graphics_view.setCursor(QtCore.Qt.ArrowCursor) | [
1935,
3457,
417
] |
def METHOD_NAME(self):
with pytest.raises(NetworkError):
Network(mock.Mock(), "10.0.0.0/16", "0x0d15ea5e", mask="255.255.0.0") | [
9,
176,
1119,
361
] |
def METHOD_NAME(name):
name = name.lower()
if name is None or name == 'none':
return nn.Identity()
elif name.startswith('scale'):
scale_factor = float(name[5:])
return lambda x: x.clamp(0., scale_factor) / scale_factor
elif name.startswith('clamp'):
clamp_max = float(name[5:])
return lambda x: x.clamp(0., clamp_max)
elif name.startswith('mul'):
mul_factor = float(name[3:])
return lambda x: x * mul_factor
elif name == 'trunc_exp':
return trunc_exp
elif name.startswith('+') or name.startswith('-'):
return lambda x: x + float(name)
elif name.lower() == 'sigmoid':
return lambda x: torch.sigmoid(x)
elif name.lower() == 'tanh':
return lambda x: torch.tanh(x)
else:
return getattr(F, name) | [
19,
648
] |
def METHOD_NAME():
gateway: HTTPGateway = HTTPGateway.load_config(
f'yaml/test-http-gateway.yml', runtime_args={'port': [12345]}
)
with gateway:
assert isinstance(gateway, HTTPGateway)
assert gateway.cors is True
assert gateway.title == 'my-gateway-title'
assert gateway.description == 'my-gateway-description' | [
9,
557,
721,
14,
1434
] |
def METHOD_NAME(self): | [
9,
2639,
553,
215,
5806,
156
] |
def METHOD_NAME(obj):
return json.dumps(
obj, indent=4, separators=(",", ": "), sort_keys=True, check_circular=True
) | [
278,
763
] |
def METHOD_NAME(endpoint):
"""Logout from the platform and delete credentials."""
from renku.command.login import logout_command
communicator = ClickCallback()
logout_command().with_communicator(communicator).build().execute(endpoint=endpoint)
click.secho("Successfully logged out.", fg=color.GREEN) | [
2431
] |
def METHOD_NAME(self): | [
102,
3312
] |
def METHOD_NAME(
names: t.List[str], time_period: MetricTimePeriod
) -> t.Dict[str, CountMetricWithGraph]:
"""
Given a time period and a set of metric names, gets the sum of the metric
over the period and a graphable list of timestamps and values.
The graph data always contains the start and end time stamps with None values
to make graphing easier.
"""
result = {}
start_time = _start_time(time_period).replace(second=0, microsecond=0, tzinfo=None)
end_time = datetime.now().replace(second=0, microsecond=0, tzinfo=None)
for metric_name in names:
stats = _get_cloudwatch_client().get_metric_statistics(
Namespace=METRICS_NAMESPACE,
MetricName=f"{metric_name}-count",
Statistics=["Sum"],
StartTime=start_time,
EndTime=end_time,
Period=_period(time_period),
)["Datapoints"]
total = int(functools.reduce(lambda acc, s: acc + s["Sum"], stats, 0))
graph_data: t.List[t.Tuple[datetime, t.Optional[int]]] = [
# Removing tzinfo because you can't work with timezone aware
# datetime objects and timezone unaware timedelta objects. Either
# way, eventually, these get decomposed to an epoch value, so this
# will not hurt.
# `_pad_with_None_values` expects timezone unaware objects.
(s["Timestamp"].replace(tzinfo=None), int(s["Sum"]))
for s in stats
]
graph_data.sort(key=lambda t: t[0]) # Sort by timestamp
graph_data = _pad_with_None_values(graph_data, start_time, end_time)
result[metric_name] = CountMetricWithGraph(total, graph_data)
return result | [
19,
29,
41,
303
] |
def METHOD_NAME(
cls, data: CompressionContainerProtocol, **kwargs: OptionalKwargs
) -> bytes:
"""Convert the high-level AT representation back into a BitStream."""
return data.to_bytes() | [
183
] |
def METHOD_NAME(self, ep, **kwargs):
return self._doRequest('post', ep, **kwargs) | [
72
] |
def METHOD_NAME(mocked_connections, monkeypatch):
"""Env vars should be used over defaults in connect"""
# Use monkeypatch to set environment variable only for this test
monkeypatch.setenv("FLOWMACHINE_LOG_LEVEL", "DUMMY_ENV_LOG_LEVEL")
monkeypatch.setenv("FLOWDB_PORT", "6969")
monkeypatch.setenv("FLOWMACHINE_FLOWDB_USER", "DUMMY_ENV_FLOWDB_USER")
monkeypatch.setenv("FLOWMACHINE_FLOWDB_PASSWORD", "DUMMY_ENV_FLOWDB_PASSWORD")
monkeypatch.setenv("FLOWDB_HOST", "DUMMY_ENV_FLOWDB_HOST")
monkeypatch.setenv("DB_CONNECTION_POOL_SIZE", "7777")
monkeypatch.setenv("DB_CONNECTION_POOL_OVERFLOW", "2020")
monkeypatch.setenv("REDIS_HOST", "DUMMY_ENV_REDIS_HOST")
monkeypatch.setenv("REDIS_PORT", "5050")
monkeypatch.setenv("REDIS_PASSWORD", "DUMMY_ENV_REDIS_PASSWORD")
(
core_set_log_level_mock,
core_init_Connection_mock,
core_init_StrictRedis_mock,
core_init_start_threadpool_mock,
) = mocked_connections
connect()
core_set_log_level_mock.assert_called_with(
"flowmachine.debug", "DUMMY_ENV_LOG_LEVEL"
)
core_init_Connection_mock.assert_called_with(
port=6969,
user="DUMMY_ENV_FLOWDB_USER",
password="DUMMY_ENV_FLOWDB_PASSWORD",
host="DUMMY_ENV_FLOWDB_HOST",
database="flowdb",
pool_size=7777,
overflow=2020,
)
core_init_StrictRedis_mock.assert_called_with(
host="DUMMY_ENV_REDIS_HOST", port=5050, password="DUMMY_ENV_REDIS_PASSWORD"
)
core_init_start_threadpool_mock.assert_called_with(
7777
) # for the time being, we should have num_threads = num_db_connections | [
9,
485,
2654
] |
def METHOD_NAME(timeout=CACHE_TIMEOUT) -> List[WebhookData]:
with opentracing_trace("get_observability_webhooks", "webhooks"):
buffer_name = get_buffer_name()
if cached := _webhooks_mem_cache.get(buffer_name, None):
webhooks_data, check_time = cached
if monotonic() - check_time <= timeout:
return webhooks_data
webhooks_data = cache.get(WEBHOOKS_KEY)
if webhooks_data is None:
webhooks_data = []
if webhooks := get_webhooks_for_event(WebhookEventAsyncType.OBSERVABILITY):
domain = Site.objects.get_current().domain
for webhook in webhooks:
webhooks_data.append(
WebhookData(
id=webhook.id,
saleor_domain=domain,
target_url=webhook.target_url,
secret_key=webhook.secret_key,
)
)
cache.set(WEBHOOKS_KEY, webhooks_data, timeout=CACHE_TIMEOUT)
_webhooks_mem_cache[buffer_name] = (webhooks_data, monotonic())
return webhooks_data | [
19,
3436
] |
def METHOD_NAME(self, arg):
arg_doc = arg.name
if arg.annotation:
annotation = self._fmt_expr(arg.annotation)
arg_doc = arg_doc + (': %s' % annotation)
return arg_doc | [
4739,
4889,
718
] |
def METHOD_NAME(message: Dict[str, str]) -> str:
"""
Extract the plain text that was used for signing to compare signatures.
This is done based on the message type. See this URL for more information:
https://docs.aws.amazon.com/sns/latest/dg/sns-example-code-endpoint-java-servlet.html
:param message: SNS Message
:return: Plain text for creating the signature on the client side
"""
keys: Sequence[str] = ()
m_type = message.get('Type')
if m_type in (
SnsNotificationType.SubscriptionConfirmation.value,
SnsNotificationType.UnsubscribeConfirmation.value,
):
keys = (
'Message',
'MessageId',
'SubscribeURL',
'Timestamp',
'Token',
'TopicArn',
'Type',
)
elif m_type == SnsNotificationType.Notification.value:
if message.get('Subject'):
keys = (
'Message',
'MessageId',
'Subject',
'Timestamp',
'TopicArn',
'Type',
)
else:
keys = (
'Message',
'MessageId',
'Timestamp',
'TopicArn',
'Type',
)
pairs = [f'{key}\n{message.get(key)}' for key in keys]
return '\n'.join(pairs) + '\n' | [
19,
526,
24,
2452
] |
def METHOD_NAME(name, saltenv="base"):
r"""
.. versionadded:: 2015.8.0
Display the rendered software definition from a specific sls file in the
local winrepo cache. This will parse all Jinja. Run pkg.refresh_db to pull
the latest software definitions from the master.
.. note::
This function does not ask a master for an sls file to render. Instead
it directly processes the file specified in `name`
Args:
name str: The name/path of the package you want to view. This can be the
full path to a file on the minion file system or a file on the local
minion cache.
saltenv str: The default environment is ``base``
Returns:
dict: Returns a dictionary containing the rendered data structure
.. note::
To use a file from the minion cache start from the local winrepo root
(``C:\salt\var\cache\salt\minion\files\base\win\repo-ng``). If you have
``.sls`` files organized in subdirectories you'll have to denote them
with ``.``. For example, if you have a ``test`` directory in the winrepo
root with a ``gvim.sls`` file inside, would target that file like so:
``test.gvim``. Directories can be targeted as well as long as they
contain an ``init.sls`` inside. For example, if you have a ``node``
directory with an ``init.sls`` inside, target that like so: ``node``.
CLI Example:
.. code-block:: bash
salt '*' winrepo.show_sls gvim
salt '*' winrepo.show_sls test.npp
salt '*' winrepo.show_sls C:\test\gvim.sls
"""
# Passed a filename
if os.path.exists(name):
sls_file = name
# Use a winrepo path
else:
# Get the location of the local repo
repo = _get_local_repo_dir(saltenv)
# Add the sls file name to the path
repo = repo.split("\\")
definition = name.split(".")
repo.extend(definition)
# Check for the sls file by name
sls_file = "{}.sls".format(os.sep.join(repo))
if not os.path.exists(sls_file):
# Maybe it's a directory with an init.sls
sls_file = "{}\\init.sls".format(os.sep.join(repo))
if not os.path.exists(sls_file):
# It's neither, return
return "Software definition {} not found".format(name)
# Load the renderer
renderers = salt.loader.render(__opts__, __salt__)
config = {}
# Run the file through the renderer
try:
config = salt.template.compile_template(
sls_file,
renderers,
__opts__["renderer"],
__opts__["renderer_blacklist"],
__opts__["renderer_whitelist"],
)
# Return the error if any
except SaltRenderError as exc:
log.debug("Failed to compile %s.", sls_file)
log.debug("Error: %s.", exc)
config["Message"] = "Failed to compile {}".format(sls_file)
config["Error"] = "{}".format(exc)
return config | [
697,
4687
] |
def METHOD_NAME(pd_series: pd.Series) -> pd.Series:
"""
>>> import datetime
>>> d = datetime.datetime
>>> date_index1 = [d(2000,1,1,23),d(2000,1,2,23),d(2000,1,3,23)]
>>> s1 = pd.Series([0,5,6], index=date_index1)
>>> replace_all_zeros_with_nan(s1)
2000-01-01 23:00:00 NaN
2000-01-02 23:00:00 5.0
2000-01-03 23:00:00 6.0
dtype: float64
"""
copy_pd_series = copy(pd_series)
copy_pd_series[copy_pd_series == 0.0] = np.nan
if all(copy_pd_series.isna()):
copy_pd_series[:] = np.nan
return copy_pd_series | [
369,
75,
3523,
41,
4082
] |
def METHOD_NAME(cls):
cls.pse = PeriodicTable() | [
0,
1,
2
] |
def METHOD_NAME(
from_: Union[str, bytes], salt: Union[str, bytes], init_code: Union[str, bytes]
) -> ChecksumAddress:
"""
.. deprecated:: use mk_contract_address_2
:param from_:
:param salt:
:param init_code:
:return:
"""
warnings.warn(
"`generate_address_2` is deprecated, use `mk_contract_address_2`",
DeprecationWarning,
)
return mk_contract_address_2(from_, salt, init_code) | [
567,
85,
988
] |
def METHOD_NAME(self, data, w=1.0, axis=-1):
"""
Evaluate a numerical volume integral of the given data
on this grid.
data: Data to integrate.
w: Optional weighting function.
axis: Axis to integrate over.
"""
return (self.VpVol*self.dr * w * data).sum(axis) | [
4071
] |
def METHOD_NAME(self) -> None:
self.assertEqual(1000, next_int()) | [
9,
6690,
243,
962
] |
def METHOD_NAME(self):
# init and place scrollbar
self.vert_scrollbar = SafeScrollbar(self, orient=tk.VERTICAL)
self.vert_scrollbar.grid(row=0, column=1, sticky=tk.NSEW)
# init and place tree
self.tree = ttk.Treeview(self, yscrollcommand=self.vert_scrollbar.set)
self.tree.grid(row=0, column=0, sticky=tk.NSEW)
self.vert_scrollbar["command"] = self.tree.yview
# set single-cell frame
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
# init tree events
self.tree.bind("<<TreeviewSelect>>", self._on_select, True)
self.tree.bind("<Map>", self._update_frame_contents, True)
# configure the only tree column
self.tree.column("#0", anchor=tk.W, stretch=True)
# self.tree.heading('#0', text='Item (type @ line)', anchor=tk.W)
self.tree["show"] = ("tree",)
self._class_img = get_workbench().get_image("outline-class")
self._method_img = get_workbench().get_image("outline-method") | [
176,
1551
] |
def METHOD_NAME(n, admin_client, entry_name=None):
# We need an entry in the schedule to create TRs for
try:
entry = ScheduleEntry.objects.get(name=entry_name)
except Exception:
test_entry = TEST_SCHEDULE_ENTRY
if entry_name is not None:
test_entry["name"] = entry_name
rjson = post_schedule(admin_client, test_entry)
entry_name = rjson["name"]
entry = ScheduleEntry.objects.get(name=entry_name)
for i in range(n):
started = timezone.now()
tr = TaskResult(
schedule_entry=entry,
task_id=i + 1,
started=started,
finished=started + ONE_MICROSECOND,
duration=ONE_MICROSECOND,
status="success",
detail="",
)
tr.max_disk_usage = TEST_MAX_DISK_USAGE
tr.save()
return entry_name | [
129,
758,
51
] |
def METHOD_NAME(self, config_dict):
"""Given a configuration dictionary, make any modifications required
by the driver.
The default behavior is to make no changes.
This method gives a driver the opportunity to modify configuration
settings that affect its performance. For example, if a driver can
support hardware archive record generation, but software archive record
generation is preferred, the driver can change that parameter using
this method.
"""
pass | [
2444,
200
] |
def METHOD_NAME(self):
"""
Check if rpmdb binary was found in root_dir to indicate
that the rpm system is present.
"""
rpm_bin = Path.which(
'rpmdb', root_dir=self.root_dir, access_mode=os.X_OK
)
if not rpm_bin:
return False
return True | [
220,
3466
] |
def METHOD_NAME(self):
audit_info = AWS_Audit_Info(
session_config=None,
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
region_name=AWS_REGION,
),
audited_account=AWS_ACCOUNT_NUMBER,
audited_account_arn=AWS_ACCOUNT_ARN,
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=AWS_REGION,
credentials=None,
assumed_role_info=None,
audited_regions=None,
organizations_metadata=None,
audit_resources=None,
mfa_enabled=False,
audit_metadata=Audit_Metadata(
services_scanned=0,
expected_checks=[],
completed_checks=0,
audit_progress=0,
),
)
return audit_info | [
0,
4331,
1422,
100
] |
def METHOD_NAME(self):
cloudfront_client = mock.MagicMock
cloudfront_client.distributions = {
"DISTRIBUTION_ID": Distribution(
arn=DISTRIBUTION_ARN,
id=DISTRIBUTION_ID,
region=REGION,
origins=[],
default_cache_config=DefaultCacheConfigBehaviour(
realtime_log_config_arn="",
viewer_protocol_policy=ViewerProtocolPolicy.https_only,
field_level_encryption_id="",
),
)
}
with mock.patch(
"prowler.providers.aws.services.cloudfront.cloudfront_service.CloudFront",
new=cloudfront_client,
):
# Test Check
from prowler.providers.aws.services.cloudfront.cloudfront_distributions_field_level_encryption_enabled.cloudfront_distributions_field_level_encryption_enabled import (
cloudfront_distributions_field_level_encryption_enabled,
)
check = cloudfront_distributions_field_level_encryption_enabled()
result = check.execute()
assert len(result) == 1
assert result[0].region == REGION
assert result[0].resource_arn == DISTRIBUTION_ARN
assert result[0].resource_id == DISTRIBUTION_ID
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"CloudFront Distribution {DISTRIBUTION_ID} has Field Level Encryption disabled."
)
assert result[0].resource_tags == [] | [
9,
206,
664,
101,
33,
326,
1295
] |
def METHOD_NAME(index: int, register: int) -> qulacs_core.QuantumGate_CPTP: ... | [
479
] |
def METHOD_NAME():
"""Override plt.show to show_matplotlib method
"""
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
matplotlib.pyplot.show=show_matplotlib
except Exception as e:
print(e)
return | [
102,
17124,
697
] |
def METHOD_NAME(token: str,
update_access_time: bool = True
) -> Optional[model.User]:
data = decode(token)
if not data:
return None
# do preprocessing in reverse order, allowing onion-like "unwrapping" of
# the data, added during postprocessing, when token was
# created. `Interface._reverse_iteration_order` cannot be used here,
# because all other methods of IApiToken should be executed in a normal
# order and only `IApiToken.preprocess_api_token` must be different.
for plugin in reversed(list(_get_plugins())):
data = plugin.preprocess_api_token(data)
if not data or u"jti" not in data:
return None
token_obj = model.ApiToken.get(data[u"jti"])
if not token_obj:
return None
if update_access_time:
token_obj.touch(True)
return token_obj.owner | [
19,
21,
280,
466
] |
def METHOD_NAME(self, model, inputs, return_outputs=False):
return model(
input_ids=inputs["input_ids"],
attention_mask=torch.ones_like(inputs["input_ids"]).bool(),
labels=inputs["input_ids"],
).loss | [
226,
1572
] |
def METHOD_NAME(tmpdir): # pragma: win32 no cover
tmpdir.join('exe').ensure()
with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./exe')
assert excinfo.value.args == ('Executable `./exe` is not executable',) | [
9,
14703,
130,
2777
] |
def METHOD_NAME(cid, classes):
if len(classes) == 0:
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
try:
classes[obj.cid] = obj
except AttributeError:
pass
if cid in classes:
return classes[cid]
else:
return None | [
1155
] |
def METHOD_NAME(self, user, save=True):
# Gives `user` unrestricted model-level access to everything listed in
# `auth_permission`. Without this, actions
# on individual instances are immediately denied and object-level permissions
# are never considered.
if user.is_anonymous:
user = User.objects.get(id=settings.ANONYMOUS_USER_ID)
user.user_permissions.set(Permission.objects.all())
if save:
user.save() | [
238,
804,
24,
21
] |
def METHOD_NAME(args, classnames, template, predictor):
with torch.no_grad():
clip_weights = []
for classname in classnames:
classname = classname.replace('_', ' ')
text = {'text': [t.format(classname) for t in template]}
class_embeddings = extract_embedding(args, text, predictor, ["text"])
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
clip_weights.append(class_embedding)
clip_weights = torch.stack(clip_weights, dim=1).cuda()
return clip_weights | [
567,
4226,
733
] |
def METHOD_NAME(path):
if not path:
return {}
with open(path, 'r') as f:
obs_labels = [line.rstrip() for line in f]
return {c: i for i, c in enumerate(obs_labels)} | [
557,
1834,
636,
553
] |
def METHOD_NAME(self):
for record in self:
if record.zip and 20 < len(record.zip):
raise exceptions.ValidationError(
_(
"TicketBAI Invoice %(name)s:\n"
"Customer %(customer)s ZIP Code %(zip)s longer than "
"expected. "
"Should be 20 characters max.!"
)
% {
"name": record.tbai_invoice_id.name,
"customer": record.name,
"zip": record.zip,
}
) | [
250,
1426
] |
def METHOD_NAME(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.XPATH, "//title")
with pytest.raises(WebDriverException):
element.click() | [
9,
427,
1471,
3589,
442,
1646,
4069
] |
def METHOD_NAME():
return {genesis: {}} | [
111,
467,
468
] |
def METHOD_NAME(config: ConfigRoot):
seednode_config = config.seednode_config_folder()
seednode_config_file = seednode_config / 'p2p.toml'
data = utils.read_toml_file(seednode_config_file)
p2p_toml.patch_for_seednode(data, config)
utils.write_toml_file(seednode_config_file, data) | [
1575,
16373,
4172,
200
] |
def METHOD_NAME(shape=_shape_3d_default(), affine=_affine_eye()):
"""Return a ones-filled 3D Nifti1Image (identity affine).
Mostly used for set up in other fixtures in other testing modules.
"""
return _img_ones(shape, affine) | [
2029,
1529,
11650
] |
def METHOD_NAME():
filesystem_id = random_chars(96) if random_bool() else None
params = {
"filesystem_id": filesystem_id,
"uuid": str(uuid.uuid4()),
"journalist_designation": random_chars(50),
"flagged": bool_or_none(),
"last_updated": random_datetime(nullable=True),
"pending": bool_or_none(),
"interaction_count": random.randint(0, 1000),
}
sql = """INSERT INTO sources (filesystem_id, uuid,
journalist_designation, flagged, last_updated, pending,
interaction_count)
VALUES (:filesystem_id, :uuid, :journalist_designation,
:flagged, :last_updated, :pending, :interaction_count)
"""
db.engine.execute(text(sql), **params) | [
238,
1458
] |
def METHOD_NAME(lvl, msg, n=1, *, name=None):
"""
Log once per n times.
Args:
lvl (int): the logging level
msg (str):
n (int):
name (str): name of the logger to use. Will use the caller's module by default.
"""
caller_module, key = _find_caller()
_LOG_COUNTER[key] += 1
if n == 1 or _LOG_COUNTER[key] % n == 1:
logging.getLogger(name or caller_module).log(lvl, msg) | [
390,
292,
293
] |
def METHOD_NAME(self: Parser, node: doc.Expr) -> None:
pass | [
72,
716,
125,
559
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_04_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-04-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME(tmpdir):
git_clone("https://github.com/python-poetry/poetry.git", tmpdir)
run_isort((str(tmpdir), "--skip", "tests")) | [
9,
2463
] |
async def METHOD_NAME(
decoy: Decoy,
mock_run_controller: RunController,
mock_maintenance_engine_store: MaintenanceEngineStore,
) -> None:
"""It should clear an existing maintenance run before issuing play action."""
run_id = "some-run-id"
action_id = "some-action-id"
created_at = datetime(year=2021, month=1, day=1)
action_type = RunActionType.PLAY
request_body = RequestModel(data=RunActionCreate(actionType=action_type))
expected_result = RunAction(
id="some-action-id",
createdAt=created_at,
actionType=RunActionType.PLAY,
)
decoy.when(mock_maintenance_engine_store.current_run_id).then_return("some-id")
decoy.when(
mock_run_controller.create_action(
action_id=action_id,
action_type=action_type,
created_at=created_at,
)
).then_return(expected_result)
result = await create_run_action(
runId=run_id,
request_body=request_body,
run_controller=mock_run_controller,
action_id=action_id,
created_at=created_at,
maintenance_engine_store=mock_maintenance_engine_store,
)
decoy.verify(await mock_maintenance_engine_store.clear(), times=1)
assert result.content.data == expected_result
assert result.status_code == 201 | [
9,
64,
1006,
11197,
4107,
22
] |
def METHOD_NAME(self, v, nbit: int = 14):
r"""Quantize value using floor."""
return np.floor(v * (2**nbit)) | [
16582
] |
def METHOD_NAME(self):
content = "foo.c:1;test;zkouška sirén"
store = self.parse_store(content.encode("utf-8"))
assert len(store.units) == 1
assert store.units[0].source == "test"
assert store.units[0].target == "zkouška sirén"
store = self.parse_store(content.encode("utf-8"), encoding="utf-8")
assert len(store.units) == 1
assert store.units[0].source == "test"
assert store.units[0].target == "zkouška sirén"
store = self.parse_store(content.encode("iso-8859-2"), encoding="iso-8859-2")
assert len(store.units) == 1
assert store.units[0].source == "test"
assert store.units[0].target == "zkouška sirén"
with pytest.raises(UnicodeDecodeError):
store = self.parse_store(content.encode("iso-8859-2"), encoding="utf-8") | [
9,
2300
] |
def METHOD_NAME(config_instance: config.Config):
METHOD_NAME = ansible_playbook.AnsiblePlaybook("playbook", config_instance)
return METHOD_NAME | [
89
] |
def METHOD_NAME(self, loops):
# Derived from here, taking some shortcuts: https://github.com/dmlc/tvm/blob/master/topi/tests/python/test_topi_conv2d_nchw.py
import topi
from topi.util import get_const_tuple
import tvm
from plaidbench import core
device = self.params.backend_opts['tvm_driver']
ctx = tvm.context(device, 0)
with tvm.target.create(device):
I, W, B, i_np, w_np, b_np = self.create_dataset_tvm()
O = topi.nn.dense_default(I, W, B)
t_sched = topi.cuda.dense.schedule_dense([O])
i = tvm.nd.array(i_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(b_np, ctx)
with tvm.build_config(auto_unroll_max_step=1400, unroll_explicit=device != 'cuda'):
op = tvm.build(t_sched, [I, W, B], device, name=self.get_key())
sw = core.StopWatch(False)
sw.start()
for _ in range(loops):
op(i, w, b)
ctx.sync()
sw.stop()
return sw.elapsed() | [
22,
578,
7816
] |
def METHOD_NAME(self):
"""
Text representation of a scatter plot graphics primitive.
EXAMPLES::
sage: import numpy
sage: from sage.plot.scatter_plot import ScatterPlot
sage: ScatterPlot(numpy.array([0,1,2]), numpy.array([3.5,2,5.1]), {})
Scatter plot graphics primitive on 3 data points
"""
return 'Scatter plot graphics primitive on %s data points' % len(self.xdata) | [
92
] |
def METHOD_NAME(self, position="const Vector&", gridLevel="const int"):
"Find the GridCellIndex for the given position on the given level"
return "GridCellIndexType" | [
753,
118,
-1
] |
def METHOD_NAME(model: SomeScheme):
logger.info("Should not get here, will fail on body validation") | [
3027,
3028,
442
] |
def METHOD_NAME(upgrader, award_2):
award_2['schema_version'] = '6'
award_2['milestones'] = [
{'assay_term_name': 'single-nuclei ATAC-seq'},
{'assay_term_name': 'HiC'},
]
value = upgrader.upgrade('award', award_2, target_version='7')
assert value['schema_version'] == '7'
TestCase().assertListEqual(
sorted(value['milestones'], key=lambda x: x['assay_term_name']),
sorted(
[
{'assay_term_name': 'single-nucleus ATAC-seq'},
{'assay_term_name': 'HiC'}
],
key=lambda x: x['assay_term_name']
)
) | [
9,
2585,
738,
8059
] |
def METHOD_NAME(self):
"""
Retrieves the speed of fan in revolutions per minute (RPM)
Returns:
An integer, speed of the fan in RPM
"""
attr_path = HWMON_DIR + self.fan_rpm_attr
attr_rv = self.__get_attr_value(attr_path)
if (attr_rv != 'ERR'):
return int(float(attr_rv))
else:
return False | [
19,
1942,
3466
] |
def METHOD_NAME(self):
return self._main_ids | [
307,
308
] |
def METHOD_NAME(self, conn, statement, obfuscated_statement, query_signature):
null_parameter = ','.join(
'null' for _ in range(self._get_number_of_parameters_for_prepared_statement(conn, query_signature))
)
execute_prepared_statement_query = EXECUTE_PREPARED_STATEMENT_QUERY.format(
prepared_statement=query_signature, generic_values=null_parameter
)
try:
return self._execute_query_and_fetch_rows(
conn,
EXPLAIN_QUERY.format(
explain_function=self._config.statement_samples_config.get(
'explain_function', 'datadog.explain_statement'
),
statement=execute_prepared_statement_query,
),
)
except Exception as e:
logged_statement = obfuscated_statement
if self._config.log_unobfuscated_plans:
logged_statement = statement
logger.warning(
'Failed to explain parameterized statement(%s)=[%s] | err=[%s]',
query_signature,
logged_statement,
e,
)
return None | [
6117,
2246,
925
] |
def METHOD_NAME(self):
ownership = OwnershipProcessor('ownership')
got = ownership.process_source_zone(zone.copy())
self.assertEqual(
[
'',
'*',
'_owner.a',
'_owner.a._wildcard',
'_owner.a.the-a',
'_owner.aaaa.the-aaaa',
'_owner.txt.the-txt',
'the-a',
'the-aaaa',
'the-txt',
],
sorted([r.name for r in got.records]),
)
found = False
for record in got.records:
if record.name.startswith(ownership.txt_name):
self.assertEqual([ownership.txt_value], record.values)
# test _is_ownership while we're in here
self.assertTrue(ownership._is_ownership(record))
found = True
else:
self.assertFalse(ownership._is_ownership(record))
self.assertTrue(found) | [
9,
356,
1458,
2456
] |
def METHOD_NAME(
self, remote_cmd: str = "", remote_file: Optional[str] = None
) -> int:
"""Get the file size of the remote file."""
return self._remote_file_size_unix(
remote_cmd=remote_cmd, remote_file=remote_file
) | [
2437,
171,
1318
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
"""
return pulumi.get(self, "subnet_resource_id") | [
1782,
191,
147
] |
def METHOD_NAME(self):
symbols = symtable.symtable("42", "?", "single") | [
9,
97
] |
def METHOD_NAME(self): | [
9,
8568,
555
] |
def METHOD_NAME(subscription, query_dataset: Dataset):
entity_key: EntityKey = map_aggregate_to_entity_key(
query_dataset, subscription.snuba_query.aggregate
)
_delete_from_snuba(
query_dataset,
subscription.subscription_id,
entity_key,
) | [
34,
835,
280,
2519
] |
def METHOD_NAME(cls, *args, **kwargs):
_get_message(wrapped_obj, category, action)
# if class __new__ is empty
if new_class is object.__new__:
return new_class(cls)
return new_class(cls, *args, **kwargs) | [
2354,
3847
] |
def METHOD_NAME(self, request, course_id, *args, **kwargs):
"""
Checks for course author access for the given course by the requesting user.
Calls the view function if has access, otherwise raises a 403.
"""
course_key = CourseKey.from_string(course_id)
if not has_course_author_access(request.user, course_key):
raise DeveloperErrorViewMixin.api_error(
status_code=status.HTTP_403_FORBIDDEN,
developer_message='The requesting user does not have course author permissions.',
error_code='user_permissions',
)
return view(self, request, course_key, *args, **kwargs) | [
291,
1179
] |
def METHOD_NAME(file_name, extension=None, **kwargs) -> pd.DataFrame:
if extension is None:
_, extension = split_ext(file_name)
with open(file_name, "rb") as file_pointer:
file_bytes: bytes = file_pointer.read()
if len(file_bytes) == 0:
# empty file means empty data frame
return pd.DataFrame()
elif extension in [".xls", ".xlsx"]:
bytes_io = io.BytesIO(file_bytes)
return pd.read_excel(bytes_io, **kwargs)
elif extension == ".ods":
bytes_io = io.BytesIO(file_bytes)
return pd.read_excel(bytes_io, engine="odf", **kwargs)
encoding = chardet.detect(file_bytes)["encoding"]
if encoding is None:
encoding = "utf8"
kwargs["encoding"] = encoding
file_str = file_bytes.decode(encoding)
if extension == ".json":
string_io = io.StringIO(file_str)
data_frame = pd.read_json(string_io, typ="frame", **kwargs)
assert isinstance(data_frame, pd.DataFrame)
return data_frame
cleaned_file_str = re.sub(
r"[^\x00-\x7f]", "", file_str
) # remove unicode characters, e.g. BOM
file_lines = cleaned_file_str.splitlines()
file_lines = [s for s in file_lines if len(s.strip()) > 0]
comment_prefix: str | None = None
comment_m = re.match(
r"^(?P<prefix>[£$%^#/\\]+)", file_lines[0]
) # detect prefix only at start of file
if comment_m is not None:
comment_prefix = comment_m.group("prefix")
if comment_prefix is not None:
file_lines = [s for s in file_lines if not s.startswith(comment_prefix)]
cleaned_file_str = "\n".join(file_lines)
dialect = None
try:
sniffer = csv.Sniffer()
dialect = sniffer.sniff(cleaned_file_str)
except csv.Error:
pass
if extension == ".tsv":
kwargs["sep"] = "\t"
elif extension == ".csv":
kwargs["sep"] = "[,;]"
if len(file_lines) == 0:
# empty data frame
return pd.DataFrame()
elif len(file_lines) == 1 and str_is_convertible_to_float(file_lines[0]):
# just a number in a file
return pd.DataFrame([float(file_lines[0])])
elif len(file_lines) == 2 and str_is_convertible_to_float(file_lines[1]):
# a number with a column name in a file
return pd.DataFrame(
[float(file_lines[1])],
columns=[file_lines[0].strip()],
)
if isinstance(kwargs.get("sep"), str):
if kwargs["sep"] not in cleaned_file_str:
del kwargs["sep"]
if kwargs.get("sep") is None:
if (
dialect is not None
and dialect.delimiter != " " # single space should be \s+
and re.match(r"[a-zA-Z0-9\.]", dialect.delimiter) is None # ignore letters
):
kwargs["sep"] = dialect.delimiter
else:
kwargs["sep"] = r"\s+"
if len(file_lines) < 2:
kwargs["header"] = None
else:
scores = [
mean(
map(
float,
map(
str_is_convertible_to_float, re.split(kwargs["sep"], file_line)
),
)
)
for file_line in file_lines[:10]
]
if min(scores[1:]) > 0.0:
# check if there are at least some float values, without
# which this heuristic would be pointless
if scores[0] >= min(scores[1:]):
# the first line does not have a lower amount of floats
# than subsequent lines
kwargs["header"] = None
string_io = io.StringIO(cleaned_file_str)
data_frame = pd.read_csv(string_io, engine="python", **kwargs)
assert isinstance(data_frame, pd.DataFrame)
# data_frame.reset_index(inplace=True) # restore detected index_col
def strip_if_str(s):
if isinstance(s, str):
return s.strip()
return s
data_frame.rename(columns=strip_if_str, inplace=True)
if data_frame.columns[0] == "Unnamed: 0":
# detect index_col that pandas may have missed
if not any(
isinstance(s, float)
or (isinstance(s, str) and str_is_convertible_to_float(s))
for s in data_frame["Unnamed: 0"]
):
data_frame.set_index("Unnamed: 0", inplace=True)
data_frame.index.rename(None, inplace=True)
return data_frame | [
203,
6169
] |
def METHOD_NAME(client, some_blog, snapshot):
data = {
"data": {
"type": "entries",
"attributes": {
"blog": some_blog.pk,
"body-text": "body",
"headline": "headline",
},
}
}
with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
assert snapshot == perform_error_test(client, data) | [
9,
865,
33,
343,
309,
168
] |
def METHOD_NAME(self, filepath):
""" Sets the media file to be played by the widget.
Args:
filepath (`pathlib.Path`): The path to the media file path
"""
self.anim = GdkPixbuf.PixbufAnimation.new_from_file(str(filepath))
self.base_size = (self.anim.get_width(), self.anim.get_height())
self.anim_iter = self.anim.get_iter(None)
self.set_transform()
self.advance_gif() | [
0,
171
] |
def METHOD_NAME(self):
_test_llama()
_test_chatglm()
_test_bloom() | [
9,
578,
557,
411
] |
def METHOD_NAME( z, r ):
dens = np.ones_like( z )
# Make the density smooth at rmax
dens = np.where( r > rmax-smooth_r,
np.cos(0.5*np.pi*(r-smooth_r)/smooth_r)**2, dens)
# Make the density 0 below p_zmin
dens = np.where( z < p_zmin, 0., dens )
# Make a linear ramp
dens = np.where( (z>=p_zmin) & (z<p_zmin+ramp),
(z-p_zmin)/ramp*dens, dens )
return( dens ) | [
13565,
717,
11833
] |
def METHOD_NAME(organisation_id: int, campaign_id: int):
"""Creates new campaign from DTO"""
# Check if campaign exists
CampaignService.get_campaign(campaign_id)
# Check if organisation exists
OrganisationService.get_organisation_by_id(organisation_id)
statement = campaign_organisations.insert().values(
campaign_id=campaign_id, organisation_id=organisation_id
)
db.session.execute(statement)
db.session.commit()
new_campaigns = CampaignService.get_organisation_campaigns_as_dto(
organisation_id
)
return new_campaigns | [
129,
8852,
4074
] |
def METHOD_NAME(
chain: SupportedBlockchain,
timeout: int = NODE_CONNECTION_TIMEOUT,
node_names: Optional[Sequence[NodeName]] = None,
) -> DictNodeNameNodeAttributes:
"""Attempt to connect to either a default sequence of reliable nodes for
testing (e.g. Parity ones) or to a custom ones (via `node_names` param).
Finally return the available node attributes map.
NB: prioritising nodes by block_number is disabled.
"""
def attempt_connect_node(node: NodeName) -> tuple[NodeName, Optional[NodeNameAttributes]]:
try:
node_interface = SubstrateInterface(
url=node.endpoint(),
type_registry_preset='kusama',
use_remote_preset=True,
)
except (requests.exceptions.RequestException, SubstrateRequestException) as e:
message = (
f'Substrate tests failed to connect to {node} node at '
f'endpoint: {node.endpoint()}. Connection error: {e!s}.',
)
log.error(message)
return node, None
log.info(f'Substrate tests connected to {node} node at endpoint: {node_interface.url}.')
node_attributes = NodeNameAttributes(
node_interface=node_interface,
weight_block=BlockNumber(0),
)
return node, node_attributes
if chain == SupportedBlockchain.KUSAMA:
node_names = node_names or KUSAMA_TEST_NODES
else:
raise AssertionError(f'Unexpected substrate chain type: {chain} at test')
greenlets = [gevent.spawn(attempt_connect_node, node) for node in node_names]
jobs = gevent.joinall(greenlets, timeout=timeout)
# Populate available node attributes map
available_node_attributes_map: DictNodeNameNodeAttributes = {}
for job in jobs:
node, node_attributes = job.value
if node_attributes:
available_node_attributes_map[node] = node_attributes
connected_nodes = set(available_node_attributes_map.keys())
not_connected_nodes = set(node_names) - connected_nodes
if not_connected_nodes:
log.info(
f'Substrate {chain} tests failed to connect to nodes: '
f'{",".join([str(node) for node in not_connected_nodes])} ',
)
return available_node_attributes_map | [
3142,
707,
9,
480
] |
def METHOD_NAME(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.options.shared:
del self.options.fPIC | [
111
] |
def METHOD_NAME(self):
query = dns.message.make_query('example', 'NS', want_dnssec=True)
query.flags |= dns.flags.AD
expectedNS = dns.rrset.from_text('example.', 0, 'IN', 'NS', 'ns1.example.', 'ns2.example.')
res = self.sendUDPQuery(query)
self.assertMessageIsAuthenticated(res)
self.assertRRsetInAnswer(res, expectedNS) | [
9,
12017
] |
def METHOD_NAME(device, simulation_factory, lattice_snapshot_factory):
"""Test that Clusters produces finite size clusters."""
if (isinstance(device, hoomd.device.GPU)
and hoomd.version.gpu_platform == 'ROCm'):
pytest.xfail("Clusters fails on ROCm (#1605)")
sim = simulation_factory(
lattice_snapshot_factory(particle_types=['A', 'B'],
dimensions=3,
a=4,
n=7,
r=0.1))
mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1)
mc.shape['A'] = dict(diameter=1.1)
mc.shape['B'] = dict(diameter=1.3)
sim.operations.integrator = mc
cl = hoomd.hpmc.update.Clusters(trigger=hoomd.trigger.Periodic(5),
pivot_move_probability=0.5)
sim.operations.updaters.append(cl)
sim.run(10)
avg = cl.avg_cluster_size
assert avg > 0 | [
9,
11261,
687
] |
def METHOD_NAME(self):
# Generate a noisy signal
fs = 2400
T = 6
nsamples = T * fs
t = np.linspace(0, T, nsamples, endpoint=False)
a = 0.02
f0 = 300
x = 0.5 * np.sin(2 * np.pi * f0 * t)
x += 0.25 * np.sin(2 * np.pi * 2 * f0 * t)
x += 0.25 * np.sin(2 * np.pi * 3 * f0 * t)
#data = x.astype(np.complex64)
data = np.sin(2 * np.pi * f0 * t).astype(np.complex64)
print("Len data", len(data))
a, b = self.narrowband_iir(f0, 100, fs)
s = a.sum() + b.sum()
#a /= s
#b /= s
print(a, b)
filtered_data = signal_functions.iir_filter(a, b, data)
#plt.plot(data, label='Noisy signal')
plt.plot(np.fft.fft(filtered_data), label='Filtered signal (%g Hz)' % f0)
plt.legend(loc='upper left')
plt.show() | [
9,
-1,
3412
] |
def METHOD_NAME(upgrader, library_linkers):
value = upgrader.upgrade('library', library_linkers, current_version='15', target_version='16')
assert value['schema_version'] == '16'
assert len(value['linkers']) == 4
assert {
'type': 'linker a top',
'sequence': 'GGCCGCGATATCTTATCCAAC'
} in value['linkers']
assert {
'type': 'linker a bottom',
'sequence': 'GGCCGCGATATCTTATCCAAC'
} in value['linkers']
assert {
'type': 'linker b top',
'sequence': 'GTTGGATAAGATATCGC'
} in value['linkers']
assert {
'type': 'linker b bottom',
'sequence': 'GTTGGATAAGATATCGC'
} in value['linkers']
assert value['notes'] == 'The linkers of this library were converted as linker a and linker b have been deprecated as linkers type.' | [
9,
3106,
738,
3473,
24,
7635
] |
def METHOD_NAME(self, rtype=None, name=None, content=None):
query = {"domain": self.domain_id}
payload = self._get("/dnsListRecords", query)
records = []
for record in payload.find("reply").findall("resource_record"):
processed_record = {
"type": record.find("type").text,
"name": record.find("host").text,
"ttl": record.find("ttl").text,
"content": record.find("value").text,
"id": record.find("record_id").text,
}
records.append(processed_record)
if rtype:
records = [record for record in records if record["type"] == rtype]
if name:
records = [
record for record in records if record["name"] == self._full_name(name)
]
if content:
records = [record for record in records if record["content"] == content]
LOGGER.debug("list_records: %s", records)
return records | [
245,
2530
] |
def METHOD_NAME():
pd_net = pn.kb_extrem_dorfnetz(p_load_mw=.002, q_load_mvar=.001)
assert abs(pd_net.line.length_km.sum() - 3.088) < 0.00000001
assert abs(pd_net.load.p_mw.sum() - .116) < 0.00000001
assert abs(pd_net.load.q_mvar.sum() - 0.058) < 0.00000001
assert len(pd_net.bus.index) == 118
assert len(pd_net.line.index) == 116
assert len(pd_net.trafo.index) == 1
pp.runpp(pd_net)
assert pd_net.converged | [
9,
986,
14685,
15776
] |