text (stringlengths 15-7.82k) | ids (sequencelengths 1-7) |
---|---|
def METHOD_NAME(self, client_id):
"""Default method for ``AuthorizationServer.query_client``. Developers MAY
rewrite this function to meet their own needs.
"""
try:
return self.client_model.objects.get(client_id=client_id)
except self.client_model.DoesNotExist:
return None | [
539,
340
] |
def METHOD_NAME(hdv2_stack):
# Test energy bands
ebands = hdv2_stack.energy_bands([0.1, 60] * u.MeV, 0.1 * u.MeV, dx=1 * u.um)
# Expected energy bands, in MeV (only in active layers)
expected = np.array([[3.5, 3.8], [4.6, 4.9], [5.6, 5.7], [6.4, 6.5], [7.1, 7.2]])
assert np.allclose(ebands.to(u.MeV).value[:5, :], expected, atol=0.15) | [
9,
10327,
1501,
5121,
1620,
923
] |
def METHOD_NAME(self, layout, ctx):
if ctx.type == 'ID':
if ctx.id is not None and isinstance(ctx.id, bpy.types.Object):
layout.label(text=ctx.id.name, icon='OBJECT_DATA')
else:
layout.label(text="Invalid id")
elif ctx.type == 'MODIFIER':
layout.label(text=ctx.modifier_name, icon='MODIFIER')
elif ctx.type == 'GROUP_NODE':
layout.label(text=ctx.ui_name, icon='NODE')
elif ctx.type == 'SIMULATION_ZONE':
layout.label(text="Simulation Zone")
elif ctx.type == 'REPEAT_ZONE':
layout.label(text="Repeat Zone")
elif ctx.type == 'VIEWER_NODE':
layout.label(text=ctx.ui_name) | [
1100,
6169,
198
] |
def METHOD_NAME(image):
return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) | [
197,
4091,
24,
17692
] |
def METHOD_NAME(x: T) -> T:
return x | [
2989
] |
def METHOD_NAME(self, params):
export_format = params.get("format")
items = operator.itemgetter(
"model", "fields", "ids", "domain", "import_compat", "context", "user_ids"
)(params)
(model_name, fields_name, ids, domain, import_compat, context, user_ids) = items
model = self.env[model_name].with_context(
import_compat=import_compat, **context
)
records = model.browse(ids) or model.search(
domain, offset=0, limit=False, order=False
)
if not model._is_an_ordinary_table():
fields_name = [field for field in fields_name if field["name"] != "id"]
field_names = [f["name"] for f in fields_name]
import_data = records.export_data(field_names).get("datas", [])
if import_compat:
columns_headers = field_names
else:
columns_headers = [val["label"].strip() for val in fields_name]
if export_format == "csv":
csv = CSVExport()
return csv.from_data(columns_headers, import_data)
else:
xls = ExcelExport()
return xls.from_data(columns_headers, import_data) | [
19,
171,
459
] |
def METHOD_NAME():
a = ArrayKey("A")
b = ArrayKey("B")
null_key = ArrayKey("NULL")
source_a = ExampleSourceRandomLocation(a)
source_b = ExampleSourceRandomLocation(b)
pipeline = (source_a, source_b) + MergeProvider() + CustomRandomLocation(null_key)
with build(pipeline):
with pytest.raises(PipelineRequestError):
batch = pipeline.request_batch(
BatchRequest(
{
a: ArraySpec(roi=Roi((0, 0, 0), (200, 20, 20))),
b: ArraySpec(roi=Roi((1000, 100, 100), (220, 22, 22))),
}
)
) | [
9,
4183
] |
def METHOD_NAME(builder, fusedActivationFunction): builder.PrependInt8Slot(0, fusedActivationFunction, 0) | [
238,
6531,
648,
559
] |
def METHOD_NAME(self, config: Mapping[str, Any]) -> List[Stream]:
config = self._validate_and_transform(config)
authenticator = self.get_authenticator(config)
args = {"authenticator": authenticator, "domain": config["domain"], "projects": config["projects"]}
incremental_args = {**args, "start_date": config.get("start_date")}
render_fields = config.get("render_fields", False)
issues_stream = Issues(
**incremental_args,
expand_changelog=config.get("expand_issue_changelog", False),
render_fields=render_fields,
)
issue_fields_stream = IssueFields(**args)
experimental_streams = []
if config.get("enable_experimental_streams", False):
experimental_streams.append(
PullRequests(issues_stream=issues_stream, issue_fields_stream=issue_fields_stream, **incremental_args)
)
return [
ApplicationRoles(**args),
Avatars(**args),
Boards(**args),
BoardIssues(**incremental_args),
Dashboards(**args),
Filters(**args),
FilterSharing(**args),
Groups(**args),
issues_stream,
IssueComments(**incremental_args),
issue_fields_stream,
IssueFieldConfigurations(**args),
IssueCustomFieldContexts(**args),
IssueLinkTypes(**args),
IssueNavigatorSettings(**args),
IssueNotificationSchemes(**args),
IssuePriorities(**args),
IssueProperties(**incremental_args),
IssueRemoteLinks(**incremental_args),
IssueResolutions(**args),
IssueSecuritySchemes(**args),
IssueTypeSchemes(**args),
IssueTypeScreenSchemes(**args),
IssueVotes(**incremental_args),
IssueWatchers(**incremental_args),
IssueWorklogs(**incremental_args),
JiraSettings(**args),
Labels(**args),
Permissions(**args),
PermissionSchemes(**args),
Projects(**args),
ProjectAvatars(**args),
ProjectCategories(**args),
ProjectComponents(**args),
ProjectEmail(**args),
ProjectPermissionSchemes(**args),
ProjectTypes(**args),
ProjectVersions(**args),
Screens(**args),
ScreenTabs(**args),
ScreenTabFields(**args),
ScreenSchemes(**args),
Sprints(**args),
SprintIssues(**incremental_args),
TimeTracking(**args),
Users(**args),
UsersGroupsDetailed(**args),
Workflows(**args),
WorkflowSchemes(**args),
WorkflowStatuses(**args),
WorkflowStatusCategories(**args),
] + experimental_streams | [
1196
] |
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2019-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.DomainRegistration/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) | [
56,
245,
710,
377
] |
def METHOD_NAME():
'''
Displaying service bootlist entries as logical device name
'''
cmd = "bootlist -m service -o"
for line in process.system_output(cmd, shell=True,
ignore_status=True).splitlines():
return line.split()[-1] | [
52,
549,
1692,
398
] |
def METHOD_NAME(package, prop):
return [
prop.child_with_tag("DefinedValue").children[0].attributes["type"],
prop.child_with_tag("DefiningValue").children[0].attributes["type"]
] | [
275,
44,
1042,
410,
99
] |
def METHOD_NAME(setup_groups, parameter_type):
"""Situation: LABEL of entity_03 is exactly equal to UUID of entity_01.
Verify that using an ambiguous identifier gives precedence to the UUID interpretation. Appending the special
ambiguity breaker character will force the identifier to be treated as a LABEL.
"""
entity_01, entity_02, entity_03 = setup_groups
identifier = f'{entity_03.label}'
result = parameter_type.convert(identifier, None, None)
assert result.uuid == entity_01.uuid
identifier = f'{entity_03.label}{OrmEntityLoader.label_ambiguity_breaker}'
result = parameter_type.convert(identifier, None, None)
assert result.uuid == entity_03.uuid | [
9,
9624,
636,
4977
] |
def METHOD_NAME(self): | [
34
] |
def METHOD_NAME(self, data):
if data.get("start_date", None) is not None and data.get("target_date", None) is not None and data.get("start_date", None) > data.get("target_date", None):
raise serializers.ValidationError("Start date cannot exceed target date")
return data | [
187
] |
def METHOD_NAME(self):
self.preferencesChanged.emit() | [
69,
3958,
1180
] |
def METHOD_NAME(wrk_output):
return str(wrk_output.get('lat_avg')) + separator + str(wrk_output.get('lat_stdev')) + separator + str(
wrk_output.get('lat_max')) + separator + str(wrk_output.get('req_avg')) + separator + str(
wrk_output.get('req_stdev')) + separator + str(wrk_output.get('req_max')) + separator + str(
wrk_output.get('lat_distrib_50%')) + separator + str(wrk_output.get('lat_distrib_75%')) + separator + str(
wrk_output.get('lat_distrib_90%')) + separator + str(wrk_output.get('lat_distrib_99%')) + separator + str(
wrk_output.get('bytes_sec_tot')) + separator + str(wrk_output.get('req_sec_tot')) + separator + str(
wrk_output.get('tot_requests')) + separator + str(wrk_output.get('tot_duration')) + separator + str(wrk_output.get('err_connect')) + separator + str(
wrk_output.get('err_read')) + separator + str(wrk_output.get('err_write')) + separator + str(
wrk_output.get('err_timeout')) | [
-1,
365
] |
def METHOD_NAME(cls, host):
# self.payload = {'image': '', 'shape': []}
new_instance = cls(host)
return new_instance | [
176,
2139
] |
def METHOD_NAME(model_id):
ab = AppBase()
if ab._is_streamlit(model_id):
return "streamlit"
if ab._is_swagger(model_id):
return "swagger"
if ab._is_dash(model_id):
return "dash" | [
991,
44
] |
def METHOD_NAME(root: models.Page, _info: ResolveInfo):
return root.published_at | [
1014,
3973,
153
] |
def METHOD_NAME(arg):
if isinstance(arg, Number):
return True
try:
astensor(arg)
return True
except ValueError:
return False | [
250,
718
] |
def METHOD_NAME(self, url):
url = f'{self.baseurl}/{url}'
header = { 'Authorization' : f'Basic {self.token}', 'Accept' : 'application/json' }
result = self.session.METHOD_NAME(url, headers=header, verify=self.sslcert)
code = result.status_code
result = result.json()
status = result.pop('status')
if code in (202, 204):
result['error'] = None
else:
result['error'] = status
return result | [
34
] |
def METHOD_NAME(df_local, chunk_size, parallel, array_type):
df = df_local
x = df.x.to_numpy()
total = 0
for i1, i2, chunk in df.to_items(['x'], chunk_size=chunk_size, parallel=parallel, array_type=array_type):
np.testing.assert_array_equal(x[i1:i2], chunk[0][1])
total += sum(chunk[0][1])
assert total == x.sum() | [
9,
24,
1768
] |
def METHOD_NAME(self):
"""轮训任务"""
from backend.container_service.clusters import tasks
tasks.ClusterOrNodeTaskPoller.start(
{"model_type": self.__class__.__name__, "pk": self.pk}, tasks.TaskStatusResultHandler
) | [
2510,
758
] |
def METHOD_NAME(self):
self.assertTrue(cops.is_multi(MULTI_SOLN_STK, self.config))
self.assertFalse(cops.is_multi(GENERIC_SOLN_STK, self.config))
self.assertFalse(cops.is_multi(NON_DOCKER_SOLN_STK, self.config)) | [
9,
137,
457
] |
def METHOD_NAME(old_factory, new_factory, source):
return re.sub(
r"""
\b{}\b
""".format(
old_factory
),
r"{}".format(new_factory),
source,
flags=re.VERBOSE,
) | [
1112,
7291
] |
def METHOD_NAME(
self, enforce_valid_values
):
raw = RawDataElement(Tag(0x88880002), None, 4, b"unknown", 0, True, True)
with pytest.raises(KeyError):
DataElement_from_raw(raw) | [
9,
62,
437,
854,
41,
5265,
1205
] |
def METHOD_NAME(self):
return self.mysql_passwd | [
235,
4001,
1563,
21,
8052
] |
def METHOD_NAME():
""" Locate the built-in device storage that user can see via file browser.
Often found at: /sdcard/
This is storage is SHARED, and visible to other apps and the user.
It will remain untouched when your app is uninstalled.
Returns directory path to storage.
WARNING: You need storage permissions to access this storage.
"""
if _android_has_is_removable_func():
sdpath = _get_sdcard_path()
# Apparently this can both return primary (built-in) or
# secondary (removable) external storage depending on the device,
# therefore check that we got what we wanted:
if not Environment.isExternalStorageRemovable(File(sdpath)):
return sdpath
if "EXTERNAL_STORAGE" in os.environ:
return os.environ["EXTERNAL_STORAGE"]
raise RuntimeError(
"unexpectedly failed to determine " +
"primary external storage path"
) | [
1379,
751,
948,
157
] |
def METHOD_NAME(self, spec, prefix):
mkdirp(prefix.bin)
# The list of files to install varies with release...
# ... but skip the spack-build-{env}out.txt files.
files = [x for x in glob.glob("*") if not re.match("^spack-", x)]
for f in files:
METHOD_NAME(f, prefix.bin)
# Set up a helper script to call java on the jar file,
# explicitly codes the path for java and the jar file.
script_sh = join_path(
os.path.dirname(__file__),
"picard_with_parameters.sh" if "+parameters" in spec else "picard.sh",
)
script = prefix.bin.picard
METHOD_NAME(script_sh, script)
set_executable(script)
# Munge the helper script to explicitly point to java and the
# jar file.
java = self.spec["java"].prefix.bin.java
kwargs = {"ignore_absent": False, "backup": False, "string": False}
filter_file("^java", java, script, **kwargs)
filter_file("picard.jar", join_path(prefix.bin, "picard.jar"), script, **kwargs) | [
428
] |
def METHOD_NAME():
"""
Test to stop bluetooth service
"""
mock = MagicMock(return_value="Ok")
with patch.dict(bluez.__salt__, {"service.stop": mock}):
assert bluez.stop() == "Ok" | [
9,
631
] |
def METHOD_NAME():
devices = [InputDevice(fn) for fn in list_devices()]
devices.append(NonUsbDevice(EDevices.MFRC522.name))
devices.append(NonUsbDevice(EDevices.RDM6300.name))
devices.append(NonUsbDevice(EDevices.PN532.name))
return devices | [
19,
165
] |
def METHOD_NAME(x, weight, range, **params):
"""
Compute density
"""
import statsmodels.api as sm
x = np.asarray(x, dtype=float)
not_nan = ~np.isnan(x)
x = x[not_nan]
bw = params["bw"]
kernel = params["kernel"]
n = len(x)
assert isinstance(bw, (str, float)) # type narrowing
if n == 0 or (n == 1 and isinstance(bw, str)):
if n == 1:
warn(
"To compute the density of a group with only one "
"value set the bandwidth manually. e.g `bw=0.1`",
PlotnineWarning,
)
warn(
"Groups with fewer than 2 data points have been removed.",
PlotnineWarning,
)
return pd.DataFrame()
# kde is computed efficiently using fft. But the fft does
# not support weights and is only available with the
# gaussian kernel. When weights are relevant we
# turn off the fft.
if weight is None:
if kernel != "gau":
weight = np.ones(n) / n
else:
weight = np.asarray(weight, dtype=float)
if kernel == "gau" and weight is None:
fft = True
else:
fft = False
if bw == "nrd0":
bw = nrd0(x)
kde = sm.nonparametric.KDEUnivariate(x)
kde.fit(
kernel=kernel,
bw=bw, # type: ignore
fft=fft,
weights=weight,
adjust=params["adjust"],
cut=params["cut"],
gridsize=params["gridsize"],
clip=params["clip"],
)
x2 = np.linspace(range[0], range[1], params["n"])
try:
y = kde.evaluate(x2)
if np.isscalar(y) and np.isnan(y):
raise ValueError("kde.evaluate returned nan")
except ValueError:
y = []
for _x in x2:
result = kde.evaluate(_x)
if isinstance(result, float):
y.append(result)
else:
y.append(result[0])
y = np.asarray(y)
# Evaluations outside the kernel domain return np.nan,
# these values and corresponding x2s are dropped.
# The kernel domain is defined by the values in x, but
# the evaluated values in x2 could have a much wider range.
not_nan = ~np.isnan(y)
x2 = x2[not_nan]
y = y[not_nan]
return pd.DataFrame(
{
"x": x2,
"density": y,
"scaled": y / np.max(y) if len(y) else [],
"count": y * n,
"n": n,
}
) | [
226,
2915
] |
def METHOD_NAME( curl, response, proxy, timeout ):
raise ResponseError( curl, response, proxy, timeout ) | [
241,
721,
168
] |
def METHOD_NAME(
self,
authors_repo: AuthorRepository,
author_id: UUID = Parameter(
title="Author ID",
description="The author to retrieve.",
), | [
19,
2997
] |
def METHOD_NAME(cls, value, info):
field = cls.model_fields[info.field_name]
field_name = field.alias or info.field_name
if field_name in info.context['configured_fields']:
value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field)
else:
value = getattr(defaults, f'instance_{info.field_name}', lambda: value)()
return validation.utils.make_immutable(value) | [
187
] |
def METHOD_NAME(self, request):
return (self.page - 1) * self.limit | [
19,
1540
] |
def METHOD_NAME(s, expected):
assert _simplify_conversion_flag(s) == expected | [
9,
7403,
1719,
584
] |
def METHOD_NAME():
try:
real_teardown()
finally:
patch_obj.stop() | [
531,
481,
2
] |
def METHOD_NAME(self, payload, payload_type):
fuzzer = self.fuzzers.get(payload_type)
if fuzzer:
return fuzzer(payload)
return [] | [
9282,
288
] |
def METHOD_NAME(self):
"""Return if the file should be handled inline.
If not, and unless your application supports other dispositions
than the standard inline and attachment, it should be handled
as an attachment.
"""
return self.disposition in {None, 'inline'} | [
137,
1817
] |
def METHOD_NAME(self):
# Test that when we don't specify a chunk the default is to read all chunks
# Use the single chunk test to generate a network with some data in the default
# (0,0,0) chunk and some data the other test is supposed to ignore in chunk 001
self.test_single_chunk()
# Continue using this network, checking that we pick up on the data in chunk 001
chunk0 = self.ps.load_positions()
# Unload the chunks loaded by the other test. The desired behavior is then to
# read all chunks.
self.ps.exclude_chunk(Chunk((0, 0, 0), None))
chunk_all = self.ps.load_positions()
self.assertGreater(len(chunk_all), len(chunk0)) | [
9,
235,
464
] |
def METHOD_NAME(self, data):
if not data:
return
sdata = []
for key, val in data:
if not key:
raise ex.Error("secret key name can not be empty")
if val is None:
raise ex.Error("secret value can not be empty")
val = "crypt:"+base64.urlsafe_b64encode(self.encrypt(val, cluster_name="join", encode=True)).decode()
sdata.append("data.%s=%s" % (key, val))
self.set_multi(sdata)
self.log.info("secret keys '%s' added", ",".join([k for k, v in data]))
# refresh if in use
self.postinstall(key) | [
238,
219
] |
def METHOD_NAME(self):
self.assertEqual(
time.strftime("%y", (2013, 1, 1, 0, 0, 0, 0, 0, 0)), "13") | [
9,
320,
1887,
6033
] |
def METHOD_NAME(self, key):
if isinstance(key, slice):
return self._str_slice(start=key.start, stop=key.stop, step=key.step)
else:
return self._str_get(key) | [
3,
5181
] |
def METHOD_NAME(self): | [
9,
699,
69,
374,
2707,
1471,
69
] |
def METHOD_NAME(self):
self.s["AA"] = "B"
self.s["aa"] = "C"
self.s["BB"] = "D"
self.s["Aa"] = "E"
self.s.write()
self.s.reload()
self.failUnlessEqual(set(self.s["aa"].split()), {"C", "B", "E"}) | [
9,
457,
331
] |
def METHOD_NAME(
port: Optional[int] = None,
socket: Optional[str] = None,
host: str = "localhost",
max_workers: Optional[int] = None,
fixed_server_id: Optional[str] = None,
log_level: str = "INFO",
use_python_environment_entry_point: bool = False,
container_image: Optional[str] = None,
container_context: Optional[str] = None,
location_name: Optional[str] = None,
inject_env_vars_from_instance: bool = False,
startup_timeout: int = 0,
instance_ref=None,
**kwargs,
):
from dagster._grpc import DagsterGrpcServer
from dagster._grpc.proxy_server import DagsterProxyApiServicer
if seven.IS_WINDOWS and port is None:
raise click.UsageError(
"You must pass a valid --port/-p on Windows: --socket/-s not supported."
)
if not (port or socket and not (port and socket)):
raise click.UsageError("You must pass one and only one of --port/-p or --socket/-s.")
setup_interrupt_handlers()
configure_loggers(log_level=log_level.upper())
logger = logging.getLogger("dagster.code_server")
container_image = container_image or os.getenv("DAGSTER_CURRENT_IMAGE")
# in the gRPC api CLI we never load more than one module or python file at a time
module_name = check.opt_str_elem(kwargs, "module_name")
python_file = check.opt_str_elem(kwargs, "python_file")
loadable_target_origin = LoadableTargetOrigin(
executable_path=sys.executable if use_python_environment_entry_point else None,
attribute=kwargs["attribute"],
working_directory=get_working_directory_from_kwargs(kwargs),
module_name=module_name,
python_file=python_file,
package_name=kwargs["package_name"],
)
server_termination_event = threading.Event()
api_servicer = DagsterProxyApiServicer(
loadable_target_origin=loadable_target_origin,
fixed_server_id=fixed_server_id,
container_image=container_image,
container_context=(
json.loads(container_context) if container_context is not None else None
),
inject_env_vars_from_instance=inject_env_vars_from_instance,
location_name=location_name,
log_level=log_level,
startup_timeout=startup_timeout,
instance_ref=deserialize_value(instance_ref, InstanceRef) if instance_ref else None,
server_termination_event=server_termination_event,
logger=logger,
)
server = DagsterGrpcServer(
server_termination_event=server_termination_event,
dagster_api_servicer=api_servicer,
port=port,
socket=socket,
host=host,
max_workers=max_workers,
logger=logger,
)
code_desc = " "
if loadable_target_origin.python_file:
code_desc = f" for file {loadable_target_origin.python_file} "
elif loadable_target_origin.package_name:
code_desc = f" for package {loadable_target_origin.package_name} "
elif loadable_target_origin.module_name:
code_desc = f" for module {loadable_target_origin.module_name} "
server_desc = (
f"Dagster code proxy server{code_desc}on port {port} in process {os.getpid()}"
if port
else f"Dagster code proxy server{code_desc}in process {os.getpid()}"
)
logger.info("Started %s", server_desc)
try:
server.serve()
except KeyboardInterrupt:
# Terminate cleanly on interrupt
logger.info("Code proxy server was interrupted")
finally:
logger.info("Shutting down %s", server_desc) | [
447,
462
] |
def METHOD_NAME(X, n_neighbors):
"""A fast computation of knn indices.
Parameters
----------
X: array of shape (n_samples, n_features)
The input data to compute the k-neighbor indices of.
n_neighbors: int
The number of nearest neighbors to compute for each sample in ``X``.
Returns
-------
knn_indices: array of shape (n_samples, n_neighbors)
The indices on the ``n_neighbors`` closest points in the dataset.
"""
knn_indices = np.empty((X.shape[0], n_neighbors), dtype=np.int32)
for row in numba.prange(X.shape[0]):
# v = np.argsort(X[row]) # Need to call argsort this way for numba
v = X[row].argsort(kind="quicksort")
v = v[:n_neighbors]
knn_indices[row] = v
return knn_indices | [
2602,
9896,
1894
] |
def METHOD_NAME(self, path: str) -> None:
"""Start recording a trace file to disk.
Parameters
----------
path : str
Write the trace to this path.
"""
self.trace_file = PerfTraceFile(path) | [
447,
2576,
171
] |
def METHOD_NAME(bot, rc_data, pipe_data):
"""
Return only the bots which are directly connected
If a bot has more than one input or output, stop processing
"""
cbs = []
sqs2bot, dsq_count = convert_pipedata(pipe_data)
cbot = bot
while True:
dst_qs = pipe_data[cbot].get('destination-queues', [])
if len(dst_qs) == 1:
dst_q = dst_qs[0]
_bot = sqs2bot[dst_q]
cbot = _bot[0]
count = dsq_count[dst_q]
if count > 1:
break
else:
cbs.append(cbot)
else:
break
return cbs | [
2261,
13903
] |
def METHOD_NAME(*args: Any) -> Any:
result = args
for fun in functions:
new_args = make_iter(result)
result = fun(*new_args)
return result | [
921
] |
def METHOD_NAME(self):
"""Initialize SfmData and SfmTrack"""
self.data = SfmData()
# initialize SfmTrack with 3D point
self.tracks = SfmTrack() | [
0,
1
] |
def METHOD_NAME(cls):
cls.realpath = os.path.realpath(__file__)
cls.path = os.path.dirname(cls.realpath) | [
0,
1,
2
] |
def METHOD_NAME(self, *column_texts):
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text_line in column_texts:
node = nodes.paragraph('')
vl = ViewList()
if text_line is None:
continue
for text in text_line.split('\n'):
vl.append(text, '%s:%d' % (source, line))
with switch_source_input(self.state, vl):
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph) and len(node.children) == 1:
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
return row | [
129,
843
] |
def METHOD_NAME(
exploit_host,
mock_snmp_exploit_client,
mock_http_agent_binary_server_registrar,
):
mock_snmp_exploit_client.exploit_host.return_value = (True, True)
result = exploit_host()
assert mock_http_agent_binary_server_registrar.clear_reservation.called
assert mock_http_agent_binary_server_registrar.reserve_download.called
assert result.exploitation_success
assert result.propagation_success | [
9,
4714,
1806,
7909
] |
def METHOD_NAME(self, dataset):
return DataLoader(
dataset, batch_size=self.datarc['eval_batch_size'],
shuffle=False, num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
) | [
19,
1171,
568
] |
def METHOD_NAME(network_manager_connection_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubscriptionNetworkManagerConnectionResult]:
"""
Get a specified connection created by this subscription.
:param str network_manager_connection_name: Name for the network manager connection.
"""
... | [
19,
835,
1228,
722,
550,
146
] |
def METHOD_NAME(self, key):
'''Find a parameter in the associated .ers file'''
return self.header[key] | [
19,
572,
49
] |
def METHOD_NAME(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, result_file_path: str) -> List[Report]:
""" Parse the given analyzer result. """
reports: List[Report] = []
if os.path.isdir(result_file_path):
report_file = os.path.join(result_file_path, "report.json")
self.__infer_out_parent_dir = os.path.dirname(result_file_path)
else:
report_file = result_file_path
self.__infer_out_parent_dir = os.path.dirname(
os.path.dirname(result_file_path))
if not os.path.exists(report_file):
LOG.error("Report file does not exist: %s", report_file)
return reports
try:
with open(report_file, 'r',
encoding="utf-8", errors="ignore") as f:
bugs = json.load(f)
except IOError:
LOG.error("Failed to parse the given analyzer result '%s'. Please "
"give an infer output directory which contains a valid "
"'report.json' file.", result_file_path)
return reports
for bug in bugs:
report = self.__parse_report(bug)
if report:
reports.append(report)
return reports | [
19,
3378
] |
def METHOD_NAME(cls) -> List[str]: | [
19,
867,
277,
4078,
2931
] |
def METHOD_NAME():
mesh = cpt.mesh_sphere(radius=1, center=(0, 0, 0), resolution=(10, 10)).immersed_part()
S, gradG_1 = cpt.Delhommeau().evaluate(mesh, mesh, 0.0, np.infty, 1.0, early_dot_product=False)
assert gradG_1.shape == (mesh.nb_faces, mesh.nb_faces, 3) | [
9,
-1,
4197,
555
] |
def METHOD_NAME(self):
"""
Indicates whether this represents a predefined source position.
:return: True if predefined, otherwise False.
:rtype: bool
"""
return self.equals(ASTSourceLocation.get_predefined_source_position()) | [
137,
4446,
1458,
195
] |
def METHOD_NAME(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e | [
2277,
43,
4027
] |
def METHOD_NAME(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""Lists available operations for the Microsoft.Kusto provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~kusto_management_client.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop(
'cls', None) # type: ClsType["models.OperationListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-18"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.METHOD_NAME.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query(
"api_version", api_version, 'str')
request = self._client.get(
url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(
url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize(
'OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code,
response=response, error_map=error_map)
raise HttpResponseError(
response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
) | [
245
] |
def METHOD_NAME(self):
pass | [
156,
164
] |
def METHOD_NAME(
storage_mock, enterprise_data_fixture, synced_roles
):
user, _ = enterprise_data_fixture.create_enterprise_admin_user_and_token()
with freeze_time("2023-01-01 12:00:00"):
workspace_1 = CreateWorkspaceActionType.do(user, "workspace 1").workspace
with freeze_time("2023-01-01 12:00:10"):
workspace_2 = CreateWorkspaceActionType.do(user, "workspace 2").workspace
csv_settings = {
"csv_column_separator": ",",
"csv_first_row_header": True,
"export_charset": "utf-8",
}
stub_file = BytesIO()
storage_mock.open.return_value = stub_file
close = stub_file.close
stub_file.close = lambda: None
csv_export_job = JobHandler().create_and_start_job(
user, AuditLogExportJobType.type, **csv_settings, sync=True
)
csv_export_job.refresh_from_db()
assert csv_export_job.state == JOB_FINISHED
data = stub_file.getvalue().decode(csv_settings["export_charset"])
bom = "\ufeff"
assert data == (
bom
+ "User Email,User ID,Group Name,Group ID,Action Type,Description,Timestamp,IP Address\r\n"
+ f"{user.email},{user.id},{workspace_2.name},{workspace_2.id},Create group,"
f'"Group ""{workspace_2.name}"" ({workspace_2.id}) created.",2023-01-01 12:00:10+00:00,\r\n'
+ f"{user.email},{user.id},{workspace_1.name},{workspace_1.id},Create group,"
f'"Group ""{workspace_1.name}"" ({workspace_1.id}) created.",2023-01-01 '
f"12:00:00+00:00,\r\n"
)
close()
# With a different separator and without header
csv_settings = {
"csv_column_separator": "|",
"csv_first_row_header": False,
"export_charset": "utf-8",
}
stub_file = BytesIO()
storage_mock.open.return_value = stub_file
close = stub_file.close
stub_file.close = lambda: None
csv_export_job = JobHandler().create_and_start_job(
user, AuditLogExportJobType.type, **csv_settings, sync=True
)
csv_export_job.refresh_from_db()
assert csv_export_job.state == JOB_FINISHED
data = stub_file.getvalue().decode(csv_settings["export_charset"])
bom = "\ufeff"
assert data == (
bom + f"{user.email}|{user.id}|{workspace_2.name}|{workspace_2.id}|Create "
f'group|"Group ""{workspace_2.name}"" ({workspace_2.id}) '
f'created."|2023-01-01 12:00:10+00:00|\r\n'
+ f"{user.email}|{user.id}|{workspace_1.name}|{workspace_1.id}|Create "
f'group|"Group ""{workspace_1.name}"" ({workspace_1.id}) '
f'created."|2023-01-01 12:00:00+00:00|\r\n'
)
close() | [
9,
1422,
390,
294,
732,
3705
] |
def METHOD_NAME(field_name, override_field):
original_raw_yaml = {
'jtype': 'SimpleIndexer',
'with': {'a': 123, 'b': 'test'},
'metas': {'name': 'test-name', 'workspace': 'test-work-space'},
'requests': {'/foo': 'bar'},
}
updated_raw_yaml = original_raw_yaml
JAMLCompatible()._override_yml_params(updated_raw_yaml, field_name, override_field)
if override_field:
assert updated_raw_yaml[field_name] == override_field
else:
assert original_raw_yaml == updated_raw_yaml
# assure we don't create py_modules twice
if override_field == 'metas' and 'py_modules' in override_field:
assert 'py_modules' in updated_raw_yaml['metas']
assert 'py_modules' not in updated_raw_yaml | [
9,
345,
5024,
434
] |
def METHOD_NAME(m2ee):
"""
Injects Dynatrace configuration to java runtime
"""
if not is_agent_enabled():
logging.debug(
"Skipping Dynatrace OneAgent setup, required env vars are not set"
)
return
logging.info("Enabling Dynatrace OneAgent")
try:
manifest = get_manifest()
except Exception:
logging.warning("Failed to parse Dynatrace manifest file", exc_info=True)
return
agent_path = get_agent_path()
logging.debug("Agent path: [%s]", agent_path)
if not os.path.exists(agent_path):
raise Exception(f"Dynatrace Agent not found: {agent_path}")
# dynamic default
default_env.update({"DT_TENANTTOKEN": manifest.get("tenantToken")})
for key, dv in default_env.items():
value = os.environ.get(key, dv)
if value is not None:
util.upsert_custom_environment_variable(m2ee, key, value)
util.upsert_custom_environment_variable(
m2ee, "DT_CONNECTION_POINT", get_connection_endpoint()
)
util.upsert_javaopts(
m2ee,
[
f"-agentpath:{os.path.abspath(agent_path)}",
"-Xshare:off",
],
) | [
86,
200
] |
def METHOD_NAME(self):
"""
Return the canonical label of ``self``.
EXAMPLES::
sage: P = species.SubsetSpecies()
sage: S = P.structures(["a", "b", "c"])
sage: [s.canonical_label() for s in S]
[{}, {'a'}, {'a'}, {'a'}, {'a', 'b'}, {'a', 'b'}, {'a', 'b'}, {'a', 'b', 'c'}]
"""
rng = list(range(1, len(self._list) + 1))
return self.__class__(self.parent(), self._labels, rng) | [
6208,
636
] |
def METHOD_NAME(self):
# This bug was originally caused by beaker-provision running the same
# command twice concurrently, which would typically cause them both to
# report failure at the exact same moment. As a result, we had two
# concurrent transactions marking the same system as broken.
# This test just touches the model objects directly, so that we can
# use threads to guarantee concurrent transactions.
with session.begin():
system = data_setup.create_system(status=u'Automated')
self.check_status_durations(system)
class MarkBrokenThread(Thread):
def __init__(self, **kwargs):
super(MarkBrokenThread, self).__init__(**kwargs)
self.ready_evt = Event()
self.continue_evt = Event()
def run(self):
try:
session.begin()
system_local = System.query.get(system.id)
assert system_local.status == SystemStatus.automated
self.ready_evt.set()
self.continue_evt.wait()
system_local.mark_broken(reason=u'Murphy', service=u'testdata')
session.commit()
except:
# We expect one thread to get an exception, don't care which one though.
# Catching it here just prevents it spewing onto stderr.
log.exception('Exception in MarkBrokenThread (one is expected)')
thread1 = MarkBrokenThread()
thread2 = MarkBrokenThread()
thread1.start()
thread2.start()
thread1.ready_evt.wait()
thread2.ready_evt.wait()
thread1.continue_evt.set()
thread2.continue_evt.set()
thread1.join()
thread2.join()
with session.begin():
session.expire_all()
self.assertEquals(system.status, SystemStatus.broken)
self.check_status_durations(system) | [
9,
5604,
682
] |
def METHOD_NAME(self,output,workingDir): | [
250,
43,
146,
374
] |
def METHOD_NAME():
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Rect(x='x', y='y', width=0.9, height=0.9))
plot.add_tools(ResetTool(), ZoomInTool())
code = RECORD("xrstart", "p.x_range.start", final=False) + \
RECORD("xrend", "p.x_range.end", final=False) + \
RECORD("yrstart", "p.y_range.start", final=False) + \
RECORD("yrend", "p.y_range.end")
plot.tags.append(CustomJS(name="custom-action", args=dict(p=plot), code=code))
plot.toolbar_sticky = False
return plot | [
93,
1288
] |
def METHOD_NAME(self, node, expr_str, **kwargs):
try:
return DivisionToFortranDivisionMapper()(
self.expr_parser(expr_str, **kwargs))
except Exception as e:
raise LoopyError(
"Error parsing expression '%s' on line %d of '%s': %s"
% (expr_str, node.item.span[0], self.filename, str(e))) | [
214,
2078
] |
def METHOD_NAME(fileName):
index = fileName.rindex(".") + 1
fileExtension = fileName[index:]
return fileExtension | [
19,
2916
] |
def METHOD_NAME(request, exception, template_name="404.html"):
response = render(request, template_name)
response.status_code = 404
return response | [
13049
] |
def METHOD_NAME(ceph_cluster, **kw):
"""
Test Cases Covered :
CEPH-83574724 - Create Clone of subvolume which is completely filled with data till disk quota exceeded
Pre-requisites :
1. We need at least one client node to execute this test case
1. Creates fs volume cephfs if the volume is not there
2. ceph fs subvolumegroup create <vol_name> <group_name> --pool_layout <data_pool_name>
Ex : ceph fs subvolumegroup create cephfs subvolgroup_full_vol_1
3. ceph fs subvolume create <vol_name> <subvol_name> [--size <size_in_bytes>] [--group_name <subvol_group_name>]
[--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>] [--namespace-isolated]
Ex: ceph fs subvolume create cephfs subvol_2 --size 5368706371 --group_name subvolgroup_1
4. Create Data on the subvolume
Ex: python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 400 --files
100 --files-per-dir 10 --dirs-per-dir 2 --top /mnt/cephfs_fuse1baxgbpaia_1/
5. Create snapshot of the subvolume
Ex: ceph fs subvolume snapshot create cephfs subvol_2 snap_1 --group_name subvolgroup_full_vol_1
Test Case Flow:
1. Fill the subvolume with more data than subvolume size i.e., > 5G
2. Create Clone out of subvolume
3. Mount the cloned volume
4. Validate the contents of cloned volume with contents present volume
Clean Up:
1. Delete Cloned volume
2. Delete subvolumegroup
"""
try:
fs_util = FsUtils(ceph_cluster)
config = kw.get("config")
clients = ceph_cluster.get_ceph_objects("client")
build = config.get("build", config.get("rhbuild"))
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
log.info("checking Pre-requisites")
if len(clients) < 1:
log.info(
f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
)
return 1
default_fs = "cephfs"
mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(10))
)
client1 = clients[0]
fs_details = fs_util.get_fs_info(client1)
if not fs_details:
fs_util.create_fs(client1, "cephfs")
subvolumegroup_list = [
{"vol_name": default_fs, "group_name": "subvolgroup_full_vol_1"},
]
for subvolumegroup in subvolumegroup_list:
fs_util.create_subvolumegroup(client1, **subvolumegroup)
subvolume = {
"vol_name": default_fs,
"subvol_name": "subvol_full_vol",
"group_name": "subvolgroup_full_vol_1",
"size": "5368706371",
}
fs_util.create_subvolume(client1, **subvolume)
log.info("Get the path of sub volume")
subvol_path, rc = client1.exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {default_fs} subvol_full_vol subvolgroup_full_vol_1",
)
kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
mon_node_ips = fs_util.get_mon_node_ips()
fs_util.kernel_mount(
[clients[0]],
kernel_mounting_dir_1,
",".join(mon_node_ips),
sub_dir=f"{subvol_path.strip()}",
)
client1.exec_command(
sudo=True,
cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 5 --file-size 1024 "
f"--files 2048 --top "
f"{kernel_mounting_dir_1}",
long_running=True,
)
client1.exec_command(
sudo=True,
cmd=f"dd if=/dev/zero of={kernel_mounting_dir_1}/file_5gb bs=1M count=5000",
long_running=True,
)
c_out2, c_err2 = client1.exec_command(
sudo=True,
cmd="ceph fs subvolume info cephfs subvol_full_vol --group_name subvolgroup_full_vol_1 -f json",
)
c_out2_result = json.loads(c_out2)
log.info(c_out2_result)
if c_out2_result["bytes_used"] >= c_out2_result["bytes_quota"]:
pass
else:
log.error("Unable to fill the volume completely,So TC can not be verified")
return 1
snapshot = {
"vol_name": default_fs,
"subvol_name": "subvol_full_vol",
"snap_name": "snap_1",
"group_name": "subvolgroup_full_vol_1",
}
fs_util.create_snapshot(client1, **snapshot)
full_vol_1 = {
"vol_name": default_fs,
"subvol_name": "subvol_full_vol",
"snap_name": "snap_1",
"target_subvol_name": "full_vol_1",
"group_name": "subvolgroup_full_vol_1",
}
fs_util.create_clone(client1, **full_vol_1)
fs_util.validate_clone_state(client1, full_vol_1, timeout=1200)
clonevol_path, rc = client1.exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {default_fs} "
f"{full_vol_1['target_subvol_name']}",
)
fuse_mounting_dir_2 = f"/mnt/cephfs_fuse{mounting_dir}_3/"
fs_util.fuse_mount(
[client1],
fuse_mounting_dir_2,
extra_params=f" -r {clonevol_path.strip()}",
)
client1.exec_command(
sudo=True, cmd=f"diff -qr {kernel_mounting_dir_1} {fuse_mounting_dir_2}"
)
return 0
except Exception as e:
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
log.info("Clean Up in progess")
rmclone_list = [
{"vol_name": default_fs, "subvol_name": "full_vol_1"},
]
for clone_vol in rmclone_list:
fs_util.remove_subvolume(client1, **clone_vol, force=True, validate=False)
if locals().get("snapshot", None):
fs_util.remove_snapshot(client1, **snapshot, validate=False, check_ec=False)
fs_util.remove_subvolume(client1, **subvolume, validate=False, check_ec=False)
for subvolumegroup in subvolumegroup_list:
fs_util.remove_subvolumegroup(client1, **subvolumegroup, force=True) | [
22
] |
def METHOD_NAME(parameters):
print("Parameters:", parameters)
# Read data
sentences = utils.read_sentences_from_file(
parameters["path_to_input_file"],
one_sentence_per_line=parameters["one_sentence_per_line"],
)
# Identify mentions
ner_model = NER.get_model(parameters)
ner_output_data = ner_model.predict(sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
output_folder_path = parameters["output_folder_path"]
if (
(output_folder_path is not None)
and os.path.exists(output_folder_path)
and os.listdir(output_folder_path)
):
print(
"The given output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
answer = input("Would you like to empty the existing directory? [Y/N]\n")
if answer.strip() == "Y":
print("Deleting {}...".format(output_folder_path))
shutil.rmtree(output_folder_path)
else:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
sentences, utils.get_sentences_txt_file_path(output_folder_path)
)
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Generate candidates and get the data that describes the candidates
candidate_generator = CG.get_model(parameters)
candidate_generator.process_mentions_for_candidate_generator(
sentences=sentences, mentions=mentions
)
for mention in mentions:
mention["candidates"] = candidate_generator.get_candidates(mention)
if parameters["consider_additional_datafetcher"]:
data_fetcher = CDF.get_model(parameters)
for candidate in mention["candidates"]:
data_fetcher.get_data_for_entity(candidate)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Reranking
reranking_model = R.get_model(parameters)
reranking_model.rerank(mentions, sentences)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
utils.write_end2end_pickle_output(sentences, mentions, output_folder_path)
utils.present_annotated_sentences(
sentences,
mentions,
utils.get_end2end_pretty_output_file_path(output_folder_path),
)
# Showcase results
utils.present_annotated_sentences(sentences, mentions) | [
57
] |
def METHOD_NAME(self, partitions: Sequence[str]) -> OptimizationSchedule:
"""
Get the next schedule for optimizing partitions. The provided partitions
are subdivided into parallel number of partitions and the cutoff time
for each schedule is determined by when parallelism boundaries are
reached.
"""
current_time = datetime.now()
if current_time >= self.__full_job_end_time:
raise OptimizedSchedulerTimeout(
f"Optimize job cutoff time exceeded "
f"{self.__full_job_end_time}. Abandoning"
)
if self.__parallel == 1:
return OptimizationSchedule(
partitions=[self.sort_partitions(partitions)],
cutoff_time=self.__last_midnight
+ timedelta(hours=settings.OPTIMIZE_JOB_CUTOFF_TIME),
)
else:
if current_time < self.__parallel_start_time:
return OptimizationSchedule(
partitions=[self.sort_partitions(partitions)],
cutoff_time=self.__parallel_start_time,
)
elif current_time < self.__parallel_end_time:
return OptimizationSchedule(
partitions=self.subdivide_partitions(partitions, self.__parallel),
cutoff_time=self.__parallel_end_time,
start_time_jitter_minutes=self.start_time_jitter(),
)
else:
return OptimizationSchedule(
partitions=[self.sort_partitions(partitions)],
cutoff_time=self.__full_job_end_time,
) | [
19,
243,
507
] |
def METHOD_NAME(self):
rot_ref = sp_rot.from_matrix(self.matrices)
matrices = rot_ref.as_matrix().astype(self.dtype)
self.assertTrue(
np.allclose(self.matrices, matrices, atol=utest_tolerance(self.dtype))
) | [
9,
8221,
2298
] |
def METHOD_NAME(self):
return self.context.description | [
19,
1067
] |
def METHOD_NAME():
parser = options.get_interactive_generation_parser()
parser.add_argument('--prompts', type=str, default=None, required=True)
parser.add_argument('--output', type=str, default=None, required=True)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--samples-per-prompt', type=int, default=1)
args = options.parse_args_and_arch(parser)
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
main(args) | [
615,
57
] |
def METHOD_NAME(self):
"""
- 1 Zone
- RGB Support
- Implements every option type/combo:
- None EffectOption
- Static EffectOption with colour only
- Wave EffectOption with parameters only <<<
"""
device = Backend.DeviceItem()
device.name = "Dummy Keyboard"
device.form_factor = self.get_form_factor("keyboard")
device.serial = "DUMMY0001"
device.keyboard_layout = "en_GB"
device.matrix = DummyMatrix()
zone = Backend.DeviceItem.Zone()
zone.zone_id = "main"
zone.label = "Main Zone"
device.zones.append(zone)
class Nothing(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "none"
self.label = "None"
def apply(self):
pass
class Static(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "static"
self.label = "Static"
self.active = True
self.colours_required = 1
self.colours = ["#00FF00"]
def apply(self):
pass
class Wave(Backend.EffectOption):
def __init__(self):
super().__init__()
self.uid = "wave"
self.label = "Wave"
param_1 = Backend.EffectOption.Parameter()
param_1.data = 1
param_1.label = "Left"
param_2 = Backend.EffectOption.Parameter()
param_2.data = 2
param_2.label = "Right"
param_2.active = True
self.parameters = [param_1, param_2]
def apply(self, data):
pass
for option in [Nothing, Static, Wave]:
zone.options.append(option())
return device | [
19,
10864,
398
] |
def METHOD_NAME(table_path):
schema_path = table_path / SCHEMA_FILE
try:
schema = Schema.from_schema_file(schema_path)
return schema.schema.get("fields")
except Exception as e:
logging.warning(f"Unable to open schema: {e}") | [
19,
135
] |
def METHOD_NAME(mapping, key=None):
"""Recursively convert 'multi_field' type (deprecated since version 0.9) to
'string' in an Elastic mapping. The field named the same as the property is
no longer necessary since the top-level property becomes the default in
versions >=1.0.
See: https://www.elastic.co/guide/en/elasticsearch/reference/1.7/_multi_fields.html
Transforms:
"city": {
"fields": {
"city": {"type": "string", "index": "analyzed"},
"exact": {"type": "string", "index": "not_analyzed"},
},
"type": "multi_field"
}
To:
"city": {
"fields": {
"exact": {"type": "string", "index": "not_analyzed"}
},
"type": "string"
}
"""
if isinstance(mapping, dict):
if "fields" in mapping and mapping.get("type") == "multi_field":
mapping = mapping.copy()
if key is None or key not in mapping["fields"]:
raise ValueError(f"'multi_field' property {key!r} is missing "
f"the 'default' field: {mapping}")
mapping.update(mapping["fields"].pop(key))
if mapping.get("index") == "analyzed":
# {"index": "analyzed"} is the default
del mapping["index"]
return {k: METHOD_NAME(v, k) for k, v in mapping.items()}
elif isinstance(mapping, (tuple, list, set)):
return [METHOD_NAME(v) for v in mapping]
else:
return mapping | [
1053,
457,
101
] |
def METHOD_NAME(self):
"""Setup the environment for testing z3."""
global TEST_WORKSPACE
TEST_WORKSPACE = env.get_workspace('z3')
# Set the TEST_WORKSPACE used by the tests.
os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE
test_config = {}
test_project = 'suppress'
project_info = project.get_info(test_project)
# Copy the test project to the workspace. The tests should
# work only on this test project.
test_proj_path = os.path.join(TEST_WORKSPACE, "test_proj")
shutil.copytree(project.path(test_project), test_proj_path)
project_info['project_path'] = test_proj_path
test_config['test_project'] = project_info
# Suppress file should be set here if needed by the tests.
suppress_file = None
# Skip list file should be set here if needed by the tests.
skip_list_file = None
# Get an environment which should be used by the tests.
test_env = env.test_env(TEST_WORKSPACE)
# Create a basic CodeChecker config for the tests, this should
# be imported by the tests and they should only depend on these
# configuration options.
codechecker_cfg = {
'suppress_file': suppress_file,
'skip_list_file': skip_list_file,
'check_env': test_env,
'workspace': TEST_WORKSPACE,
'checkers': []
}
# Clean the test project, if needed by the tests.
ret = project.clean(test_project)
if ret:
sys.exit(ret)
test_config['codechecker_cfg'] = codechecker_cfg
# Export the test configuration to the workspace.
env.export_test_cfg(TEST_WORKSPACE, test_config) | [
102,
2
] |
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(device):
print("New device (%s)" % device)
mainloop.quit()
sys.exit(0) | [
129,
398,
1922
] |
def METHOD_NAME(self, key, value):
self.config[key] = value | [
200,
0,
2793
] |
def METHOD_NAME(self) -> bool:
if self.hci is not None:
devinfo = device_info(self.hci)
_tx = devinfo["stat"]["byte_tx"]
_rx = devinfo["stat"]["byte_rx"]
tx, s_tx = format_bytes(_tx)
rx, s_rx = format_bytes(_rx)
_u_speed = self.up_speed.calc(_tx)
_d_speed = self.down_speed.calc(_rx)
self.set_blinker_by_speed(self.up_blinker, _u_speed)
self.set_blinker_by_speed(self.down_blinker, _d_speed)
u_speed, s_u_speed = format_bytes(_u_speed)
d_speed, s_d_speed = format_bytes(_d_speed)
self.set_data(tx, s_tx, rx, s_rx, u_speed, s_u_speed, d_speed, s_d_speed)
return True | [
86
] |
def METHOD_NAME(self):
return all(d.METHOD_NAME for d in self.datasets) | [
1466,
518
] |
def METHOD_NAME():
try:
job = Mock()
_check_job_status(
job,
{
"TransformationJobStatus": "Failed",
"FailureReason": "CapacityError: Unable to provision requested ML compute capacity",
},
"TransformationJobStatus",
)
assert False, "sagemaker.exceptions.CapacityError should have been raised but was not"
except Exception as e:
assert type(e) == sagemaker.exceptions.CapacityError
assert e.actual_status == "Failed"
assert "Completed" in e.allowed_statuses
assert "Stopped" in e.allowed_statuses | [
9,
870,
241,
1420,
168,
1646,
3534
] |
def METHOD_NAME(self):
"""
Test RuntimeError is thrown when add_start_token = True and yet add_special_tokens = False
"""
with self.assertRaises(RuntimeError):
create_agent(
{
'model': 'hugging_face/dialogpt',
'add_special_tokens': False,
'add_start_token': True,
}
) | [
9,
447,
466
] |
def METHOD_NAME(self, from_node, to_node):
wx, wy = self.lqr_planner.lqr_planning(
from_node.x, from_node.y, to_node.x, to_node.y, show_animation=False)
px, py, course_lens = self.sample_path(wx, wy, self.step_size)
if px is None:
return None
newNode = copy.deepcopy(from_node)
newNode.x = px[-1]
newNode.y = py[-1]
newNode.path_x = px
newNode.path_y = py
newNode.cost += sum([abs(c) for c in course_lens])
newNode.parent = from_node
return newNode | [
8961
] |
def METHOD_NAME(
project_directory: str,
project_factory: ProjectFactory,
fixture_dir: FixtureDirGetter,
) -> Poetry:
return project_factory(name="simple", source=fixture_dir(project_directory)) | [
2463
] |
def METHOD_NAME(self):
result = Transaction.sale({
"amount": TransactionAmounts.Authorize,
"merchant_account_id": TestHelper.us_bank_merchant_account_id,
"payment_method_nonce": TestHelper.generate_invalid_us_bank_account_nonce(),
"options": {
"submit_for_settlement": True,
"store_in_vault": True
}
})
self.assertFalse(result.is_success)
error_code = result.errors.for_object("transaction").on("payment_method_nonce")[0].code
self.assertEqual(error_code, ErrorCodes.Transaction.PaymentMethodNonceUnknown) | [
9,
466,
1465,
130,
622
] |
def METHOD_NAME(self) -> Dict[str, Any]:
"""
Return a dictionary of hyperparameter names to values.
"""
return self.env.hparams | [
19,
8866
] |
def METHOD_NAME(self, val: int) -> None:
"""Write an unsigned long with big endian byte order"""
self.write(pack(b">L", val)) | [
77,
673,
7456
] |
def METHOD_NAME(self):
point3d = self.surface.point2d_to_3d(volmdlr.Point2D(0.5, 0.5))
self.assertTrue(point3d.is_close(volmdlr.Point3D(0.002252005, -0.002475453, -0.5))) | [
9,
9821,
24,
1529
] |