text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint() | [
17793
] |
def METHOD_NAME(T):
return (T[:9],T[9:]) | [
265
] |
def METHOD_NAME(
entity: E,
death_cutoff: datetime = datetime(2000, 1, 1),
birth_cutoff: Optional[datetime] = None, | [
250,
1349,
4833
] |
def METHOD_NAME(self):
register = Registry()
register("plugin1")(Plugin)
plugin = register["plugin1"]
request = factory.get("/xyz")
step = FormStepFactory(
form__slug="myform",
form__authentication_backends=["plugin1"],
form_definition__login_required=True,
)
form = step.form
self.assertEqual(form.authentication_backends, ["plugin1"])
options = register.get_options(request, form)
self.assertEqual(len(options), 1)
option = options[0]
self.assertEqual(option.identifier, "plugin1")
self.assertEqual(option.label, "some human readable label")
self.assertEqual(option.url, plugin.get_start_url(request, form)) | [
9,
19,
1881
] |
def METHOD_NAME(self, index: int):
"""
Return an example's length (number of tokens), used for batching. Here
we return the max across all examples at index across all underlying
datasets.
"""
return max(
dataset.METHOD_NAME(self._map_index_to_dataset(key, index))
for key, dataset in self.datasets.items()
) | [
181,
1735
] |
def METHOD_NAME():
devs = os.listdir("/sys/class/net/")
devs = list(filter(networkConfig.isBridge, devs))
return devs | [
245,
7479
] |
def METHOD_NAME(signal: np.ndarray, length: int, expected: np.ndarray) -> None:
"""Test of pad()."""
padded_array = pad(signal, length)
assert isinstance(padded_array, np.ndarray)
np.testing.assert_array_equal(padded_array, expected) | [
9,
2459
] |
def METHOD_NAME(self, state: State) -> Hparams: | [
19,
8866
] |
def METHOD_NAME(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional | [
19,
665,
586
] |
def METHOD_NAME(profile_path, our_app_id_re):
"""Processes one mobileprovision file.
Checks if its app ID matches one of our example apps, and symlinks it in the
appropriate location if so.
Args:
profile_path: Path to the mobileprovision file.
our_app_id_re: Regular expression to extract the example name from one of
our app ids.
"""
app_id = get_app_id(profile_path)
if not app_id:
print(f"Could not parse '{profile_path}', skipping")
return
match = our_app_id_re.match(app_id)
if not match:
return
app_name = match.group(1)
app_dir_name = app_name.lower()
if app_dir_name not in example_names:
print(f"The app id '{app_id}' has our prefix, but does not seem to match" +
"any of our examples. Skipping.")
return
print(f"Found profile for {app_name}")
link_path = os.path.join(examples_ios, app_dir_name,
"provisioning_profile.mobileprovision")
update_symlink(profile_path, link_path) | [
356,
337
] |
def METHOD_NAME(self, message: str) -> None:
"""Callback for failed connection check. Displays an error message, then emits the
check_complete signal (but not the connection available signal)."""
# This must run on the main GUI thread
if hasattr(self, "connection_check_message") and self.connection_check_message:
self.connection_check_message.close()
if NetworkManager.HAVE_QTNETWORK:
QtWidgets.QMessageBox.critical(
None, translate("AddonsInstaller", "Connection failed"), message
)
else:
# pylint: disable=line-too-long
QtWidgets.QMessageBox.critical(
None,
translate("AddonsInstaller", "Missing dependency"),
translate(
"AddonsInstaller",
"Could not import QtNetwork -- see Report View for details. Addon Manager unavailable.",
),
)
self.check_complete.emit() | [
1228,
550,
1423
] |
def METHOD_NAME(self):
# set up a cosmology
# compute image postions
# compute J and velocity dispersion
D_dt = self.td_cosmo._lens_cosmo.ddt
D_d = self.td_cosmo._lens_cosmo.dd
D_s = self.td_cosmo._lens_cosmo.ds
D_ds = self.td_cosmo._lens_cosmo.dds
fermat_potential_list = self.td_cosmo.fermat_potential(
self.kwargs_lens, self.kwargs_ps
)
dt_list = self.td_cosmo.time_delays(
self.kwargs_lens, self.kwargs_ps, kappa_ext=0
)
dt = dt_list[0] - dt_list[1]
d_fermat = fermat_potential_list[0] - fermat_potential_list[1]
D_dt_infered = self.td_cosmo.ddt_from_time_delay(
d_fermat_model=d_fermat, dt_measured=dt
)
npt.assert_almost_equal(D_dt_infered, D_dt, decimal=5)
r_eff = 0.5
kwargs_lens_light = [{"Rs": r_eff * 0.551, "center_x": 0, "center_y": 0}]
kwargs_anisotropy = {"r_ani": 1}
anisotropy_model = "OM"
kwargs_numerics_galkin = {
"interpol_grid_num": 500,
"log_integration": True,
"max_integrate": 10,
"min_integrate": 0.001,
}
self.td_cosmo.kinematics_modeling_settings(
anisotropy_model,
kwargs_numerics_galkin,
analytic_kinematics=True,
Hernquist_approx=False,
MGE_light=False,
MGE_mass=False,
)
J = self.td_cosmo.velocity_dispersion_dimension_less(
self.kwargs_lens,
kwargs_lens_light,
kwargs_anisotropy,
r_eff=r_eff,
theta_E=self.kwargs_lens[0]["theta_E"],
gamma=2,
)
J_map = self.td_cosmo.velocity_dispersion_map_dimension_less(
self.kwargs_lens,
kwargs_lens_light,
kwargs_anisotropy,
r_eff=r_eff,
theta_E=self.kwargs_lens[0]["theta_E"],
gamma=2,
)
assert len(J_map) == 1
npt.assert_almost_equal(J_map[0] / J, 1, decimal=1)
sigma_v2 = J * D_s / D_ds * const.c**2
sigma_v = np.sqrt(sigma_v2) / 1000.0 # convert to [km/s]
print(sigma_v, "test sigma_v")
Ds_Dds = self.td_cosmo.ds_dds_from_kinematics(sigma_v, J, kappa_s=0, kappa_ds=0)
npt.assert_almost_equal(Ds_Dds, D_s / D_ds)
# now we perform a mass-sheet transform in the observables but leave the models identical with a convergence correction
kappa_s = 0.5
dt_list = self.td_cosmo.time_delays(
self.kwargs_lens, self.kwargs_ps, kappa_ext=kappa_s
)
sigma_v_kappa = sigma_v * np.sqrt(1 - kappa_s)
dt = dt_list[0] - dt_list[1]
D_dt_infered, D_d_infered = self.td_cosmo.ddt_dd_from_time_delay_and_kinematics(
d_fermat_model=d_fermat,
dt_measured=dt,
sigma_v_measured=sigma_v_kappa,
J=J,
kappa_s=kappa_s,
kappa_ds=0,
kappa_d=0,
)
npt.assert_almost_equal(D_dt_infered, D_dt, decimal=6)
npt.assert_almost_equal(D_d_infered, D_d, decimal=6) | [
9,
14336,
1748
] |
def METHOD_NAME(config_path, classifier_names, gamma, sigma, hierarchy, hierarchical_search):
calculator = KleinbergCalculator(config_path=config_path,
classifier_names=classifier_names,
sigma=sigma,
gamma=gamma,
hierarchy=hierarchy,
hierarchical_search=hierarchical_search)
calculator.run() | [
9,
12757,
8688,
1080,
331
] |
def METHOD_NAME():
options = {"provider_name": "fakeprovider", "action": "list", "type": "TXT"}
with pytest.raises(AttributeError):
lexicon.client.Client(ConfigResolver().with_dict(options)) | [
9,
340,
176,
1646,
1038,
1674,
427
] |
def METHOD_NAME(parent_block: 'Block', timestamp: int) -> float:
""" Calculate the next block weight, aka DAA/difficulty adjustment algorithm.
The algorithm used is described in [RFC 22](https://gitlab.com/HathorNetwork/rfcs/merge_requests/22).
The weight must not be less than `MIN_BLOCK_WEIGHT`.
"""
if TEST_MODE & TestMode.TEST_BLOCK_WEIGHT:
return 1.0
from hathor.transaction import sum_weights
root = parent_block
N = min(2 * settings.BLOCK_DIFFICULTY_N_BLOCKS, parent_block.get_height() - 1)
K = N // 2
T = AVG_TIME_BETWEEN_BLOCKS
S = 5
if N < 10:
return MIN_BLOCK_WEIGHT
blocks: list['Block'] = []
while len(blocks) < N + 1:
blocks.append(root)
root = root.get_block_parent()
assert root is not None
# TODO: revise if this assertion can be safely removed
assert blocks == sorted(blocks, key=lambda tx: -tx.timestamp)
blocks = list(reversed(blocks))
assert len(blocks) == N + 1
solvetimes, weights = zip(*(
(block.timestamp - prev_block.timestamp, block.weight)
for prev_block, block in iwindows(blocks, 2)
))
assert len(solvetimes) == len(weights) == N, f'got {len(solvetimes)}, {len(weights)} expected {N}'
sum_solvetimes = 0.0
logsum_weights = 0.0
prefix_sum_solvetimes = [0]
for st in solvetimes:
prefix_sum_solvetimes.append(prefix_sum_solvetimes[-1] + st)
# Loop through N most recent blocks. N is most recently solved block.
for i in range(K, N):
solvetime = solvetimes[i]
weight = weights[i]
x = (prefix_sum_solvetimes[i + 1] - prefix_sum_solvetimes[i - K]) / K
ki = K * (x - T)**2 / (2 * T * T)
ki = max(1, ki / S)
sum_solvetimes += ki * solvetime
logsum_weights = sum_weights(logsum_weights, log(ki, 2) + weight)
weight = logsum_weights - log(sum_solvetimes, 2) + log(T, 2)
# Apply weight decay
weight -= get_weight_decay_amount(timestamp - parent_block.timestamp)
# Apply minimum weight
if weight < MIN_BLOCK_WEIGHT:
weight = MIN_BLOCK_WEIGHT
return weight | [
1593,
243,
1336
] |
def METHOD_NAME() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='TwitterTweetAction',
inputs=['payload'],
outputs=['response', 'error'],
version='0.7.3',
license='MIT',
author='Mateusz Zitaruk',
manual='twitter_tweet_action',
init={
'source': {
'id': '',
'name': ''
},
'tweet': ''
},
form=Form(groups=[
FormGroup(
name='Twitter resource',
fields=[
FormField(
id='source',
name='Twitter resource',
description='Select Twitter resource. Credentials from selected resource will be used '
'to authorize your account.',
component=FormComponent(type='resource', props={
'label': 'resource',
'tag': 'twitter'
}
)
),
FormField(
id='tweet',
name='Tweet',
description='Please enter the content of your tweet.',
component=FormComponent(
type='textarea',
props={
'label': 'Tweet'
}
)
)
]
)
]
)
),
metadata=MetaData(
name='Send tweet',
brand='Twitter',
desc='Create and send tweet to your twitter wall.',
icon='twitter',
group=['Connectors'],
documentation=Documentation(
inputs={
'payload': PortDoc(desc='This port takes payload object.')
},
outputs={
'response': PortDoc(desc='This port returns payload if the response from Twitter was correct.'),
'error': PortDoc(desc='This port returns error object.')
}
)
)
) | [
372
] |
def METHOD_NAME(self, active) -> Deferred[Incomplete | Failure | BaseException | None]: ... | [
233
] |
def METHOD_NAME(self, word_sep: str | None = None) -> str: ... | [
2791
] |
def METHOD_NAME(x):
return A | [
3248
] |
def METHOD_NAME(self, *, man):
with pytest.raises(NotImplementedError):
man["k"] = 3 | [
9,
5719
] |
def METHOD_NAME(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_sequence_fields)
self.assertEqual(t.n_unnamed_fields, 0)
self.assertEqual(t.n_fields, time._STRUCT_TM_ITEMS) | [
9,
342
] |
def METHOD_NAME(clearance_heights, xgaps, Ds, tilts, kwargs):
# Create client
scheduler_file = '/scratch/sayala/dask_testing/scheduler.json'
client = Client(scheduler_file=scheduler_file)
# Iterate over inputs
futures = []
for ch in range (0, len(clearance_heights)):
clearance_height = clearance_heights[ch]
for xx in range (0, len(xgaps)):
xgap = xgaps[xx]
for tt in range (0, len(tilts)):
tilt = tilts[tt]
for dd in range (0, len(Ds)):
D = Ds[dd]
futures.append(client.submit(simulate_single, clearance_height=clearance_height,
xgap=xgap, tilt=tilt, D=D, **kwargs))
# Get results for all simulations
res = client.gather(futures)
# Close all dask workers and scheduler
try:
client.shutdown()
except:
pass
# Close client
client.close()
res = 'FINISHED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
return res | [
22,
5226,
1082
] |
def METHOD_NAME(obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S') | [
153,
1252
] |
def METHOD_NAME(self):
return_batch, uttid_list = self.transform(self.data, return_uttid=True)
batch = self.converter([return_batch], self.device)
if isinstance(batch, tuple):
att_ws = self.att_vis_fn(*batch)
elif isinstance(batch, dict):
att_ws = self.att_vis_fn(**batch)
return att_ws, uttid_list | [
19,
3998,
733
] |
def METHOD_NAME(self, paramInput): | [
276,
362
] |
def METHOD_NAME():
X = {
'train': np.array([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j', 'k', 'l']]),
'valid': np.array([['a', 'b', 'c'], ['x', 'e', 'f'], ['g', 'a', 'i'], ['j', 'k', 'y']]),
'test': np.array([['a', 'b', 'c'], ['d', 'e', 'x'], ['g', 'b', 'i'], ['y', 'k', 'l']]),
}
clean_X, valid_idx, test_idx = _clean_data(X, return_idx=True)
np.testing.assert_array_equal(clean_X['train'], X['train'])
np.testing.assert_array_equal(clean_X['valid'], np.array([['a', 'b', 'c']]))
np.testing.assert_array_equal(clean_X['test'], np.array([['a', 'b', 'c'], ['g', 'b', 'i']]))
np.testing.assert_array_equal(valid_idx, np.array([True, False, False, False]))
np.testing.assert_array_equal(test_idx, np.array([True, False, True, False])) | [
9,
1356,
365
] |
def METHOD_NAME(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at") | [
679,
680,
1541
] |
def METHOD_NAME(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
output_format: str = "standard",
split: bool = False,
inplace: bool = False,
errors: str = "coerce",
progress: bool = True,
) -> pd.DataFrame:
"""
Clean ISAN type data in a DataFrame column.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
The name of the column containing data of ISAN type.
output_format
The output format of standardized number string.
If output_format = 'compact', return string without any separators.
If output_format = 'standard', return string with proper separators.
If output_format = 'binary', return ISAN string with binary format.
If output_format = 'urn', return ISAN string with URN format.
If output_format = 'xml', return ISAN string with XML format.
(default: "standard")
split
If True,
each component derived from its number string will be put into its own column.
(default: False)
inplace
If True, delete the column containing the data that was cleaned.
Otherwise, keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
progress
If True, display a progress bar.
(default: True)
Examples
--------
Clean a column of ISAN data.
>>> df = pd.DataFrame({
"isan": [
"000000018947000000000000"]
})
>>> clean_isan(df, 'isan', inplace=True)
isan_clean
0 0000-0001-8947-0000-8-0000-0000-D
"""
if output_format not in {"compact", "standard", "binary", "urn", "xml"}:
raise ValueError(
f"output_format {output_format} is invalid. "
'It needs to be "compact", "standard", "binary", "urn" or "xml".'
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [_format(x, output_format, split, errors) for x in srs],
meta=object,
)
if split:
# For some reason the meta data for the last 3 components needs to be
# set. I think this is a dask bug
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0), meta=("_temp", object)),
root_identifier=df["clean_code_tup"].map(
itemgetter(1), meta=("root_identifier", object)
),
episode=df["clean_code_tup"].map(itemgetter(2), meta=("episode", object)),
version=df["clean_code_tup"].map(itemgetter(3), meta=("version", object)),
)
else:
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
df = df.drop(columns=["clean_code_tup"])
if inplace:
df[column] = df[f"{column}_clean"]
df = df.drop(columns=f"{column}_clean")
df = df.rename(columns={column: f"{column}_clean"})
with ProgressBar(minimum=1, disable=not progress):
df = df.compute()
return df | [
1356,
-1
] |
def METHOD_NAME(s, prefixlen, suffixlen):
skip = len(s) - prefixlen - suffixlen
if skip > _PLACEHOLDER_LEN:
s = '%s[%d chars]%s' % (s[:prefixlen], skip, s[len(s) - suffixlen:])
return s | [
9322
] |
def METHOD_NAME():
np.random.seed(34324)
p = 4
endog = []
exog = []
for k in range(3):
c = np.eye(p)
x = np.random.normal(size=(2, 2))
# The differences between the covariance matrices
# are all in the first 2 rows/columns.
c[0:2, 0:2] = np.dot(x.T, x)
cr = np.linalg.cholesky(c)
m = 1000*k + 50*k
x = np.random.normal(size=(m, p))
x = np.dot(x, cr.T)
exog.append(x)
endog.append(k * np.ones(m))
endog = np.concatenate(endog)
exog = np.concatenate(exog, axis=0)
for dim in 1, 2, 3:
cr = CORE(endog, exog, dim)
pt = np.random.normal(size=(p, dim))
pt, _, _ = np.linalg.svd(pt, 0)
gn = approx_fprime(pt.ravel(), cr.loglike, 1e-7)
g = cr.score(pt.ravel())
assert_allclose(g, gn, 1e-5, 1e-5)
rslt = cr.fit()
proj = rslt.params
assert_equal(proj.shape[0], p)
assert_equal(proj.shape[1], dim)
assert_allclose(np.dot(proj.T, proj), np.eye(dim), 1e-8, 1e-8)
if dim == 2:
# Here we know the approximate truth
projt = np.zeros((p, 2))
projt[0:2, 0:2] = np.eye(2)
assert_allclose(np.trace(np.dot(proj.T, projt)), 2,
rtol=1e-3, atol=1e-3) | [
9,
-1
] |
def METHOD_NAME(resource_group_name: Optional[pulumi.Input[str]] = None,
skip_token: Optional[pulumi.Input[Optional[str]]] = None,
top: Optional[pulumi.Input[Optional[int]]] = None,
virtual_network_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListNetworkManagerEffectiveSecurityAdminRulesResult]:
"""
List all effective security admin rules applied on a virtual network.
:param str resource_group_name: The name of the resource group.
:param str skip_token: When present, the value can be passed to a subsequent query call (together with the same query and scopes used in the current request) to retrieve the next page of data.
:param int top: An optional query parameter which specifies the maximum number of records to be returned by the server.
:param str virtual_network_name: The name of the virtual network.
"""
... | [
245,
1228,
722,
3071,
2326,
2870,
1634
] |
def METHOD_NAME(lbann, weekly):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes | [
102,
2355
] |
def METHOD_NAME(self, id):
if not self.faces_database or id == self.UNKNOWN_ID:
return self.UNKNOWN_ID_LABEL
return self.faces_database[id].label | [
19,
2989,
636
] |
def METHOD_NAME(command, printonly=False, exitOnError=True):
print(' '.join(command))
if printonly:
return
try:
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, check=True)
except CalledProcessError as e:
print('Error running ' + ' '.join(command))
print(e.stdout)
print(e.stderr)
if exitOnError:
fatal(2)
else:
print('continuing anyway')
else:
return result.stdout.rstrip() | [
22,
462
] |
def METHOD_NAME(binary):
header = binary.header
print("== Header ==\n")
format_str = "{:<15} {:<30}"
format_hex = "{:<15} 0x{:<13x}"
format_dec = "{:<15} {:<30d}"
modes_str = " - ".join([str(m).split(".")[-1] for m in header.modes])
bitness = ""
if header.is_32:
bitness = "32-bits"
if header.is_64:
bitness = "64-bits"
print(format_str.format("Architecture:", str(header.architecture).split(".")[-1]))
print(format_str.format("Modes:", modes_str))
print(format_hex.format("Entrypoint:", header.entrypoint))
print(format_str.format("Object type:", str(header.object_type).split(".")[-1]))
print(format_str.format("Endianness:", str(header.endianness).split(".")[-1]))
print(format_str.format("Bitness:", bitness))
print("") | [
38,
572
] |
def METHOD_NAME(self):
return self.yolks + 3 | [
19,
4138,
12349
] |
def METHOD_NAME(self):
# define a cosmology
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
self._cosmo = cosmo
redshift_list = [0.1, 0.3, 0.8] # list of redshift of the deflectors
z_source = 2 # source redshift
self._z_source = z_source
# analytic profile class in multi plane
self._lensmodel = LensModel(
lens_model_list=["NFW", "NFW", "NFW"],
lens_redshift_list=redshift_list,
multi_plane=True,
z_source_convention=z_source,
cosmo=cosmo,
z_source=z_source,
)
# a single plane class from which the convergence/mass maps are computed
single_plane = LensModel(lens_model_list=["NFW"], multi_plane=False)
# multi-plane class with three interpolation grids
self._lens_model_interp = LensModel(
lens_model_list=["INTERPOL", "INTERPOL", "INTERPOL"],
lens_redshift_list=redshift_list,
multi_plane=True,
z_source_convention=z_source,
cosmo=cosmo,
z_source=z_source,
)
# deflector parameterisation in units of reduced deflection angles to the source convention redshift
logM_200_list = [8, 9, 10] # log 10 halo masses of the three deflectors
c_list = [20, 10, 8] # concentrations of the three halos
kwargs_lens = []
kwargs_lens_interp = []
grid_spacing = 0.01 # spacing of the convergence grid in units arc seconds
x_grid, y_grid = util.make_grid(
numPix=500, deltapix=grid_spacing
) # we create the grid coordinates centered at zero
x_axes, y_axes = util.get_axes(
x_grid, y_grid
) # we need the axes only for the interpolation
mass_map_list = []
grid_spacing_list_mpc = []
for i, z in enumerate(redshift_list): # loop through the three deflectors
lens_cosmo = LensCosmo(
z_lens=z, z_source=z_source, cosmo=cosmo
) # instance of LensCosmo, a class that manages cosmology relevant quantities of a lens
alpha_Rs, Rs = lens_cosmo.nfw_physical2angle(
M=10 ** (logM_200_list[i]), c=c_list[i]
) # we turn the halo mass and concentration in reduced deflection angles and angles on the sky
kwargs_nfw = {
"Rs": Rs,
"alpha_Rs": alpha_Rs,
"center_x": 0,
"center_y": 0,
} # lensing parameters of the NFW profile in lenstronomy conventions
kwargs_lens.append(kwargs_nfw)
kappa_map = single_plane.kappa(
x_grid, y_grid, [kwargs_nfw]
) # convergence map of a single NFW profile
kappa_map = util.array2image(kappa_map)
mass_map = (
lens_cosmo.sigma_crit_angle * kappa_map * grid_spacing**2
) # projected mass per pixel on the grid
mass_map_list.append(mass_map)
npt.assert_almost_equal(
np.log10(np.sum(mass_map)), logM_200_list[i], decimal=0
) # check whether the sum of the mass roughly corresponds to the mass definition
grid_spacing_mpc = lens_cosmo.arcsec2phys_lens(
grid_spacing
) # turn grid spacing from arcseconds into Mpc
grid_spacing_list_mpc.append(grid_spacing_mpc)
f_x, f_y = convergence_integrals.deflection_from_kappa_grid(
kappa_map, grid_spacing
) # perform the deflection calculation from the convergence map
f_ = convergence_integrals.potential_from_kappa_grid(
kappa_map, grid_spacing
) # perform the lensing potential calculation from the convergence map (attention: arbitrary normalization)
kwargs_interp = {
"grid_interp_x": x_axes,
"grid_interp_y": y_axes,
"f_": f_,
"f_x": f_x,
"f_y": f_y,
} # keyword arguments of the interpolation model
kwargs_lens_interp.append(kwargs_interp)
self.kwargs_lens = kwargs_lens
self.kwargs_lens_interp = kwargs_lens_interp
self.lightCone = LightCone(
mass_map_list, grid_spacing_list_mpc, redshift_list
) # here we make the instance of the LightCone class based on the mass map, physical grid spacing and redshifts. | [
102,
103
] |
def METHOD_NAME(self):
"""
test the user list comparison method
"""
user_conditions = (
# exact name match
"Hugo, "
# negative test
"!Emma, "
# wildcard realm test
"*@realm, "
# wildcard name test
"a*, "
# negative wildcad name test
"!*z"
)
hugo = User("Hugo", "realm")
match_type, match = user_list_compare(user_conditions, hugo)
assert match
assert match_type == "exact:match"
emma = User("Emma")
match_type, match = user_list_compare(user_conditions, emma)
assert not match
assert match_type == "not:match"
betonz = User("betonz", "realm")
match_type, match = user_list_compare(user_conditions, betonz)
assert not match
assert match_type == "not:match"
wanda = User("wanda", "realm")
match_type, match = user_list_compare(user_conditions, wanda)
assert match
assert match_type == "regex:match"
wanda2 = "wanda@realm"
match_type, match = user_list_compare(user_conditions, wanda2)
assert match
assert match_type == "regex:match"
return | [
9,
21,
979
] |
def METHOD_NAME(self):
return True | [
220,
437,
672
] |
def METHOD_NAME(self, value, skel: 'SkeletonInstance', name: str, parentIndexed: bool):
if not self.caseSensitive and parentIndexed:
return {"val": value, "idx": value.lower() if isinstance(value, str) else None}
return value | [
97,
99,
183
] |
def METHOD_NAME():
input_data = np.random.random((10, 3, 4)).astype(np.float32)
test_utils.layer_test(
spectral_normalization.SpectralNormalization,
kwargs={"layer": tf.keras.layers.Dense(2), "input_shape": (3, 4)},
input_data=input_data,
) | [
9,
4098
] |
def METHOD_NAME():
with pytest.raises(ValueError):
stats.cash(10, 10, 0.0) | [
9,
3513,
1068,
3514
] |
def METHOD_NAME(self, namespace, test_set):
self.as_connection.truncate(namespace, test_set, 0)
self._assert_truncation_status(self.truncated_keys, exists=False) | [
9,
2605,
0,
774,
3514
] |
def METHOD_NAME(serv=None, mes=None):
create_log_data = {'service': serv, 'message': mes}
url = "http://{}:8087/logs".format(LOGS)
response = requests.post(
url, data=json.dumps(create_log_data),
headers={'Content-Type': 'application/json'}
)
assert response.status_code == 200
return "success" | [
1099
] |
def METHOD_NAME(self):
"""
Returns the else-clause.
:return: the else-clause.
:rtype: ASTElseClause
"""
return self.else_clause | [
19,
4227,
1177
] |
def METHOD_NAME(self) -> None:
if self.result.label == "undefined":
logging.info("Result is undefined. Skipping figure creation.")
return
if self.area_sqkm < 10:
max_area = 10
else:
max_area = round(self.area_sqkm * 2 / 10) * 10
# Create x-axis data
x = np.linspace(0, max_area, 2)
# Calculate y-axis data for thresholds
y1 = [self.green_threshold_function(xi) for xi in x]
y2 = [self.yellow_threshold_function(xi) for xi in x]
# Calculate y-axis data for fill_between areas
fill_1 = np.maximum(y2, 0)
fill_2 = np.maximum(y1, 0)
fill_3 = np.maximum(y1[1], np.array([self.count, self.count]))
# Create figure and add traces
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=x,
y=fill_1,
mode="lines",
fill="tonexty",
line=dict(color="red"),
showlegend=False,
)
)
fig.add_trace(
go.Scatter(
x=x,
y=fill_2,
mode="lines",
fill="tonexty",
line=dict(color="yellow"),
showlegend=False,
)
)
fig.add_trace(
go.Scatter(
x=x,
y=fill_3,
mode="lines",
fill="tonexty",
line=dict(color="green"),
showlegend=False,
)
)
fig.add_trace(
go.Scatter(
x=x,
y=y2,
mode="lines",
line=dict(dash="dot", color="black"),
name="Threshold B",
)
)
fig.add_trace(
go.Scatter(
x=x,
y=y1,
mode="lines",
line=dict(dash="dash", color="black"),
name="Threshold A",
)
)
fig.add_trace(
go.Scatter(
x=[self.area_sqkm],
y=[self.count],
mode="markers",
marker=dict(symbol="circle", size=10, color="black"),
name="Location",
)
)
# Update layout
fig.update_layout(
title="Density (Features per Area)",
xaxis_title="Area (km²)",
yaxis_title="Features",
)
raw = fig.to_dict()
raw["layout"].pop("template") # remove boilerplate
self.result.figure = raw | [
129,
7014
] |
def METHOD_NAME(self, poly):
if "points" in poly.attrib:
self._start_path("M" + poly.attrib["points"])
self._end_path() | [
214,
1117
] |
def METHOD_NAME(course):
value = get_upload_folder(course)
if value:
return value
return _add_panopto_folder(
course, settings.PANOPTO_PARENT_FOLDER, UPLOAD_FOLDER_KEY) | [
238,
172,
451
] |
def METHOD_NAME(nodes):
"""Return a native Python type from the list of compiled nodes. If the
result is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
https://github.com/pallets/jinja/blob/master/src/jinja2/nativetypes.py
"""
head = list(islice(nodes, 2))
if not head:
return None
if len(head) == 1:
out = head[0]
# TODO send unvaulted data to literal_eval?
if isinstance(out, AnsibleVaultEncryptedUnicode):
return out.data
if isinstance(out, NativeJinjaText):
# Sometimes (e.g. ``| string``) we need to mark variables
# in a special way so that they remain strings and are not
# passed into literal_eval.
# See:
# https://github.com/ansible/ansible/issues/70831
# https://github.com/pallets/jinja/issues/1200
# https://github.com/ansible/ansible/issues/70831#issuecomment-664190894
return out
# short-circuit literal_eval for anything other than strings
if not isinstance(out, string_types):
return out
else:
if isinstance(nodes, GeneratorType):
nodes = chain(head, nodes)
out = ''.join([to_text(v) for v in nodes])
try:
evaled = ast.literal_eval(
# In Python 3.10+ ast.literal_eval removes leading spaces/tabs
# from the given string. For backwards compatibility we need to
# parse the string ourselves without removing leading spaces/tabs.
ast.parse(out, mode='eval')
)
except (ValueError, SyntaxError, MemoryError):
return out
if isinstance(evaled, string_types):
quote = out[0]
return f'{quote}{evaled}{quote}'
return evaled | [
4090,
1577,
2008
] |
def METHOD_NAME(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/default_global_naming_convention.yaml') | [
9,
235,
285,
6902,
8921
] |
def METHOD_NAME(app):
app.builder.build_all()
assert (app.outdir / 'prolog_epilog_substitution.pot').is_file()
pot = (app.outdir / 'prolog_epilog_substitution.pot').read_text(encoding='utf8')
msg_ids = list(filter(None, map(msgid_getter, pot.splitlines())))
assert msg_ids == [
"i18n with prologue and epilogue substitutions",
"This is content that contains |subst_prolog_1|.",
"Substituted image |subst_prolog_2| here.",
"subst_prolog_2",
".. image:: /img.png",
"This is content that contains |subst_epilog_1|.",
"Substituted image |subst_epilog_2| here.",
"subst_epilog_2",
".. image:: /i18n.png",
] | [
9,
6316,
5830,
6633,
4282
] |
def METHOD_NAME(outerForm, appender):
if outerForm is not None:
innerForm = outerForm + '-' + appender
else:
innerForm = appender
return innerForm | [
1459,
1029
] |
def METHOD_NAME(self):
res = self.hist_bins.text().toInt()
if(res[1]):
self.snk.METHOD_NAME(res[0]) | [
0,
4704
] |
def METHOD_NAME(self, x): | [
303,
667
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self, sock: socket.socket) -> Iterator[ChatMessageItem]:
while True:
try:
data = sock.recv(self.buffer_size)
if not data:
continue
resp = data.decode(ENCODING)
now = dt.datetime.now()
except socket.timeout as e:
raise TimeoutError(f"Twitch did not respond in {self.timeout:,d} seconds") from e
except UnicodeDecodeError:
continue
if self._is_ping(resp):
self._send(sock, IrcMessage.PONG, ":tmi.twitch.tv")
yield from self._extract_chat_messages(resp, now) | [
370,
1768
] |
def METHOD_NAME(cls):
"""
Create API client on creation of class.
"""
cls.client = TestWorkflow.start()
cls.httpd = HTTPServer(("127.0.0.1", 8001), RequestHandler)
server = Thread(target=cls.httpd.serve_forever)
server.setDaemon(True)
server.start() | [
0,
1,
2
] |
def METHOD_NAME(compiler):
"""Return the -std=c++[11/14] compiler flag.
C++14 is preferred over C++11 (when it is available).
"""
if has_flag(compiler, "-std=c++14"):
return "-std=c++14"
elif has_flag(compiler, "-std=c++11"):
return "-std=c++11"
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!') | [
7728,
584
] |
def METHOD_NAME(A: dace.float32[N], B: dace.float32[N]):
for i in range(N):
B[i] = A[i] | [
2069,
43
] |
def METHOD_NAME(name, kwargs):
"""Checks if the deprecated argument is in kwargs
Raises warning, if present.
Args:
name: name of deprecated argument
kwargs: keyword arguments dict
"""
if name in kwargs:
removed_warning(name) | [
674,
1475
] |
def METHOD_NAME(self, timeout: float) -> None:
self._timeout = timeout | [
0,
659
] |
def METHOD_NAME(cls, config=None):
_config = {} if config is None else config
return SpecAugmentTransform(
_config.get("time_warp_W", 0),
_config.get("freq_mask_N", 0),
_config.get("freq_mask_F", 0),
_config.get("time_mask_N", 0),
_config.get("time_mask_T", 0),
_config.get("time_mask_p", 0.0),
_config.get("mask_value", None),
) | [
280,
200,
553
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(self, path=None, skip_tls_verify=False):
"""
creates kubeconfig file for the cluster
Args:
path (str): Path to create kubeconfig file
skip_tls_verify (bool): True to bypass the certificate check
and use insecure connections
"""
path = path or self.kubeconfig_path
cmd = f"{config.ENV_DATA['ms_prod_oc_login']} --kubeconfig {path}"
if skip_tls_verify:
cmd = f"{cmd} --insecure-skip-tls-verify"
exec_cmd(cmd) | [
567,
17326,
171
] |
def METHOD_NAME(self, chromosome):
"""Add the collected segment information to a chromosome for drawing.
Arguments:
- chromosome - A Chromosome graphics object that we can add
chromosome segments to.
This creates ChromosomeSegment (and TelomereSegment) objects to
fill in the chromosome. The information is derived from the
label and count information, with counts transformed to the
specified color map.
Returns the chromosome with all of the segments added.
"""
for seg_num in range(len(self._names)):
is_end_segment = 0
# make the top and bottom telomeres
if seg_num == 0:
cur_segment = TelomereSegment()
is_end_segment = 1
elif seg_num == len(self._names) - 1:
cur_segment = TelomereSegment(1)
is_end_segment = 1
# otherwise, they are just regular segments
else:
cur_segment = ChromosomeSegment()
seg_name = self._names[seg_num]
if self._count_info[seg_name] > 0:
color = self._color_from_count(self._count_info[seg_name])
cur_segment.fill_color = color
if self._label_info[seg_name] is not None:
cur_segment.label = self._label_info[seg_name]
# give end segments extra size so they look right
if is_end_segment:
cur_segment.scale = 3
else:
cur_segment.scale = self._scale_info[seg_name]
chromosome.add(cur_segment)
return chromosome | [
1917,
13580
] |
def METHOD_NAME(request):
"""List all active projects."""
METHOD_NAME = (
Project.objects.visible()
.visible_for(request.user)
.prefetch_related("latest_translation__user")
.order_by("name")
)
if not METHOD_NAME:
return render(request, "no_projects.html", {"title": "Projects"})
return render(
request,
"projects/projects.html",
{"projects": METHOD_NAME, "top_instances": METHOD_NAME.get_top_instances()},
) | [
2847
] |
def METHOD_NAME(self):
rst1 = ERst(test_path("data/SPE9.UNRST"))
# get first occurrence of ZWEL, report step 37
zwel1 = rst1[11, 37]
zwel2 = rst1["ZWEL",37, 0]
zwel3 = rst1["ZWEL",37]
for v1,v2 in zip (zwel1, zwel2):
self.assertEqual(v1, v2)
for v1,v2 in zip (zwel1, zwel3):
self.assertEqual(v1, v2)
self.assertEqual(len(zwel1), 78)
self.assertEqual(zwel1[0], "INJE1")
self.assertEqual(zwel1[3], "PRODU2")
self.assertEqual(zwel1[6], "PRODU3")
# get first occurrence of INTEHEAD, report step 37
inteh = rst1["INTEHEAD",37]
self.assertEqual(len(inteh), 411)
self.assertTrue(isinstance(inteh, np.ndarray))
self.assertEqual(inteh.dtype, "int32")
self.assertEqual(inteh[1], 201702)
self.assertEqual(inteh[9], 25)
self.assertEqual(inteh[64], 6)
self.assertEqual(inteh[65], 1)
self.assertEqual(inteh[66], 2016)
# get first occurrence of PRESSURE, report step 74
pres74 = rst1["PRESSURE",74]
self.assertTrue(isinstance(pres74, np.ndarray))
self.assertEqual(pres74.dtype, "float32")
self.assertEqual(len(pres74), 9000)
self.assertAlmostEqual(pres74[0], 2290.9192, 4)
self.assertAlmostEqual(pres74[1], 2254.6619, 4)
self.assertAlmostEqual(pres74[2], 2165.5347, 4)
self.assertAlmostEqual(pres74[3], 1996.2598, 4)
xcon = rst1["XCON", 74]
self.assertTrue(isinstance(xcon, np.ndarray))
self.assertEqual(xcon.dtype, "float64")
self.assertEqual(len(xcon), 7540)
self.assertAlmostEqual(xcon[1], -22.841887080742975, 10)
logih = rst1["LOGIHEAD", 74]
self.assertTrue(isinstance(logih, np.ndarray))
self.assertEqual(len(logih), 121)
for b1, b2 in zip([True, True, False, False, False], logih[0:5]):
self.assertEqual(b1, b2) | [
9,
5181
] |
async def METHOD_NAME() -> None:
"""It should be possible to clear the option list of all content."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 5
option_list.clear_options()
assert option_list.option_count == 0 | [
9,
537,
1335,
245
] |
async def METHOD_NAME(request: Request) -> FastAPIResponse:
request = Request(request.scope, request.receive)
return await original_route_handler(request) | [
343,
2476,
1519
] |
def METHOD_NAME(self):
cube_small = self.cube_a[:, 0, 0]
cube_small_masked = cube_small.copy()
cube_small_masked.data = ma.array(
cube_small.data, mask=np.array([0, 0, 0, 1, 1, 1], dtype=bool)
)
r = stats.pearsonr(cube_small, cube_small_masked, common_mask=True)
self.assertArrayAlmostEqual(r.data, np.array([1.0])) | [
9,
67,
361,
53
] |
def METHOD_NAME(
project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None
):
"""Returns list of linked ids of particular type (if provided).
One of representation document or representation id must be passed.
Note:
Representation links now works only from representation through version
back to representations.
Todos:
Missing depth query. Not sure how it did find more representations in
depth, probably links to version?
Args:
project_name (str): Name of project where look for links.
repre_doc (Dict[str, Any]): Representation document.
repre_id (Union[ObjectId, str]): Representation id.
link_type (str): Type of link (e.g. 'reference', ...).
max_depth (int): Limit recursion level. Default: 0
Returns:
List[ObjectId] Linked representation ids.
"""
if repre_doc:
repre_id = repre_doc["_id"]
if not repre_id and not repre_doc:
return []
version_id = None
if repre_doc:
version_id = repre_doc.get("parent")
if not version_id:
repre_doc = get_representation_by_id(
project_name, repre_id, fields=["parent"]
)
if repre_doc:
version_id = repre_doc["parent"]
if not version_id:
return []
if max_depth is None or max_depth == 0:
max_depth = 1
link_types = None
if link_type:
link_types = [link_type]
# Store already found version ids to avoid recursion, and also to store
# output -> Don't forget to remove 'version_id' at the end!!!
linked_version_ids = {version_id}
# Each loop of depth will reset this variable
versions_to_check = {version_id}
for _ in range(max_depth):
if not versions_to_check:
break
links = get_versions_links(
project_name,
versions_to_check,
link_types=link_types,
link_direction="out")
versions_to_check = set()
for link in links:
# Care only about version links
if link["entityType"] != "version":
continue
entity_id = link["entityId"]
# Skip already found linked version ids
if entity_id in linked_version_ids:
continue
linked_version_ids.add(entity_id)
versions_to_check.add(entity_id)
linked_version_ids.remove(version_id)
if not linked_version_ids:
return []
representations = ayon_api.get_representations(
project_name,
version_ids=linked_version_ids,
fields=["id"])
return [
repre["id"]
for repre in representations
] | [
19,
4643,
1504,
147
] |
def METHOD_NAME(self):
import_helper.import_module("winreg")
returncode, events, stderr = self.run_python("test_winreg")
if returncode:
self.fail(stderr)
self.assertEqual(events[0][0], "winreg.OpenKey")
self.assertEqual(events[1][0], "winreg.OpenKey/result")
expected = events[1][2]
self.assertTrue(expected)
self.assertSequenceEqual(["winreg.EnumKey", " ", f"{expected} 0"], events[2])
self.assertSequenceEqual(["winreg.EnumKey", " ", f"{expected} 10000"], events[3])
self.assertSequenceEqual(["winreg.PyHKEY.Detach", " ", expected], events[4]) | [
9,
-1
] |
def METHOD_NAME(self):
"""Test _create_msg_for_version_check"""
# No need for additional test for successful case
# Error case: bad error message pattern
bad_patterns = [
"no param",
"one param %s",
"three params %s %s %s"
]
for bad_pattern in bad_patterns:
message = _create_msg_for_version_check(bad_pattern, Version('2.2'), Version('2.2'))
self.assertEqual(
message,
bad_pattern + " -> See https://fedbiomed.org/latest/user-guide/deployment/versions for more information") | [
9,
295,
6881,
277,
43,
281,
250
] |
def METHOD_NAME(self, name, key, generator=None, shared=False): | [
15986
] |
def METHOD_NAME(self, x, K, s):
return K * np.exp(-0.5 * (x / s) ** 2) / (s * np.sqrt(2 * np.pi)) | [
717
] |
def METHOD_NAME(self, current_datetime):
return self.publish_date is not None and self.publish_date < current_datetime | [
137,
2999
] |
def METHOD_NAME(self, r):
return np.sqrt(r[0] * r[0] + r[1] * r[1] + r[2] * r[2]) | [
4767
] |
def METHOD_NAME(self): | [
9,
940,
24,
4940,
432,
69,
3224
] |
def METHOD_NAME():
return str(int(time.time())) | [
9,
147
] |
def METHOD_NAME(self, timeout):
with error_check("Cannot receive message"):
frame = self.interface.recv(int(timeout * 1000))
if frame is None:
# timeout occurred
return None, False
msg = Message(
arbitration_id=frame["id"],
is_extended_id=frame["extended"],
timestamp=frame["timestamp"],
is_remote_frame=frame["rtr"],
dlc=frame["dlc"],
data=frame["data"][: frame["dlc"]],
channel=frame["channel"],
is_rx=(not frame["loopback"]), # received if not loopback frame
)
return msg, False | [
1398,
2026
] |
def METHOD_NAME(relay_server, wandb_init):
with relay_server() as relay:
run = wandb_init(settings=dict(console="off"))
run.log({"a": 1})
run.mark_preempting()
# poll for message arrival
for _ in range(3):
preempting = relay.context.entries[run.id].get("preempting")
if preempting:
break
time.sleep(1)
assert any(preempting)
run.finish() | [
9,
1798,
24,
1798,
-1,
2499,
298
] |
def METHOD_NAME(self) -> DBDiff:
return self._track_diff.diff() | [
2443
] |
def METHOD_NAME(self, video_id, episode):
path = f'/api/v6.2_w/stream/vod/{video_id}/{episode}/auto_vip'
timestamp = int(time.time()) + 10800
t = hashlib.md5(f'WEBv6Dkdsad90dasdjlALDDDS{timestamp}{path}'.encode()).hexdigest().upper()
r = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
n = [int(f'0x{t[2 * o: 2 * o + 2]}', 16) for o in range(len(t) // 2)]
def convert(e):
t = ''
n = 0
i = [0, 0, 0]
a = [0, 0, 0, 0]
s = len(e)
c = 0
for z in range(s, 0, -1):
if n <= 3:
i[n] = e[c]
n += 1
c += 1
if 3 == n:
a[0] = (252 & i[0]) >> 2
a[1] = ((3 & i[0]) << 4) + ((240 & i[1]) >> 4)
a[2] = ((15 & i[1]) << 2) + ((192 & i[2]) >> 6)
a[3] = (63 & i[2])
for v in range(4):
t += r[a[v]]
n = 0
if n:
for o in range(n, 3):
i[o] = 0
for o in range(n + 1):
a[0] = (252 & i[0]) >> 2
a[1] = ((3 & i[0]) << 4) + ((240 & i[1]) >> 4)
a[2] = ((15 & i[1]) << 2) + ((192 & i[2]) >> 6)
a[3] = (63 & i[2])
t += r[a[o]]
n += 1
while n < 3:
t += ''
n += 1
return t
st_token = convert(n).replace('+', '-').replace('/', '_').replace('=', '')
return f'https://api.fptplay.net{path}?{urllib.parse.urlencode({"st": st_token, "e": timestamp})}' | [
19,
58,
41,
1780,
466
] |
def METHOD_NAME(self):
self.pre_operations()
yield self.RouteTablesDelete(ctx=self.ctx)()
self.post_operations() | [
750,
710
] |
def METHOD_NAME(self, *args, **kwargs):
"""
Pretends to do the conversion, by copying the input file
to the output file
"""
shutil.copy2(
Path(kwargs["input_file"].rstrip("[0]")),
Path(kwargs["output_file"]),
) | [
5042,
197,
146
] |
def METHOD_NAME(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(keycode: int) -> HIDEvent:
return HIDPress(action=HIDKey(keycode=keycode), direction=HIDDirection.DOWN) | [
59,
481,
417
] |
def METHOD_NAME(request, module_target_sat, module_capsule_configured):
if settings.remotedb.server:
yield Satellite(settings.remotedb.server)
else:
module_target_sat.register_to_cdn(pool_ids=settings.subscription.fm_rhn_poolid.split())
hosts = {'satellite': module_target_sat, 'capsule': module_capsule_configured}
yield hosts[request.param] | [
10270,
10887
] |
def METHOD_NAME(self, **client_settings):
addr, stop = self.start_server()
client = self.start_client(addr, **client_settings)
self._client_assertions(client, addr, **client_settings)
client.close()
stop() | [
9,
235,
-1,
200
] |
def METHOD_NAME(self, **kwargs: Any) -> Iterable["_models.Sku"]:
"""Lists the available SKUs supported by Microsoft.Storage for given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Sku or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2018_11_01.models.Sku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2018-11-01"))
cls: ClsType[_models.StorageSkuListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StorageSkuListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data) | [
245
] |
def METHOD_NAME(A, V):
n = A.shape[0]
invert(A, V) | [
283,
-1
] |
def METHOD_NAME(self):
self._test_parse_create()
file_path = os.path.join(os.path.dirname(__file__), "data", "v2", "basic_noop.xml")
with open(file_path, "rb") as f:
xml_data = f.read()
case = submit_form_locally(xml_data, 'test-domain').case
self.assertFalse(case.closed)
self.assertEqual("bar-user-id", case.user_id)
self.assertEqual(datetime(2011, 12, 7, 13, 44, 50), case.modified_on)
self.assertEqual(2, len(case.xform_ids)) | [
9,
214,
1952
] |
def METHOD_NAME(
self,
result_variable="TOF",
selection_mode="FLOW_TR_BY_SELECTION",
injectors=None,
producers=None,
):
"""Apply a flow diagnostics cell result
**Parameters**::
Parameter | Description | Type
------------------- | ------------------------------------------------------ | -----
result_variable | String representing the result value | String
selection_mode | String specifying which tracers to select | String
injectors | List of injector names, used by 'FLOW_TR_BY_SELECTION' | String List
producers | List of injector names, used by 'FLOW_TR_BY_SELECTION' | String List
**Enum compdat_export**::
Option | Description
------------------------| ------------
"TOF" | Time of flight
"Fraction" | Fraction
"MaxFractionTracer" | Max Fraction Tracer
"Communication" | Communication
"""
if injectors is None:
injectors = []
if producers is None:
producers = []
cell_result = self.cell_result()
cell_result.result_type = "FLOW_DIAGNOSTICS"
cell_result.result_variable = result_variable
cell_result.flow_tracer_selection_mode = selection_mode
if selection_mode == "FLOW_TR_BY_SELECTION":
cell_result.selected_injector_tracers = injectors
cell_result.selected_producer_tracers = producers
cell_result.update() | [
231,
233,
7643,
118,
1571
] |
def METHOD_NAME(self, r, g, b, **kwargs):
"""
Create the four images {rgb, r, g, b}.
Parameters
----------
r, g, b : array-like
The red, green, and blue arrays.
**kwargs :
Forwarded to `~.Axes.imshow` calls for the four images.
Returns
-------
rgb : `~matplotlib.image.AxesImage`
r : `~matplotlib.image.AxesImage`
g : `~matplotlib.image.AxesImage`
b : `~matplotlib.image.AxesImage`
"""
if not (r.shape == g.shape == b.shape):
raise ValueError(
f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
RGB = np.dstack([r, g, b])
R = np.zeros_like(RGB)
R[:, :, 0] = r
G = np.zeros_like(RGB)
G[:, :, 1] = g
B = np.zeros_like(RGB)
B[:, :, 2] = b
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b | [
7120,
2310
] |
def METHOD_NAME(self, data):
if self._writer is not None:
self._writer.METHOD_NAME(data) | [
77,
771
] |
def METHOD_NAME(
name,
asn1_file,
prefix):
"""Create a CC library of generated asn1 files.
This library wraps up generated files from these 3 actions:
1. generate .c/.h files by running asn1c with the given input: asn1_file
2. remove all non .c/.h files
3. apply string substitutions
Args:
name: the name of rule
asn1_file: relative path to the .asn1 file that will be passed to asn1c
prefix: value that is set to ASN1C_PREFIX
"""
gen_name = name + "_genrule"
flags = "-pdu=all -fcompound-names -fno-include-deps -gen-PER -no-gen-example"
# Taken from https://github.com/magma/magma/blob/14c1cf643a61d576b3d24642e17ed3911d19210d/lte/gateway/c/core/oai/tasks/s1ap/CMakeLists.txt#L35
# The original PR (PR2707) doesn't give an explanation on why this is necessary.
# I'm guessing it is to avoid the following GCC warning: integer constant is so large that it is unsigned
substitutions = {
"18446744073709551615": "18446744073709551615u",
}
gen_with_asn1c(
name = gen_name,
asn1_file = asn1_file,
flags = flags,
prefix = prefix,
substitutions = substitutions,
)
cc_library(
name = name,
srcs = [gen_name],
# This is needed so that the CCInfo (header/include info) can be used
deps = [gen_name],
# Dynamically linking this library is currently broken
# linkstatic=True here forces only a .a file to be produced, forcing this library to be linked statically
linkstatic = True,
) | [
1298,
-1,
3106
] |
def METHOD_NAME(date_time: datetime) -> str:
"""Return the date in legislation timezone as a string."""
date_time = LegislationDatetime.as_legislation_timezone(date_time)
return date_time.strftime('%Y-%m-%d') | [
275,
947,
15581,
153
] |
def METHOD_NAME(n: int, d: float = 1.0) -> TensorType:
"""fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"""
raise NotImplementedError() | [
3073,
4548,
-1
] |
def METHOD_NAME(self, x):
bs = x.size(0)
x = self.first_conv(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
x = self.dropout(x)
x = x.contiguous().view(bs, -1)
x = self.classifier(x)
return x | [
76
] |
def METHOD_NAME(self, i):
"""
There are two standard steps for each iteration: expectation (E) and
minimization (M). The E-step (assignment) is performed with an exhaustive
search and the M-step (centroid computation) is performed with
the exact solution.
Args:
- i: step number
Remarks:
- The E-step heavily uses PyTorch broadcasting to speed up computations
and reduce the memory overhead
"""
# assignments (E-step)
distances = self.compute_distances() # (n_centroids x out_features)
self.assignments = torch.argmin(distances, dim=0) # (out_features)
n_empty_clusters = self.resolve_empty_clusters()
# centroids (M-step)
for k in range(self.n_centroids):
W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k)
self.centroids[k] = W_k.mean(dim=1) # (in_features)
# book-keeping
obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
self.objective.append(obj)
if self.verbose:
logging.info(
f"Iteration: {i},\t"
f"objective: {obj:.6f},\t"
f"resolved empty clusters: {n_empty_clusters}"
) | [
367
] |