text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME():
link_line = ""
import tempfile
cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
with open("CMakeLists.txt", "w") as f:
f.write("cmake_minimum_required(VERSION 3.10)\nproject(DACE_findMPI)\nfind_package(MPI)\n")
os.mkdir('build')
os.chdir(os.path.join(tmp_dir, 'build'))
os.system("cmake ..")
os.system("cmake ..")
script = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'intel_mkl_mpich.cmake')
output = os.popen(f"cmake -C {script}")
line = output.readline()
while not line.startswith('--'):
line = output.readline()
link_line = line[2:].strip('\n')
print(link_line, flush=True)
os.chdir(cwd)
libpath = IntelMKLScaLAPACKMPICH._find_mkl_lib_path()
return [
f"-L {libpath} -lmkl_scalapack_lp64 -Wl,--no-as-needed -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core -lmkl_blacs_intelmpi_lp64 {link_line} -lgomp -lpthread -lm -ldl"
] | [
334,
548,
1106
] |
def METHOD_NAME( a, resize = 1.5, angle = 20., interpolation = "linear", blocks = (16,16,1) ):
"""
Rotates the array. The new array has the new size and centers the
picture in the middle.
a - array (2-dim)
resize - ratio of the new image width to the old image width
angle - degrees to rotate the image
interpolation - "linear" or None
blocks - given to the kernel when run
returns: a new array with dtype=uint8 containing the rotated image
"""
angle = angle/180. *pi
# Convert this image to float. Unsigned int texture gave
# strange results for me. This conversion is slow though :(
a = a.astype("float32")
# Calculate the dimensions of the new image
calc_x = lambda x_y: (x_y[0]*a.shape[1]/2.*cos(angle)-x_y[1]*a.shape[0]/2.*sin(angle))
calc_y = lambda x_y1: (x_y1[0]*a.shape[1]/2.*sin(angle)+x_y1[1]*a.shape[0]/2.*cos(angle))
xs = [ calc_x(p) for p in [ (-1.,-1.),(1.,-1.),(1.,1.),(-1.,1.) ] ]
ys = [ calc_y(p) for p in [ (-1.,-1.),(1.,-1.),(1.,1.),(-1.,1.) ] ]
new_image_dim = (
int(numpy.ceil(max(ys)-min(ys))*resize),
int(numpy.ceil(max(xs)-min(xs))*resize),
)
# Now generate the cuda texture
cuda.matrix_to_texref(a, texref, order="C")
# We could set the next if we wanted to address the image
# in normalized coordinates ( 0 <= coordinate < 1.)
# texref.set_flags(cuda.TRSF_NORMALIZED_COORDINATES)
if interpolation == "linear":
texref.set_filter_mode(cuda.filter_mode.LINEAR)
# Calculate the gridsize. This is entirely given by the size of our image.
gridx = new_image_dim[0]//blocks[0] if \
new_image_dim[0]%blocks[0]==0 else new_image_dim[0]//blocks[0] +1
gridy = new_image_dim[1]//blocks[1] if \
new_image_dim[1]%blocks[1]==0 else new_image_dim[1]//blocks[1] +1
# Get the output image
output = numpy.zeros(new_image_dim,dtype="uint8")
# Call the kernel
copy_texture_func(
numpy.float32(resize), numpy.float32(angle),
numpy.uint16(a.shape[1]), numpy.uint16(a.shape[0]),
numpy.uint16(new_image_dim[1]), numpy.uint16(new_image_dim[0]),
cuda.Out(output),texrefs=[texref],block=blocks,grid=(gridx,gridy))
return output | [
1672,
660
] |
def METHOD_NAME(x, N):
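# Simple moving average: convolve with a length-N box kernel normalized by N.
# Hedged usage sketch (assumes `import numpy as np`):
#   METHOD_NAME(np.arange(10.0), 3)  # smoothed array, same length as the input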
return np.convolve(x, np.ones((N,))/N, mode='same') | [
1340,
314
] |
def METHOD_NAME(
rank: int,
rampup_batch_size: Optional[List[int]],
global_batch_size: int,
micro_batch_size: int,
data_parallel_size: int,
):
# Constant num micro-batches.
if rampup_batch_size is None:
num_microbatches_calculator = ConstantNumMicroBatches(
global_batch_size, micro_batch_size, data_parallel_size
)
if rank == 0:
_logger.info(
"setting number of micro-batches to constant {}".format(
num_microbatches_calculator.get()
)
)
else:
assert len(rampup_batch_size) == 3, (
"expected the following "
"format: --rampup-batch-size <start batch size> "
"<batch size incerement> <ramp-up samples>"
)
start_batch_size = int(rampup_batch_size[0])
batch_size_increment = int(rampup_batch_size[1])
ramup_samples = int(rampup_batch_size[2])
if rank == 0:
_logger.info(
"will use batch size rampup starting from global batch "
"size {} to global batch size {} with batch size increments "
"{} over {} samples.".format(
start_batch_size,
global_batch_size,
batch_size_increment,
ramup_samples,
)
)
num_microbatches_calculator = RampupBatchsizeNumMicroBatches(
start_batch_size,
batch_size_increment,
ramup_samples,
global_batch_size,
micro_batch_size,
data_parallel_size,
)
return num_microbatches_calculator | [
56,
181,
-1,
8688
] |
def METHOD_NAME(
http: type[httpretty.httpretty], uploader: Uploader
) -> None:
http.register_uri(http.POST, "https://foo.com", status=403, body="Unauthorized")
with pytest.raises(UploadError):
uploader.upload("https://foo.com", skip_existing=True) | [
9,
7332,
2423,
1153,
-1,
-1,
1096
] |
def METHOD_NAME() -> Dict[str, int]:
with stats.get_connection() as cursor:
cursor.execute('''
SELECT max(value) as value, cast(date as date) as date, printer
FROM printer_pages_public
GROUP BY cast(date as date), printer
ORDER BY date ASC, printer ASC
''')
# Resolves the issue of possible missing dates.
# defaultdict(lambda: defaultdict(int)) doesn't work due to inability to pickle local objects like lambdas;
# this effectively does the same thing as that.
pages_printed: Dict[Any, Any] = defaultdict(partial(defaultdict, int))
last_seen: Dict[Any, Any] = {}
for row in cursor:
if row['printer'] in last_seen:
pages_printed.setdefault(row['date'], defaultdict(int))
pages_printed[row['date']][row['printer']] = (
row['value'] - last_seen[row['printer']]
)
last_seen[row['printer']] = row['value']
return pages_printed | [
254,
2735,
1724
] |
def METHOD_NAME(self, operator, time, qubits, label=None):
"""Apply hamiltonian evolution to qubits.
This gate resolves to a :class:`.UnitaryGate` as :math:`U(t) = exp(-i t H)`,
which can be decomposed into basis gates if it is 2 qubits or less, or
simulated directly in Aer for more qubits.
Args:
operator (matrix or Operator): a hermitian operator.
time (float or ParameterExpression): time evolution parameter.
qubits (Union[int, Tuple[int]]): The circuit qubits to apply the
transformation to.
label (str): unitary name for backend [Default: None].
Returns:
QuantumCircuit: The quantum circuit.
Raises:
ExtensionError: if input data is not an N-qubit unitary operator.
"""
if not isinstance(qubits, list):
qubits = [qubits]
return self.append(HamiltonianGate(data=operator, time=time, label=label), qubits, []) | [
1996
] |
def METHOD_NAME(
time_series: np.ndarray, minimum_samples: int = 3
) -> TimeSeriesStatistics:
"""Detect when a time series set has effectively become stationary (i.e has reached
equilibrium).
Parameters
----------
time_series
The time series to analyse with shape=(n_data_points, n_dimensions).
minimum_samples
The minimum number of data points to consider in the calculation.
Notes
-----
This method is based on the paper by J. D. Chodera [1]_ and the implementation at
https://github.com/choderalab/pymbar. Here the code is extended to support
multidimensional data such as dipole moments.
References
----------
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the
weighted histogram analysis method for the analysis of simulated and parallel
tempering simulations. JCTC 3(1):26-41, 2007.
Returns
-------
Statistics about the time series.
"""
n_timesteps = time_series.shape[0]
statistical_inefficiency_array = np.ones([n_timesteps - 1])
# Special case if the time series is constant.
if np.isclose(time_series.std(), 0.0):
return TimeSeriesStatistics(
n_total_points=len(time_series),
n_uncorrelated_points=1,
statistical_inefficiency=float(len(time_series)),
equilibration_index=0,
)
effect_samples_array = np.ones([n_timesteps - 1])
for current_timestep in range(0, n_timesteps - 1):
try:
statistical_inefficiency_array[
current_timestep
] = _statistical_inefficiency(
time_series[current_timestep:n_timesteps], minimum_samples
)
except ParameterError:
# Fix for issue https://github.com/choderalab/pymbar/issues/122
statistical_inefficiency_array[current_timestep] = (
n_timesteps - current_timestep + 1
)
effect_samples_array[current_timestep] = (
n_timesteps - current_timestep + 1
) / statistical_inefficiency_array[current_timestep]
equilibration_time = effect_samples_array.argmax()
statistical_inefficiency = statistical_inefficiency_array[equilibration_time]
return TimeSeriesStatistics(
n_total_points=len(time_series),
n_uncorrelated_points=len(
get_uncorrelated_indices(len(time_series), statistical_inefficiency)
),
statistical_inefficiency=float(statistical_inefficiency),
equilibration_index=int(equilibration_time),
) | [
902,
104,
4045
] |
def METHOD_NAME(testpdb, version="nanoBragg", spindle_axis=(1,0,0), phi_start=0, phistep_deg=-1, phisteps=-1, osc_deg=-1):
# crystal
symmetry=extract_from(testpdb)
sg = str(symmetry.space_group_info())
fmat = matrix.sqr(symmetry.unit_cell().fractionalization_matrix())
dxtbx_cryst = Crystal(fmat, sg)
crystal = nanoBragg_crystal.NBcrystal(init_defaults=True)
crystal.isotropic_ncells = False
crystal.dxtbx_crystal = dxtbx_cryst
crystal.Ncells_abc = 10,10,10
crystal.n_mos_domains = 1 # TODO: setting this causes discrepancy
crystal.mos_spread_deg = 0#1
symbol = dxtbx_cryst.get_space_group().info().type().lookup_symbol()
ucell_p = dxtbx_cryst.get_unit_cell().parameters()
miller_data = utils.make_miller_array(symbol, ucell_p, defaultF=1000)
crystal.symbol = miller_data.crystal_symmetry().space_group_info().type().lookup_symbol()
crystal.miller_array = miller_data
# beam
beam = nanoBragg_beam.NBbeam()
beam.size_mm = 0.001
beam.unit_s0 = dxtbx_beam.get_unit_s0()
spectrum = [(dxtbx_beam.get_wavelength(), 1e12)]
beam.spectrum = spectrum
# detector
fsize, ssize = dxtbx_det[0].get_image_size()
pfs = hopper_utils.full_img_pfs((1,ssize,fsize))
# simulator
SIM = sim_data.SimData()
SIM.detector = utils.strip_thickness_from_detector(dxtbx_det)
SIM.detector = dxtbx_det
SIM.crystal = crystal
SIM.beam = beam
SIM.panel_id = 0
def setup_rotation(SIM):
SIM.D.spindle_axis=spindle_axis
SIM.D.phi_deg = phi_start
SIM.D.phistep_deg = phistep_deg
SIM.D.phisteps = phisteps
if version == "nanoBragg":
SIM.instantiate_nanoBragg(oversample=1, device_Id=0, default_F=0)
SIM.D.printout_pixel_fastslow = FAST,SLOW
setup_rotation(SIM)
SIM.D.show_params()
SIM.D.add_nanoBragg_spots()
pix = SIM.D.raw_pixels.as_numpy_array()
SIM.D.free_all()
return pix
else:
SIM.instantiate_diffBragg(oversample=1, device_Id=0, default_F=0)
SIM.D.xray_beams = SIM.beam.xray_beams
ucell_man = utils.manager_from_params(ucell_p)
Bmatrix = ucell_man.B_recipspace
SIM.D.Bmatrix = Bmatrix
npix = int(len(pfs)/3)
SIM.D.printout_pixel_fastslow = FAST,SLOW
setup_rotation(SIM)
SIM.D.show_params()
SIM.D.add_diffBragg_spots_full()
pix = SIM.D.raw_pixels_roi.as_numpy_array()
SIM.D.free_all()
return pix | [
22,
1919
] |
def METHOD_NAME(monkeypatch_session: MonkeyPatch) -> AppConfig:
METHOD_NAME = AppConfig.from_env()
if "test" not in METHOD_NAME.cache.mongo_database or "test" not in METHOD_NAME.queue.mongo_database:
raise ValueError("Test must be launched on a test mongo database")
return METHOD_NAME | [
991,
200
] |
def METHOD_NAME(call: RPCCallBase) -> int:
r"""Registers a call for RPC requests."""
global _rpc_call_id, _rpc_call_pool
with _rpc_call_lock:
call_id = _rpc_call_id
_rpc_call_id += 1
if call_id in _rpc_call_pool:
raise RuntimeError("Registered function twice in 'rpc_register'")
_rpc_call_pool[call_id] = call
return call_id | [
1064,
372
] |
def METHOD_NAME(self, url):
video_id = self._match_id(url)
nuxt_data = self._search_nuxt_data(self._download_webpage(url, video_id), video_id)
parent = {
'id': video_id,
'title': nuxt_data.get('title'),
'description': nuxt_data.get('description'),
'uploader': traverse_obj(nuxt_data, ('creator', 'name')),
'uploader_id': traverse_obj(nuxt_data, ('creator', 'screen_name')),
'age_limit': 18 if nuxt_data.get('nsfw') else 0,
'tags': nuxt_data.get('tag_list'),
}
entries = []
for item in nuxt_data.get('previews') or []:
vid_url = item.get('url')
given_ext = traverse_obj(item, ('information', 'extension'))
preview_ext = determine_ext(vid_url, default_ext=None)
if not preview_ext:
content_disposition = parse_qs(vid_url)['response-content-disposition'][0]
preview_ext = self._search_regex(
r'filename="[^"]+\.([^\.]+?)"', content_disposition,
'preview file extension', fatal=False, group=1)
if preview_ext not in ('mp4', 'mp3'):
continue
if not vid_url or not item.get('id'):
continue
width, height = traverse_obj(item, ('information', 'width')), traverse_obj(item, ('information', 'height'))
if width is not None and height is not None:
# the longest side is at most 720px for non-client viewers
max_size = max(width, height)
width, height = list(x * 720 // max_size for x in (width, height))
entries.append({
**parent,
'id': str(item['id']),
'url': vid_url,
'thumbnail': item.get('poster_url'),
'subtitles': {
'jpn': [{
'url': item.get('vtt_url'),
'ext': 'vtt',
}]
} if item.get('vtt_url') else None,
'width': width,
'height': height,
'duration': traverse_obj(item, ('information', 'duration')),
'fps': traverse_obj(item, ('information', 'frame_rate')),
'ext': preview_ext or given_ext,
'vcodec': 'none' if preview_ext == 'mp3' else None,
# you'll always get 128kbps MP3 for non-client viewers
'abr': 128 if preview_ext == 'mp3' else None,
})
if not entries:
raise ExtractorError('No video/audio attachment found in this commission.', expected=True)
elif len(entries) == 1:
return entries[0]
else:
parent.update({
'_type': 'playlist',
'entries': entries,
})
return parent | [
1866,
297
] |
def METHOD_NAME(totype, name):
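# Looks up a printf-style pattern for `totype` in the module-level `to_table`
# mapping (assumed to be defined elsewhere) and applies it to `name`;
# unknown types fall through unchanged.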
pat = to_table.get(totype, None)
if pat is None:
return name #"(%s)%s" % (totype, name)
else:
return pat % name | [
24,
2147
] |
def METHOD_NAME(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column(
"id",
Integer,
sa.Identity(start=2, increment=3),
primary_key=True,
),
)
Table("user", m2)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_column")
eq_(diffs[0][2], "user")
c = diffs[0][3]
eq_(c.name, "id")
is_true(isinstance(c.identity, sa.Identity))
eq_(c.identity.start, 2)
eq_(c.identity.increment, 3) | [
9,
188,
2989,
105
] |
def METHOD_NAME(self, run, camcol, field, band):
'''
http://data.sdss3.org/datamodel/files/PHOTO_REDUX/RERUN/RUN/objcs/CAMCOL/fpC.html
band: string ('u', 'g', 'r', 'i', 'z')
'''
f = FpC(run, camcol, field, band)
# ...
fn = self.getFilename('fpC', run, camcol, field, band)
#print 'reading file', fn
p = self._open(fn)
#print 'got', len(p), 'HDUs'
f.image = p[0].data
f.header = p[0].header
return f | [
203,
4371,
2629
] |
def METHOD_NAME(text):
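# The masked method is invoked on the string itself, so this is a thin
# pass-through wrapper (presumably something like str.lower() or str.strip()
# in the original source).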
return text.METHOD_NAME() | [
826
] |
def METHOD_NAME():
assert_(not reference_info_zero_affine(),
msg='An all zeros affine should not be valid') | [
9,
75,
3523,
5095
] |
def METHOD_NAME(
resp, # type: Response
link, # type: Link
progress_bar # type: str
):
# type: (...) -> Iterable[bytes]
total_length = _get_http_response_size(resp)
if link.netloc == PyPI.file_storage_domain:
url = link.show_url
else:
url = link.url_without_fragment
logged_url = redact_auth_from_url(url)
if total_length:
logged_url = '{} ({})'.format(logged_url, format_size(total_length))
if is_from_cache(resp):
logger.info("Using cached %s", logged_url)
else:
logger.info("Downloading %s", logged_url)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif is_from_cache(resp):
show_progress = False
elif not total_length:
show_progress = True
elif total_length > (40 * 1000):
show_progress = True
else:
show_progress = False
chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
if not show_progress:
return chunks
return DownloadProgressProvider(
progress_bar, max=total_length
)(chunks) | [
123,
136
] |
def METHOD_NAME(self):
# Test default implementation
testfunc(self)
# Test Python implementation
if quopri.b2a_qp is not None or quopri.a2b_qp is not None:
oldencode = quopri.b2a_qp
olddecode = quopri.a2b_qp
try:
quopri.b2a_qp = None
quopri.a2b_qp = None
testfunc(self)
finally:
quopri.b2a_qp = oldencode
quopri.a2b_qp = olddecode | [
9622
] |
def METHOD_NAME(self):
return OrderedDict([(self.Command.SET_CHANNEL_INDEX.name, self.channel_index),
(self.Command.SET_FREQUENCY.name, self.frequency),
(self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
(self.Command.SET_BANDWIDTH.name, self.bandwidth),
(self.Command.SET_RF_GAIN.name, self.gain),
(self.Command.SET_BIAS_TEE_ENABLED.name, self.bias_tee_enabled),
("identifier", self.device_serial)]) | [
398,
386
] |
def METHOD_NAME(self, data, tags):
err = []
keys = tags.keys()
for k in keys:
part = k.split(':', 1)
if ":(" in k or k.startswith("def:") or part[0] in self.exceptions:
# access:([date])
# key def: can contain the sign =
continue
if k in self.exceptions_whole:
continue
if not self.KeyPart1.match(part[0]):
if self.KeyPart1Less.match(part[0]):
err.append({"class": 30502, "subclass": stablehash64(k), "text": T_("Concerns tag: `{0}`", '='.join([k, tags[k]])) })
else:
err.append({"class": 3050, "subclass": stablehash64(k), "text": T_("Concerns tag: `{0}`", '='.join([k, tags[k]])) })
elif len(part) == 2 and not self.KeyPart2.match(part[1]):
err.append({"class": 30501, "subclass": stablehash64(k), "text": T_("Concerns tag: `{0}`", '='.join([k, tags[k]])) })
return err | [
1716
] |
def METHOD_NAME(self):
"""Clean-up displayed values"""
for widget in (self._xLabel, self._yLabel, self._zLabel,
self._dataLabel, self._itemLabel):
widget.setText('-') | [
537
] |
def METHOD_NAME(self, preds, targets, tokenize, lowercase):
"""Test functional implementation of metric."""
metric_args = {"tokenize": tokenize, "lowercase": lowercase}
original_sacrebleu = partial(_sacrebleu_fn, tokenize=tokenize, lowercase=lowercase)
self.run_functional_metric_test(
preds,
targets,
metric_functional=sacre_bleu_score,
reference_metric=original_sacrebleu,
metric_args=metric_args,
) | [
9,
8612,
747,
4167
] |
def METHOD_NAME(cls):
return {
"type": "object",
"properties": {
"webex_bot_token": {"type": "string", "title": "Webex Bot Token"},
"to_person_emails": {
"type": "string",
"title": "People (comma-separated)",
},
"to_room_ids": {
"type": "string",
"title": "Rooms (comma-separated)",
},
},
"secret": ["webex_bot_token"],
"required": ["webex_bot_token"],
} | [
830,
135
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(self):
args = []
args.append("-DUSE_EXTERNAL_COMPILER_FLAGS=ON")
for i in ["c", "fortran", "python"]:
if "+" + i in self.spec:
args.append("-Denable-{0}-bindings=ON".format(i))
else:
args.append("-Denable-{0}-bindings=OFF".format(i))
if "+python" in self.spec:
# adding path to python
python = self.spec["python"]
args.append("-DPYTHON_LIBRARY={0}".format(python.libs[0]))
args.append("-DPYTHON_INCLUDE_DIR={0}".format(python.headers.directories[0]))
args.append("-DPython_ADDITIONAL_VERSIONS={0}".format(python.version.up_to(2)))
# adding path to boost
args.append("-DBOOST_ROOT={0}".format(self.spec["boost"].prefix))
if "+static" in self.spec:
args.append("-Denable-static=ON")
return args | [
334,
335
] |
def METHOD_NAME(self): | [
137,
2781,
2782
] |
def METHOD_NAME(self):
"""Test reactions to wrong gradient settings.."""
estimator = Estimator()
gradient = LinCombEstimatorGradient(estimator, derivative_type=DerivativeType.IMAG)
with self.assertWarns(Warning):
var_principle = ImaginaryMcLachlanPrinciple(gradient=gradient)
np.testing.assert_equal(var_principle.gradient._derivative_type, DerivativeType.REAL) | [
9,
1789,
1333
] |
def METHOD_NAME():
release_proc = subprocess.Popen(['msbuild', './TShockAPI/TShockAPI.csproj', '/p:Configuration=Release'])
debug_proc = subprocess.Popen(['msbuild', './TShockAPI/TShockAPI.csproj', '/p:Configuration=Debug'])
release_proc.wait()
debug_proc.wait()
if (release_proc.returncode != 0):
raise CalledProcessError(release_proc.returncode)
if (debug_proc.returncode != 0):
raise CalledProcessError(debug_proc.returncode) | [
56,
2733
] |
def METHOD_NAME():
"""
This basic test ensures that MetricsCalculator.get_metric() uses MetricsCalculator.get_metrics() correctly.
In more detail, the purpose of this basic test is to ensure that:
1) MetricsCalculator.get_metric() calls MetricsCalculator.get_metrics() exactly once for specific "metric_name";
2) MetricsCalculator.get_metric() correctly retrieves result from dictionary, returned by
MetricsCalculator.get_metrics() by using the specific "metric_name", mentioned above, as the key.
In the present test case, the role of "ExecutionEngine" is limited to providing the required constructor argument to
the "MetricsCalculator" class (one of whose methods is under test); hence, a "DummyExecutionEngine" is employed.
The "with mock.patch" is used judiciously, trading off the focus on the functionality under test (i.e., avoiding
"test leakage") against going as far as mocking all non-essential methods and properties, favoring code readability.
"""
class DummyExecutionEngine:
pass
execution_engine = cast(ExecutionEngine, DummyExecutionEngine)
metrics_calculator = MetricsCalculator(execution_engine=execution_engine)
metric_name = "my_metric_name"
actual_metric_value = "my_metric_value"
metric_domain_kwargs: dict = {}
with mock.patch(
"great_expectations.validator.metrics_calculator.MetricsCalculator.get_metrics",
return_value={metric_name: actual_metric_value},
) as mock_get_metrics_method:
metric_configuration = MetricConfiguration(
metric_name=metric_name,
metric_domain_kwargs=metric_domain_kwargs,
)
resolved_metric_value: Any = metrics_calculator.get_metric(
metric=metric_configuration
)
mock_get_metrics_method.assert_called_once_with(
metrics={metric_name: metric_configuration}
)
assert resolved_metric_value == actual_metric_value | [
9,
19,
1341,
1929,
19,
1097,
61
] |
def METHOD_NAME(self):
"""
Migrate local router settings to the controller.
"""
if self._settings.get("routers"):
templates = []
for router in self._settings.get("routers"):
router_settings = IOS_ROUTER_SETTINGS.copy()
router_settings.update(router)
if router_settings.get("chassis"):
del router_settings["chassis"]
templates.append(Template(router_settings))
TemplateManager.instance().updateList(templates)
self._settings["routers"] = []
self._saveSettings() | [
2744,
2228,
11788
] |
def METHOD_NAME():
"""
Get relative path.
"""
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
test_obj.snap_path_list = disk_obj.get_source_list(vmxml, disk_type,
test_obj.new_dev)[1:]
test.log.info("After start vm with relative path, "
"The xml is:\n%s", vmxml) | [
19,
1821,
157
] |
def METHOD_NAME():
"""Original version of mock-vasp"""
return _mock_vasp(False) | [
248,
10222
] |
def METHOD_NAME(self):
"""
Returns user email as provided by LTI
"""
if 'lis_person_contact_email_primary' in self.lti_params:
return self.lti_params['lis_person_contact_email_primary']
return None | [
21,
487
] |
def METHOD_NAME():
global candidate_locales
# Issue #13441: Skip some locales (e.g. cs_CZ and hu_HU) on Solaris to
# work around an mbstowcs() bug. For example, on Solaris, the hu_HU locale uses
# the locale encoding ISO-8859-2, the thousands separator is b'\xA0' and it is
# decoded as U+30000020 (an invalid character) by mbstowcs().
if sys.platform == 'sunos5':
old_locale = locale.setlocale(locale.LC_ALL)
try:
locales = []
for loc in candidate_locales:
try:
locale.setlocale(locale.LC_ALL, loc)
except Error:
continue
encoding = locale.getpreferredencoding(False)
try:
localeconv()
except Exception as err:
print("WARNING: Skip locale %s (encoding %s): [%s] %s"
% (loc, encoding, type(err), err))
else:
locales.append(loc)
candidate_locales = locales
finally:
locale.setlocale(locale.LC_ALL, old_locale)
# Workaround for MSVC6(debug) crash bug
if "MSC v.1200" in sys.version:
def accept(loc):
a = loc.split(".")
return not(len(a) == 2 and len(a[-1]) >= 9)
candidate_locales = [loc for loc in candidate_locales if accept(loc)] | [
0,
1,
298
] |
def METHOD_NAME(self, request, cookie, **kwargs):
"""
Verify if the authentication was successful.
:rtype : Response
:param request: Contains the request parameters.
:param cookie: Cookies sent with the request.
:param kwargs: Any other parameters.
:return: If the authentication was successful: a redirect to the
return_to url. Otherwise a unauthorized response.
:raise: ValueError
"""
logger.debug("verify(%s)" % request)
if isinstance(request, str):
_dict = parse_qs(request)
elif isinstance(request, dict):
_dict = request
else:
raise ValueError("Wrong type of input")
try:
cas_cookie, _ts, _typ = self.getCookieValue(cookie, self.CONST_CAS_COOKIE)
data = json.loads(cas_cookie)
nonce = base64.b64decode(data[self.CONST_NONCE])
if nonce != _dict[self.CONST_NONCE][0]:
logger.warning("Someone tried to login without a correct nonce!")
return Unauthorized("You are not authorized!")
acr = None
try:
acr = _dict["acr_values"][0]
except KeyError:
pass
uid = self.handle_callback(
_dict[self.CONST_TICKET], self.get_service_url(nonce, acr)
)
if uid is None or uid == "":
logger.info("Someone tried to login, but was denied by CAS!")
return Unauthorized("You are not authorized!")
cookie = self.create_cookie(uid, "casm")
return_to = self.generate_return_url(self.return_to, uid)
if "?" in return_to:
return_to += "&"
else:
return_to += "?"
return_to += base64.b64decode(data[self.CONST_QUERY])
return SeeOther(return_to, headers=[cookie])
except Exception:
# FIXME: This should catch specific exception thrown from methods in the block
logger.critical(
"Metod verify in user_cas.py had a fatal exception.", exc_info=True
)
return Unauthorized("You are not authorized!") | [
1162
] |
def METHOD_NAME():
for fldr, _, fyles in os.walk("."):
if "Configurations.json" in fyles:
process(fldr, fyles) | [
57
] |
def METHOD_NAME(key, default):
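# Sets environment[key] from os.getenv (falling back to `default`) only when
# the key is not already present; `environment` is a module-level dict
# assumed to be defined elsewhere.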
if key not in environment:
environment[key] = os.getenv(key, default) | [
0,
1027
] |
def METHOD_NAME(self):
folder_id = self.portal.invokeFactory("Folder", "folder")
folder = self.portal[folder_id]
self.assertTrue(self.tool.isStructuralFolder(folder)) | [
9,
137,
9181,
451
] |
def METHOD_NAME(self, namespace):
return {self._reExport.sub(r"\1", sym):addr for sym,addr in self._symbols[namespace].items() if self._reExport.match(sym)} | [
527,
294
] |
def METHOD_NAME(query, **kwargs):
"""
django.utils.http.urlencode wrapper that replaces query parameter values
of None with empty string so that urlencode doesn't raise TypeError
"Cannot encode None in a query string".
"""
# sequence of 2-element tuples
if isinstance(query, (list, tuple)):
query_seq = ((pair[0], "" if pair[1] is None else pair[1]) for pair in query)
return urlencode(query_seq, **kwargs)
elif isinstance(query, dict):
query_d = {k: "" if v is None else v for k, v in query.items()}
return urlencode(query_d, **kwargs)
else:
return urlencode(query, **kwargs) | [
1209,
14374
] |
def METHOD_NAME(self, fallbackURL): | [
250,
12633,
741,
275
] |
def METHOD_NAME(
lris, observing_run_token, manage_sources_token, red_transients_group
):
run_details = {
'instrument_id': lris.id,
'pi': 'Danny Goldstein',
'observers': 'D. Goldstein, P. Nugent',
'group_id': red_transients_group.id,
'calendar_date': '2020-02-16',
}
status, data = api(
'POST', 'observing_run', data=run_details, token=observing_run_token
)
assert status == 200
assert data['status'] == 'success'
run_id = data['data']['id']
new_date = {'calendar_date': '2020-02-17'}
run_details.update(new_date)
status, data = api(
'PUT', f'observing_run/{run_id}', data=new_date, token=manage_sources_token
)
assert status == 401
assert data['status'] == 'error' | [
9,
3166,
21,
2444,
11691,
12159,
22
] |
def METHOD_NAME(self, key):
if not isinstance(key, parameters.electricFieldParameters):
logger.error("parameter key needs to be of type NuRadioReco.framework.parameters.electricFieldParameters")
raise ValueError("parameter key needs to be of type NuRadioReco.framework.parameters.electricFieldParameters")
return key in self._parameters | [
220,
511
] |
def METHOD_NAME(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s,
append_eos=True,
add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens | [
8431
] |
def METHOD_NAME(self):
for ui in ["123", "4567890", "001", "0"]:
self.assertEqual(int(ui), validatorfuncs.unsigned_integer(ui)) | [
9,
1715,
4143,
1217
] |
def METHOD_NAME(self, need_trailing_eos, results, data): | [
9,
1137,
4812,
7743
] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs) | [
353,
377
] |
def METHOD_NAME(
payload: Any,
request_output: RequestOutput,
metadata_outputs: Dict[str, MetadataTensor] = {},
) -> Optional[ResponseOutput]:
output_metadata = metadata_outputs.get(request_output.name)
content_type = _get_content_type(request_output, output_metadata)
codec = (
find_input_codec(content_type)
if content_type
else find_input_codec_by_payload(payload)
)
if not codec:
return None
return codec.encode_output(
name=request_output.name,
payload=payload,
) | [
421,
17,
146
] |
def METHOD_NAME(self, args):
from fairseq import models
model = models.METHOD_NAME(args, self)
model.register_classification_head(
getattr(args, "ranking_head_name", "sentence_classification_head"),
num_classes=1,
)
return model | [
56,
578
] |
def METHOD_NAME():
"""
Provide grains for SmartOS
"""
grains = {}
if salt.utils.platform.is_smartos_zone():
grains = salt.utils.dictupdate.update(
grains, _smartos_zone_data(), merge_lists=True
)
elif salt.utils.platform.is_smartos_globalzone():
grains = salt.utils.dictupdate.update(
grains, _smartos_computenode_data(), merge_lists=True
)
grains = salt.utils.dictupdate.update(
grains, _smartos_zone_pkgin_data(), merge_lists=True
)
grains = salt.utils.dictupdate.update(
grains, _smartos_zone_pkgsrc_data(), merge_lists=True
)
return grains | [
14907
] |
def METHOD_NAME(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location") | [
708
] |
def METHOD_NAME(v):
return "%s_colorbar" % (str(v.key())) | [
1202,
1203,
156
] |
def METHOD_NAME(self) -> Mapping[str, str]:
"""
Resource properties.
"""
return pulumi.get(self, "properties") | [
748
] |
async def METHOD_NAME():
bot_metrics_url = f"{commons_constants.METRICS_URL}metrics/community/count/"
return await _get_stats({
"daily": f"{bot_metrics_url}0/0/-1",
"monthly": f"{bot_metrics_url}0/-1/0",
"all": f"{bot_metrics_url}0/0/0"
}) | [
19,
1056,
-1,
577
] |
def METHOD_NAME(self, uid: int) -> TrialInterface:
"""Fetches a single trial.
Args:
uid: Unique identifier of the trial within study.
Returns:
Trial.
Raises:
ResourceNotFoundError: If trial does not exist.
""" | [
19,
2943
] |
def METHOD_NAME(str=None):
if not str: print(help)
else: print("ERROR",str)
sys.exit()
# expand to full path name | [
168
] |
def METHOD_NAME(self):
class SimpleMemberMock:
def __init__(self, login):
self.login = login
class SimpleOrgMock:
@staticmethod
def get_members():
return [SimpleMemberMock("a"), SimpleMemberMock("b")]
org = SimpleOrgMock()
assert github_org.get_members(org) == ["a", "b"] | [
9,
19,
3563
] |
def METHOD_NAME(self, event=None):
"Remove effect of doing match."
self.text.tag_delete("paren")
self.deactivate_restore()
self.counter += 1 # disable the last timer, if there is one. | [
1032,
417
] |
def METHOD_NAME():
r"""Test circumcenter function."""
pt0 = [0, 0]
pt1 = [10, 10]
pt2 = [10, 0]
cc = circumcenter(pt0, pt1, pt2)
truth = [5., 5.]
assert_array_almost_equal(truth, cc) | [
9,
-1
] |
def METHOD_NAME(x1,v1,x2,v2,u,order=1):
"""Returns the derivative of a hermite curve with control points
x1, v1, x2, v2 at the parameter u in [0,1]. If order > 1, higher
order derivatives are returned."""
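# Hedged usage sketch: with 1-D control points x1=[0.], v1=[1.], x2=[1.],
# v2=[1.], METHOD_NAME(x1, v1, x2, v2, 0.5) evaluates the first derivative
# at the curve midpoint and returns [1.0].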
assert len(x1)==len(v1)
assert len(x1)==len(x2)
assert len(x1)==len(v2)
if order == 1:
u2 = u*u
dcx1 = (6.0*u2-6.0*u)
dcx2 = (-6.0*u2+6.0*u)
dcv1 = 3.0*u2-4.0*u+1.0
dcv2 = 3.0*u2-2.0*u
dx = [0]*len(x1)
for i in range(len(x1)):
dx[i] = dcx1*x1[i] + dcx2*x2[i] + dcv1*v1[i] + dcv2*v2[i];
return dx
elif order == 2:
ddcx1 = 12*u-6.0
ddcx2 = -12.0*u+6.0
ddcv1 = 6.0*u-4.0
ddcv2 = 6.0*u-2.0
ddx = [0]*len(x1)
for i in range(len(x1)):
ddx[i] = ddcx1*x1[i] + ddcx2*x2[i] + ddcv1*v1[i] + ddcv2*v2[i]
return ddx
elif order == 3:
cx1 = 12
cx2 = -12.0
cv1 = 6.0
cv2 = 6.0
dddx = [0]*len(x1)
for i in range(len(x1)):
dddx[i] = cx1*x1[i] + cx2*x2[i] + cv1*v1[i] + cv2*v2[i]
return dddx
elif order == 0:
return hermite_eval(x1,v1,x2,v2,u)
else:
return [0]*len(x1) | [
13695,
9365
] |
def METHOD_NAME(samples, link_creds=None):
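# Draws len(samples) random unsigned 64-bit integers (8 bytes per value) from
# os.urandom; the `link_creds` argument is accepted but unused here.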
return np.frombuffer(urandom(8 * len(samples)), dtype=np.uint64).reshape(-1) | [
978,
147
] |
def METHOD_NAME(self):
mgr = self.__owm.agro_manager()
# search all NDVI images in the specified time frame
result_set = mgr.search_satellite_imagery(self.__polygon.id, self.__acquired_from, self.__acquired_to,
None, PresetEnum.NDVI, None, None, None, None, None, None, None)
self.assertIsInstance(result_set, list)
self.assertEqual(len(result_set), 90)
self.assertTrue(all([isinstance(i, MetaImage) and i.preset == PresetEnum.NDVI for i in result_set])) | [
9,
1070,
43,
15819,
2181,
246
] |
def METHOD_NAME(self):
"""
:return: A dictionary of ``topic/dbname, offset integer`` pairs representing
the last sequence ID that was processed for each topic.
""" | [
19,
5288,
2964
] |
def METHOD_NAME(self):
"""
Returns the parent of this element
:rtype: Container
"""
return self.container | [
19,
224
] |
def METHOD_NAME(self):
pass | [
709,
710
] |
def METHOD_NAME(ctx: CIContext) -> bool:
print('⚙️ Resolving `integration tests` phase:')
return should_run_phase(
ctx=ctx,
trigger_env=context.trigger_env.DD_RUN_INTEGRATION_TESTS,
build_env=context.build_env.DD_OVERRIDE_RUN_INTEGRATION_TESTS,
pr_keyword='[x] Run integration tests',
pr_path_prefixes=[
'IntegrationTests/',
],
pr_file_extensions=[]
) | [
427,
22,
1911,
450
] |
def METHOD_NAME(span, func, args, kwargs):
try:
parsed_command = stringify_cache_args(args)
redis_command = parsed_command.split(" ")[0]
result = func(*args, **kwargs)
return result
except Exception:
if redis_command in ROW_RETURNING_COMMANDS:
span.set_metric(db.ROWCOUNT, 0)
raise
finally:
if redis_command in ROW_RETURNING_COMMANDS:
determine_row_count(redis_command=redis_command, span=span, result=result) | [
22,
2485,
462
] |
def METHOD_NAME(self, x, y):
from scipy.spatial import cKDTree
P = np.vstack((x, y)).T
return cKDTree(P) | [
56,
-1
] |
def METHOD_NAME(self):
with open("ledger_transactions_removed_case_ids.txt", "w") as case_ids_log:
forms_iterated = 0
for xform in with_progress_bar(self.forms):
forms_iterated += 1
if forms_iterated % 100 == 0:
print("traversed %s forms" % forms_iterated)
ledger_case_ids = get_case_ids_from_stock_transactions(xform)
if ledger_case_ids:
ledger_case_ids = list(ledger_case_ids)
for ledger_case_id in ledger_case_ids:
case_ids_log.write("%s\n" % ledger_case_id)
LedgerAccessorSQL.delete_ledger_transactions_for_form(ledger_case_ids, xform.form_id) | [
188,
5224,
1465
] |
def METHOD_NAME(self):
center = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
radius = 2 * u.arcsec
with pytest.raises(ValueError) as excinfo:
CircleSkyRegion(center, radius)
estr = "'center' must be a scalar SkyCoord"
assert estr in str(excinfo.value) | [
9,
1425,
1262
] |
def METHOD_NAME(self, monkeypatch):
datadog_obj = pretend.stub()
datadog_cls = pretend.call_recorder(lambda **kw: datadog_obj)
monkeypatch.setattr(services, "DogStatsd", datadog_cls)
context = pretend.stub()
request = pretend.stub(registry=pretend.stub(settings={}))
metrics = DataDogMetrics.create_service(context, request)
assert metrics._datadog is datadog_obj
assert datadog_cls.calls == [
pretend.call(host="127.0.0.1", port=8125, namespace=None, use_ms=True)
] | [
9,
129,
549,
1618
] |
def METHOD_NAME():
do_test(source_app.create_app) | [
9,
1458,
469
] |
def METHOD_NAME(ax, ndim, argname=None):
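# Normalizes a possibly negative axis index into the range [0, ndim); the
# unused `argname` parameter is presumably kept for API compatibility.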
if not (-ndim <= ax < ndim):
raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}")
if ax < 0:
ax += ndim
return ax | [
1137,
2227,
724
] |
def METHOD_NAME(self):
now = seconds_resolution(timezone.now())
occurring_time_ev1_dtstart = now + datetime.timedelta(days=3)
occurring_time_ev1_dtend = occurring_time_ev1_dtstart + datetime.timedelta(days=5)
datetime_rule_ev1 = OccurringRule.objects.create(
event=self.event,
dt_start=occurring_time_ev1_dtstart,
dt_end=occurring_time_ev1_dtend,
)
event2 = Event.objects.create(creator=self.user, calendar=self.calendar)
now = seconds_resolution(timezone.now())
occurring_time_ev2_dtstart = now + datetime.timedelta(days=4)
occurring_time_ev2_dtend = occurring_time_ev2_dtstart + datetime.timedelta(days=6)
datetime_rule_ev2 = OccurringRule.objects.create(
event=event2,
dt_start=occurring_time_ev2_dtstart,
dt_end=occurring_time_ev2_dtend,
)
self.assertEqual(self.event.next_event, event2)
self.assertEqual(event2.previous_event, self.event)
datetime_rule_ev2.delete()
date_rule_ev2 = OccurringRule.objects.create(
event=event2,
dt_start=convert_dt_to_aware(occurring_time_ev2_dtstart.date()),
dt_end=convert_dt_to_aware(occurring_time_ev2_dtend.date()),
)
self.assertEqual(self.event.next_event, event2)
self.assertEqual(event2.previous_event, self.event)
datetime_rule_ev1.delete()
date_rule_ev2.delete()
OccurringRule.objects.create(
event=self.event,
dt_start=convert_dt_to_aware(occurring_time_ev1_dtstart.date()),
dt_end=convert_dt_to_aware(occurring_time_ev1_dtend.date()),
)
OccurringRule.objects.create(
event=event2,
dt_start=occurring_time_ev2_dtstart,
dt_end=occurring_time_ev2_dtend,
)
self.assertEqual(self.event.next_event, event2)
self.assertEqual(event2.previous_event, self.event) | [
9,
417,
2855
] |
def METHOD_NAME(self, notif_pid):
"""Get operation logs records by notification pid.
:param notif_pid: The notification pid.
:returns a generator of ElasticSearch hit.
:rtype generator<dict>.
"""
query = self.filter('term', notification__pid=notif_pid)
for hit in query.scan():
yield hit.to_dict() | [
19,
1099,
604,
857,
2243
] |
def METHOD_NAME(args):
problem = read_param_file(args.paramfile)
X = np.loadtxt(args.model_input_file, delimiter=args.delimiter)
Y = np.loadtxt(
args.model_output_file, delimiter=args.delimiter, usecols=(args.column,)
)
analyze(problem, X, Y, S=args.slices, print_to_console=True, seed=args.seed) | [
615,
1006
] |
def METHOD_NAME():
"""
Tests resetting VMM.
"""
ret = {}
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
res = vmctl.reset()
mock_cmd.assert_called_once_with(
["vmctl", "reset"], output_loglevel="trace", python_shell=False
)
assert res | [
9,
656
] |
def METHOD_NAME(self, point, index=np.s_[:], p=None, scaled=True):
"""
p >= 0
"""
smgphi = self.smspace.METHOD_NAME(point, index=index, p=p)
ldof = self.number_of_local_dofs(p=p, doftype='cell')
shape = point.shape[:-1]+(ldof, 2, 2)
gphi = np.zeros(shape, dtype=self.ftype)
gphi[..., :ldof//2, : , 0] = smgphi
gphi[..., -ldof//2:, : , 1] = smgphi
return gphi | [
140,
1189
] |
def METHOD_NAME(archive_path, output_dir):
print('Untarring {} in {}.'.format(archive_path, output_dir))
tar_file = tarfile.open(archive_path, 'r:gz')
try:
tar_file.extractall(output_dir)
finally:
tar_file.close() | [
6900,
1622,
24
] |
def METHOD_NAME(self):
# Test retrieving audio samples without labels from a directory and its
# subdirs where we double the sampling rate.
# Save a few extra audio in the parent directory.
directory = self._prepare_directory(count=7, num_classes=2)
for i, audio in enumerate(self._get_audio_samples(3)):
filename = f"audio_{i}.wav"
with open(os.path.join(directory, filename), "wb") as f:
f.write(audio.numpy())
dataset = audio_dataset.audio_dataset_from_directory(
directory,
batch_size=5,
output_sequence_length=30,
labels=None,
sampling_rate=2000, # Twice the original sample rate.
)
batch = next(iter(dataset))
# We return plain audio. Expect twice as many samples now.
self.assertEqual(batch.shape, (5, 60, 1))
self.assertEqual(batch.dtype.name, "float32")
# Count samples
batch_count = 0
sample_count = 0
for batch in dataset:
batch_count += 1
sample_count += batch.shape[0]
self.assertEqual(batch_count, 2)
self.assertEqual(sample_count, 10) | [
9,
1747,
126,
280,
2851,
3375,
41
] |
def METHOD_NAME():
files = _get_files()
modules = _get_modules()
if not files and not modules['kernel'] and not modules['dracut']:
api.current_logger().debug(
'No additional files or modules required to add into the target initramfs.')
return
target_kernel_info = next(api.consume(InstalledTargetKernelInfo), None)
if not target_kernel_info:
raise StopActorExecutionError(
'Cannot get version of the installed RHEL-8 kernel',
details={'Problem': 'Did not receive a message with installed RHEL-8 kernel version'
' (InstalledTargetKernelVersion)'})
_copy_modules(modules['dracut'], DRACUT_DIR, 'dracut')
_copy_modules(modules['kernel'], _get_target_kernel_modules_dir(target_kernel_info.uname_r), 'kernel')
# Discover any new modules and regenerate modules.dep
should_regenerate = any(module.module_path is not None for module in modules['kernel'])
if should_regenerate:
try:
run(['depmod', target_kernel_info.uname_r, '-a'])
except CalledProcessError as e:
raise StopActorExecutionError('Failed to generate modules.dep and map files.', details={'details': str(e)})
try:
# multiple files|modules need to be quoted, see --install | --add in dracut(8)
dracut_module_names = list({module.name for module in modules['dracut']})
kernel_module_names = list({module.name for module in modules['kernel']})
cmd = ['dracut', '-f', '--kver', target_kernel_info.uname_r]
if files:
cmd += ['--install', '{}'.format(' '.join(files))]
if modules['dracut']:
cmd += ['--add', '{}'.format(' '.join(dracut_module_names))]
if modules['kernel']:
cmd += ['--add-drivers', '{}'.format(' '.join(kernel_module_names))]
run(cmd)
except CalledProcessError as e:
# just hypothetic check, it should not die
raise StopActorExecutionError('Cannot regenerate dracut image.', details={'details': str(e)}) | [
356
] |
async def METHOD_NAME(
self, query: str, values: Optional[list] = None
) -> Tuple[int, List[dict]]:
async with self.acquire_connection() as connection:
self.log.debug("%s: %s", query, values)
async with connection.cursor() as cursor:
if values:
await cursor.execute(query, values)
else:
await cursor.execute(query)
if query.startswith("UPDATE") or query.startswith("DELETE"):
return cursor.rowcount, []
try:
rows = await cursor.fetchall()
except pyodbc.ProgrammingError:
return cursor.rowcount, []
if rows:
fields = [c[0] for c in cursor.description]
return cursor.rowcount, [dict(zip(fields, row)) for row in rows]
return cursor.rowcount, [] | [
750,
539
] |
def METHOD_NAME(self):
# type: () -> None
self._stopped.set()
self.join() | [
631
] |
def METHOD_NAME(cls): | [
19,
362,
3255
] |
def METHOD_NAME(self, value):
self.native.Enabled = value | [
0,
1111
] |
def METHOD_NAME(self):
"""Test ODE function generator."""
observable = SparsePauliOp.from_list(
[
("II", 0.2252),
("ZZ", 0.5716),
("IZ", 0.3435),
("ZI", -0.4347),
("YY", 0.091),
("XX", 0.091),
]
)
d = 2
ansatz = EfficientSU2(observable.num_qubits, reps=d)
# Define a set of initial parameters
parameters = list(ansatz.parameters)
param_dict = {param: np.pi / 4 for param in parameters}
var_principle = ImaginaryMcLachlanPrinciple()
t_param = None
linear_solver = None
linear_solver = VarQTELinearSolver(
var_principle,
observable,
ansatz,
parameters,
t_param,
linear_solver,
)
time = 2
ode_function_generator = OdeFunction(linear_solver, t_param=None, param_dict=param_dict)
qte_ode_function = ode_function_generator.var_qte_ode_function(time, param_dict.values())
expected_qte_ode_function = [
0.442145,
-0.022081,
0.106223,
-0.117468,
0.251233,
0.321256,
-0.062728,
-0.036209,
-0.509219,
-0.183459,
-0.050739,
-0.093163,
]
np.testing.assert_array_almost_equal(expected_qte_ode_function, qte_ode_function) | [
9,
486,
9939,
9940,
559
] |
def METHOD_NAME(
cmd: str, stdout_path: str, stderr_path: str,
return_code_path: str) -> str:
"""Creates a shell script that captures the stdout, stderr, and return code
to files on the filesystem.
Args:
cmd: Command to execute.
stdout_path: Path to file for stdout.
stderr_path: Path to file for stderr.
return_code_path: Path to write return code of executing cmd.
Returns:
String containing the shell script.
"""
return textwrap.dedent("""
touch {stdout_path}
touch {stderr_path}
touch {return_code_path}
bash -c {cmd} 1> {stdout_path} 2> {stderr_path}
echo $? > {return_code_path}
exit 0
""".format(cmd=shlex.quote(cmd),
stdout_path=shlex.quote(stdout_path),
stderr_path=shlex.quote(stderr_path),
return_code_path=shlex.quote(return_code_path))) | [
93,
8715,
735
] |
def METHOD_NAME(sctx, ctx, name, type):
n = ctx(NeuronClass)(name)
n.type(type)
r = sctx(Neuron).query(name+"R")
for rs in r.load():
n.member(rs)
l = sctx(Neuron).query(name+"L")
for ls in l.load():
n.member(ls)
return n | [
102
] |
def METHOD_NAME(self):
proc_mounts_path = "/proc/self/mounts"
if os.path.exists(proc_mounts_path):
with open(proc_mounts_path) as f:
for line in f:
mntent = line.split()
if mntent[2] != "cgroup": continue
mount_point = os.path.dirname(mntent[1])
return mount_point
return "" | [
2991,
11354,
2844,
1669
] |
def METHOD_NAME():
modified_notebook_path = os.path.join(local_notebooks_dir, "exercise_2_test.ipynb")
nb = nbformat.read(
os.path.join(local_notebooks_dir, "exercise_2.ipynb"),
as_version=nbformat.NO_CONVERT,
)
_replace_str(
nb,
'path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"',
'# path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"',
)
new_optional_cell = f'path = "{test_dataset_path}"\n' + download_taxi_dataset
optional_cell_idx = _find_code_cell_idx(nb, "[Optional] Download data locally.")
nb["cells"][optional_cell_idx]["source"] = new_optional_cell
nbformat.write(nb, modified_notebook_path)
_execute_notebook(modified_notebook_path) | [
9,
3446,
988
] |
def METHOD_NAME(input_file, output_file, template):
"""Create a translated IDML using an IDML template and a PO file."""
# Now proceed with the conversion.
template_zip = ZipFile(template, "r")
translatable_files = [
filename
for filename in template_zip.namelist()
if filename.startswith("Stories/")
]
po_data = input_file.read()
dom_trees = translate_idml(template, BytesIO(po_data), translatable_files)
write_idml(template_zip, output_file, dom_trees)
output_file.close()
return True | [
-1
] |
def METHOD_NAME(self, value):
return TimestampField.time_string_to_int(value) | [
19,
48,
99
] |
def METHOD_NAME(self, tracking_context, provided_context, expected_segment_context):
# Test first with tracking and no provided context.
with self.tracker.context('test', tracking_context):
segment.track(sentinel.user_id, sentinel.name, self.properties)
args, kwargs = self.mock_segment_track.call_args # lint-amnesty, pylint: disable=unused-variable
assert (sentinel.user_id, sentinel.name, self.properties, expected_segment_context) == args
# Test with provided context and no tracking context.
segment.track(sentinel.user_id, sentinel.name, self.properties, provided_context)
args, kwargs = self.mock_segment_track.call_args
assert (sentinel.user_id, sentinel.name, self.properties, provided_context) == args
# Test with provided context and also tracking context.
with self.tracker.context('test', tracking_context):
segment.track(sentinel.user_id, sentinel.name, self.properties, provided_context)
assert self.mock_segment_track.called
args, kwargs = self.mock_segment_track.call_args
assert (sentinel.user_id, sentinel.name, self.properties, provided_context) == args | [
9,
3068,
198,
41,
10800
] |
def METHOD_NAME(self, msg, avg, all_times, distributed=False):
percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
if not distributed:
logger.info(
f"{msg}: avg={1.0/avg:.1f} it/s, "
f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
)
return
avg_per_gpu = comm.all_gather(avg)
percentiles_per_gpu = comm.all_gather(percentiles)
if comm.get_rank() > 0:
return
for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
logger.info(
f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
) | [
390,
104
] |
def METHOD_NAME(cls, api_key: str) -> Node | None:
"""
Returns Node based on the provided API key.
Parameters
----------
api_key : str
The API key of the node to search for
Returns
-------
Node | None
Returns the node if a node is associated with api_key, None if no
node is associated with api_key.
"""
session = DatabaseSessionManager.get_session()
nodes = session.query(cls).all()
session.commit()
for node in nodes:
is_correct_key = node.check_key(api_key)
if is_correct_key:
return node
# no node found with matching API key
return None | [
19,
604,
58,
59
] |
def METHOD_NAME(self):
""" Do basic controls sequences for colors work?
"""
string = 'first\x1b[34mblue\x1b[0mlast'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEqual(substring, 'first')
self.assertEqual(self.processor.foreground_color, None)
elif i == 1:
self.assertEqual(substring, 'blue')
self.assertEqual(self.processor.foreground_color, 4)
elif i == 2:
self.assertEqual(substring, 'last')
self.assertEqual(self.processor.foreground_color, None)
else:
self.fail('Too many substrings.')
self.assertEqual(i, 2, 'Too few substrings.') | [
9,
424
] |
def METHOD_NAME() -> tink_pb2.KeyTemplate:
return _create_jwt_rsa_ssa_pkcs1_template(jwt_rsa_ssa_pkcs1_pb2.RS384, 3072,
tink_pb2.RAW) | [
772,
5009,
-1,
-1,
8697,
671
] |
def METHOD_NAME(json_object, schema, context=None) -> tuple[list[str], dict]:
try:
DefaultValidatingDraft7Validator = extend_with_default(
jsonschema.Draft7Validator)
validator = DefaultValidatingDraft7Validator(schema, jsonschema.FormatChecker())
#validator = jsonschema.Draft7Validator(schema, jsonschema.FormatChecker())
errors_formatted = []
for error in sorted(validator.iter_errors(json_object), key=str):
#validate(json_object, schema, format_checker=FormatChecker())
# except jsonschema.ValidationError as e:
report = generate_validation_error_report(error, json_object)
#note = "\n*** Note - If there is more than one error, only the first error is shown ***\n\n"
if context:
errors_formatted.append(
"Schema check failed for '{}'\n{}".format(context, report))
# return note + "Schema check failed for '{}'\n{}".format(context, report)
else:
errors_formatted.append(
"Schema check failed.\n{}".format(report))
# return note + "Schema check failed.\n{}".format(report)
if len(errors_formatted) == 0:
#DefaultValidatingDraft7Validator = extend_with_default(
# jsonschema.Draft7Validator)
#DefaultValidatingDraft7Validator(schema).validate(json_object)
return (errors_formatted, json_object)
else:
return (errors_formatted, {})
except Exception as e:
# Some error occurred, probably related to the schema itself
raise(Exception("Error validating the JSON Schema: %s" % (str(e)))) | [
250,
763
] |
def METHOD_NAME(params):
"""Validate all forms of bad config"""
multi_finder_cfg, error_type = params
with temp_config_setup(multi_finder_cfg=multi_finder_cfg) as mf_config:
with pytest.raises(error_type):
MultiModelFinder(mf_config, "default") | [
9,
457,
578,
6883,
532,
200
] |