text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
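Each row below pairs a source snippet in the `text` column (with the masked identifier shown as `METHOD_NAME`) with a list of integers in the `ids` column. As a minimal sketch — assuming the rows are served through the Hugging Face `datasets` library, and using a hypothetical repository id in place of the one not given here — the two columns could be inspected like this:

from datasets import load_dataset

# Hypothetical repository id; substitute the dataset's actual path.
ds = load_dataset("example/method-name-corpus", split="train")

row = ds[0]
print(row["text"][:80])  # masked source code, 15 to 7.82k characters per row
print(row["ids"])        # list of 1 to 7 integers per row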
def METHOD_NAME(audiopath: str):
    if audiopath[-4:] == ".wav":
        audio, lsr = load_wav_to_torch(audiopath)
    elif audiopath[-4:] == ".mp3":
        audio, lsr = librosa.load(audiopath, sr=None)
        audio = torch.FloatTensor(audio)
    else:
        assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
    # Remove any channel data.
    if len(audio.shape) > 1:
        if audio.shape[0] < 5:
            audio = audio[0]
        else:
            assert audio.shape[1] < 5
            audio = audio[:, 0]
    return audio, lsr | [203, 1747, 171] |
def METHOD_NAME(self):
    return "MgmtErrorFormat" | [168, 275] |
def METHOD_NAME(map_file, function_file, argument_file, result_file):
    """
    Wrapper function to deploy with FunctionCall as serverless tasks.
    """
    from parsl.executors.taskvine.exec_parsl_function import run
    run(map_file, function_file, argument_file, result_file) | [22, -1, 559] |
def METHOD_NAME(self):
    item = self.findSelectedItem()
    if item:
        for row in range(item.childCount() - 2, -1, -1):
            if item.child(row).isSelected():
                i = item.takeChild(row)
                item.insertChild(row + 1, i)
                i.setSelected(True) | [132, 449, 2189, 481] |
def METHOD_NAME(self, state, id, events):
    global IsPlaying
    global CurrentFile
    global SoundObjIndex
    global TriggeringAvatar
    if not state:
        return
    if id == actTrigger.id:
        TriggeringAvatar = PtFindAvatar(events)
        respOneShot.run(self.key, events=events)
    elif id == respOneShot.id:
        self.NextSong()
        respStop.run(self.key)
        if CurrentFile:
            soSoundObj.value.setSoundFilename(SoundObjIndex, CurrentFile[0], CurrentFile[1])
            respStart.run(self.key)
            currentSong = (CurrentFile[0],)
        else:
            currentSong = ("",)
        playerid = PtGetClientIDFromAvatarKey(TriggeringAvatar.getKey())
        localClient = PtGetLocalClientID()
        islocalavatar = (playerid == localClient)
        sdlvarisvalid = sdlCurrentSongVar.value
        if islocalavatar and sdlvarisvalid:
            PtDebugPrint("Setting cur song var to: ", currentSong)
            ageSDL = PtGetAgeSDL()
            ageSDL[sdlCurrentSongVar.value] = currentSong | [69, 959] |
def METHOD_NAME():
    out = subprocess.check_output(["git", "tag", "--points-at", "HEAD"])
    return out.decode().split("\n")[0] | [19, 1056, 1493, 82] |
def METHOD_NAME():
    # Ensure the policy is set
    test_setting = "Success"
    win_lgpo.set_computer_policy(
        name="Audit User Account Management", setting=test_setting
    )
    assert (
        win_lgpo.get_policy(
            policy_name="Audit User Account Management", policy_class="machine"
        )
        == test_setting
    ) | [0, 54] |
def METHOD_NAME(cubes_with_wrong_air_pressure):
    """Test ``fix_metadata`` for ``hus``."""
    vardef = get_var_info('CMIP6', 'Amon', 'hus')
    fix = Hus(vardef)
    out_cubes = fix.fix_metadata(cubes_with_wrong_air_pressure)
    assert len(out_cubes) == 2
    hus_cube = out_cubes.extract_cube('hus')
    zg_cube = out_cubes.extract_cube('zg')
    assert hus_cube.var_name == 'hus'
    assert zg_cube.var_name == 'zg'
    np.testing.assert_allclose(hus_cube.coord('air_pressure').points,
                               [1000.0, 601.0, 200.0])
    np.testing.assert_allclose(hus_cube.coord('air_pressure').bounds,
                               [[1200.0, 800.0], [800.0, 401.0], [401.0, 2.0]])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').points,
                               [1000.09, 600.6, 200.0])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').bounds,
                               [[1200.00001, 800], [800, 400.8], [400.8, 1.9]]) | [9, -1, 1112, 773] |
def METHOD_NAME(logging_outputs):
    """Aggregate logging outputs from data parallel training."""
    loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
    ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
    nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
    sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
    agg_output = {
        "loss": loss_sum / nsentences,
        "ntokens": ntokens,
        "nsentences": nsentences,
        "sample_size": sample_size,
    }
    return agg_output | [3428, 663, 141] |
def METHOD_NAME():
    # check that FirebaseError is raised when send_message returns it so Celery task can retry
    with patch.object(
        FCMDevice, "send_message", return_value=FirebaseError(code="test_error_code", message="test_error_message")
    ):
        with pytest.raises(FirebaseError):
            fcm_relay_async(token="test_token", data={}, apns={}) | [9, 3805, 4208, 958, 2052] |
def METHOD_NAME(self) -> str:
    """Generate a license plate."""
    pattern: str = self.random_element(self.license_plate_formats)
    return self.generator.parse(pattern) | [2130, 7777] |
def METHOD_NAME(s):
    return qs(**{field.db_field: s}).clear_cls_query().limit(1).count(True) > 0 | [954] |
def METHOD_NAME(self, text):
    self._input = text
    self._current_position = 0
    self._current_state = CLL_NORMAL
    self._paren_balance = 0 | [362] |
def METHOD_NAME(self, waypoint: FlightWaypoint) -> timedelta | None:
    return None | [6545, 104, 43, 4599] |
def METHOD_NAME(self, target):
    """Returns all the location types supported for the given target.

    This is an implementation for an abstract method.

    Parameters
    ----------
    target : any
        The UI target for which supported location types are queried.

    Returns
    -------
    locators_classes : set
        Supported locator types for the given target type.
    """
    return set() | [19, 1081] |
def METHOD_NAME(fdp, dict_size):
    dictionary = {}
    for _ in range(dict_size):
        dictionary[fdp.ConsumeUnicodeNoSurrogates(20)] = fdp.ConsumeIntListInRange(4, 1, 100)
    return dictionary | [1049, 2445, 41, 245] |
def METHOD_NAME():
    METHOD_NAME = mock.Mock(spec=Repository)
    METHOD_NAME.owner = "SalesforceFoundation"
    METHOD_NAME.name = "TestRepo"
    return METHOD_NAME | [522] |
def METHOD_NAME(
    user_profile: UserProfile, *, acting_user: Optional[UserProfile]
) -> None:
    assert user_profile.bot_owner is not None
    new_owner_subscribed_private_streams = get_subscribed_private_streams_for_user(
        user_profile.bot_owner
    )
    new_owner_subscribed_private_stream_ids = [
        stream.id for stream in new_owner_subscribed_private_streams
    ]
    bot_subscribed_private_streams = get_subscribed_private_streams_for_user(user_profile)
    bot_subscribed_private_stream_ids = [stream.id for stream in bot_subscribed_private_streams]
    stream_ids_to_unsubscribe = set(bot_subscribed_private_stream_ids) - set(
        new_owner_subscribed_private_stream_ids
    )
    unsubscribed_streams = [
        stream
        for stream in bot_subscribed_private_streams
        if stream.id in stream_ids_to_unsubscribe
    ]
    bulk_remove_subscriptions(
        user_profile.realm, [user_profile], unsubscribed_streams, acting_user=acting_user
    ) | [188, 1227, 280, 13509, 547, 1196] |
def METHOD_NAME(self, setup_logging):
    """Test for when the user is not using pyramid_sawing."""
    bodhi_logging.setup()
    setup_logging.assert_called_once_with('/test/file') | [9, 529, -1] |
def METHOD_NAME(self, x_start, tol):
    """Tests that momentum optimizer takes one and two steps correctly
    for univariate functions."""
    stepsize, gamma = 0.1, 0.5
    mom_opt = MomentumOptimizer(stepsize, momentum=gamma)
    univariate_funcs = [np.sin, lambda x: np.exp(x / 10.0), lambda x: x**2]
    grad_uni_fns = [
        lambda x: (np.cos(x),),
        lambda x: (np.exp(x / 10.0) / 10.0,),
        lambda x: (2 * x,),
    ]
    for gradf, f in zip(grad_uni_fns, univariate_funcs):
        mom_opt.reset()
        x_onestep = mom_opt.step(f, x_start)
        x_onestep_target = x_start - gradf(x_start)[0] * stepsize
        assert np.allclose(x_onestep, x_onestep_target, atol=tol)
        x_twosteps = mom_opt.step(f, x_onestep)
        momentum_term = gamma * gradf(x_start)[0]
        x_twosteps_target = x_onestep - (gradf(x_onestep)[0] + momentum_term) * stepsize
        assert np.allclose(x_twosteps, x_twosteps_target, atol=tol) | [9, 11057, 968, 7167] |
def METHOD_NAME(self) -> 'outputs.AssessmentStatusResponse':
    """
    The result of the assessment
    """
    return pulumi.get(self, "status") | [452] |
def METHOD_NAME(self, multihost):
    """
    :title: config: sssd --genconf-section only
     refreshes those sections given on the command line
    :id: 011bf2ad-4a2a-4350-adfa-7826349e262f
    """
    multihost.master[0].service_sssd('restart')
    self._assert_config_value(multihost, 'pam', 'debug_level', '9')
    self._assert_config_value(multihost, 'nss', 'debug_level', '9')
    set_param(multihost, 'pam', 'debug_level', '1')
    set_param(multihost, 'nss', 'debug_level', '1')
    multihost.master[0].run_command(
        '/usr/sbin/sssd --genconf-section=pam')
    # We only told genconf to touch the pam section..
    self._assert_config_value(multihost, 'pam', 'debug_level', '1')
    # ..so the NSS section shouldn't be updated at all
    self._assert_config_value(multihost, 'nss', 'debug_level', '9')
    set_param(multihost, 'nss', 'debug_level', '9')
    set_param(multihost, 'pam', 'debug_level', '9') | [9, 10205, 10856, 1287, 246] |
def METHOD_NAME(self, policy_owner):
    return policy_owner in self.policy_owner_list | [416, 54, 2013] |
def METHOD_NAME(package, from_version, to_version, *, api_key):
    from_version = parse_version(from_version)
    to_version = parse_version(to_version)
    changelog = {}
    r = requests.get(
        "https://pyup.io/api/v1/changelogs/{}/".format(package),
        headers={"X-Api-Key": api_key}
    )
    if r.status_code == 200:
        data = r.json()
        if data:
            # sort the changelog by release
            sorted_log = sorted(data.items(), key=lambda v: parse_version(v[0]), reverse=True)
            # go over each release and add it to the log if it's within the "upgrade
            # range" e.g. update from 1.2 to 1.3 includes a changelog for 1.2.1 but
            # not for 0.4.
            for version, log in sorted_log:
                parsed_version = parse_version(version)
                if parsed_version > from_version and parsed_version <= to_version:
                    changelog[version] = log
    return changelog | [1047, 3838] |
def METHOD_NAME(resp: requests.Response) -> bool:
    if resp.status_code == 408:
        return True
    if resp.status_code >= 500 and resp.status_code <= 599:
        return True
    return False | [12189] |
def METHOD_NAME(bytesequence, outputfile, xres, yres, pixelformat):
    image = []
    if (pixelformat == "YUV422"):
        imagepixels = yuv422_to_rgb(bytesequence)
    elif (pixelformat == "RGB555"):
        imagepixels = rgb555_to_rgb(bytesequence)
    elif (pixelformat == "RGB565"):
        imagepixels = rgb565_to_rgb(bytesequence)
    elif (pixelformat == "RGB888"):
        imagepixels = rgb888_to_rgb(bytesequence)
    elif (pixelformat == "GRAYSCALE"):  # Black and white yuv422
        imagepixels = blackAndWhite_to_rgb(bytesequence)
    elif (pixelformat == "BAYER"):  # Black and white raw
        imagepixels = blackAndWhite_to_rgb(bytesequence)
    offset = 0
    for i in range(yres):
        line = []
        offset = (xres * 3) * i
        for j in range(xres * 3):
            line.append(imagepixels[j + offset])
        image.append(line)
    print("Output image to file xres {}, yres {}".format(xres, yres), flush=True)
    g_pil_image = generate_img(outputfile, (0, 0, 0), (xres, yres))
    x = 0
    y = 0
    for i in range(int(len(imagepixels) / 3)):
        color_r = imagepixels[i * 3 + 0]
        color_g = imagepixels[i * 3 + 1]
        color_b = imagepixels[i * 3 + 2]
        g_pil_image.putpixel((x, y), (color_r, color_g, color_b, 255))
        x = x + 1
        if x > (xres - 1):
            x = 0
            y = y + 1
            if y > (yres - 1):
                break
    g_pil_image.save(outputfile) | [197] |
def METHOD_NAME(self, mock_subprocess_run, mock_robot_run):
    """Verifies that pabot-specific options are not passed on when processes=1"""
    mock_robot_run.return_value = 0
    task = create_task(
        Robot,
        {
            "suites": "tests",
            "processes": 1,
            "testlevelsplit": "true",
            "ordering": "./robot/order.txt",
        },
    )
    task()
    mock_subprocess_run.assert_not_called()
    outputdir = str(Path(".").resolve())
    mock_robot_run.assert_called_once_with(
        "tests",
        listener=[],
        outputdir=outputdir,
        variable=["org:test"],
        tagstatexclude=["cci_metric_elapsed_time", "cci_metric"],
        stdout=sys.stdout,
        stderr=sys.stderr,
    ) | [9, -1, 335, 356, 2338, 313] |
def METHOD_NAME(u):
    return translate([1, 2, 3]) | [711, 1224] |
def METHOD_NAME(self):
    """Test function multivariate_ttest.
    Tested against the R package Hotelling and real-statistics.com.
    """
    np.random.seed(123)
    # With 2 variables
    mean, cov, n = [4, 6], [[1, 0.5], [0.5, 1]], 30
    Z = np.random.multivariate_normal(mean, cov, n)
    # One-sample test
    multivariate_ttest(Z, Y=None, paired=False)
    multivariate_ttest(Z, Y=[4, 5], paired=False)
    # With 3 variables
    # Two-sample independent
    stats = multivariate_ttest(X, Y)
    assert round(stats.at["hotelling", "F"], 3) == 1.327
    assert stats.at["hotelling", "df1"] == 3
    assert stats.at["hotelling", "df2"] == 32
    assert round(stats.loc["hotelling", "pval"], 3) == 0.283
    # Paired test with NaN values
    stats = multivariate_ttest(X_na, Y, paired=True)
    assert stats.at["hotelling", "df1"] == 3
    assert stats.at["hotelling", "df2"] == X.shape[0] - 1 - X.shape[1] | [9, 4049, 4883] |
def METHOD_NAME(self):
    """Returns gyroscope vector in degrees/sec."""
    mv = memoryview(self.scratch_int)
    f = self.scale_gyro
    self.i2c.readfrom_mem_into(self.address_gyro, OUT_G | 0x80, mv)
    return (mv[0] / f, mv[1] / f, mv[2] / f) | [203, -1] |
def METHOD_NAME(self, input: DOMInputSource) -> ExpatBuilder | ExpatBuilderNS: ... | [214] |
def METHOD_NAME(
    self,
    connection_id: str = None,
    monitor_ids: List[str] = None,
    poll_interval: int = 60,
    raise_on_failure: bool = False,
) -> None:
    print('Metaplane process started.')
    if not monitor_ids and connection_id:
        print(f'Fetching monitors for connection ID {connection_id}.')
        monitors = self.monitors(connection_id)['data']
        monitor_ids = [m['id'] for m in monitors]
        time.sleep(10)
    monitors_started = {}
    print(f'Running {len(monitor_ids)} monitor(s).')
    for monitor_id in monitor_ids:
        print(f'Running monitor ID {monitor_id}.')
        now = datetime.utcnow()
        status = self.run_monitors([monitor_id])['status']
        print(f'Monitor ID {monitor_id} ran with status {status}.')
        if 200 == status:
            monitors_started[monitor_id] = now
    monitors_completed = {}
    while len(monitors_completed) < len(monitors_started):
        for monitor_id in monitor_ids:
            print(f'Checking status for monitor ID {monitor_id}.')
            monitor_status = self.monitor_status(monitor_id)
            start_date = monitors_started[monitor_id]
            status_date = dateutil.parser.parse(monitor_status['createdAt'])
            completed = status_date.timestamp() >= start_date.timestamp()
            print(f'Monitor ID {monitor_id} completed: {completed}; started {start_date}, '
                  f'last created {status_date}).')
            if completed:
                passed = monitor_status['passed']
                if raise_on_failure and not passed:
                    raise Exception(f'Monitor ID {monitor_id} didn’t pass.')
                monitors_completed[monitor_id] = passed
                print(f'Monitor ID {monitor_id} passed: {passed}.')
        if len(monitors_completed) < len(monitors_started):
            time.sleep(poll_interval)
    print('Metaplane process completed.') | [356] |
def METHOD_NAME():
    """
    Transducer for 20-99 e.g
    hai ba -> 23
    """
    graph_one = pynini.cross("mốt", "1")
    graph_four = pynini.cross("tư", "4")
    graph_five = pynini.cross("lăm", "5")
    graph_ten = pynini.cross("mươi", "")
    optional_ten = pynini.closure(delete_space + graph_ten, 0, 1)
    graph = pynini.union(
        ties_graph + optional_ten + delete_space + (graph_digit | graph_one | graph_four | graph_five),
        ties_graph + delete_space + graph_ten + pynutil.insert("0"),
    )
    return graph | [19, 30, 303] |
def METHOD_NAME(self): | [9, 3533, 40, 41, 3239, 641, 1745] |
def METHOD_NAME(match: re.Match[str]):
    return " " * int(match.group(1)) | [369, 2882, 41, 1041] |
def METHOD_NAME(self, poll_interval):
    """
    Query stats until we receive startup flows
    """
    while not self._flows_received:
        if self._datapath:
            self._poll_all_tables(self._datapath)
        hub.sleep(poll_interval) | [1237, 4294, 7923] |
def METHOD_NAME():
    """Test functional implementation of metric with empty inputs."""
    assert edit_distance([], []) == 0 | [9, 2004, 35, 4167] |
def METHOD_NAME(self):
    # enable compression
    return {} | [19, 4483, 1881] |
def METHOD_NAME(self, swagger_client_factory):
    # given
    self._get_swagger_client_mock(swagger_client_factory, min_compatible="0.5.14")
    # expect
    with self.assertRaises(UnsupportedClientVersion) as ex:
        HostedNeptuneBackendApiClient(api_token=API_TOKEN)
    self.assertTrue("Please install neptune-client>=0.5.14" in str(ex.exception)) | [9, 1835, 3892, 281, 180] |
def METHOD_NAME(choice):
    if (choice != 1 and choice != 2):
        if (choice == 0):
            print("Testing step 4. Remove all the intermediate testing files.\n")
        dest_dir = os.getcwd()
        for file in glob.glob('*.h5'):
            os.remove(file)
        for file in glob.glob('*.h5.dmrpp'):
            os.remove(file)
        os.remove('mh5_sha256') | [5528, 1617, 1537] |
def METHOD_NAME(self):
    request = self.req_blank
    response = estimator(request, dob="1955-05-05")
    self.assertTrue(response.status_code == 400) | [9, 5175, 539, 365, 2882, 11439] |
def METHOD_NAME(self, satellite=None):
    # Set Satellite URL for puppet-server-foreman-url
    if satellite is not None:
        satellite_url = f'https://{satellite.hostname}'
        PUPPET_COMMON_INSTALLER_OPTS['puppet-server-foreman-url'] = satellite_url
    enable_capsule_cmd = InstallerCommand(
        installer_args=PUPPET_CAPSULE_INSTALLER, installer_opts=PUPPET_COMMON_INSTALLER_OPTS
    )
    result = self.execute(enable_capsule_cmd.get_command(), timeout='20m')
    assert result.status == 0
    assert 'Success!' in result.stdout
    return self | [1317, 12595, 14457] |
def METHOD_NAME(
    self,
    shape: BluemiraWire,
    degree: float = 360.0,
) -> List[PhysicalComponent]:
    """
    Build the xyz representation of the PF coil.

    Parameters
    ----------
    shape:
        The xz cross-section shape of the coil.
    degree:
        The angle [°] around which to build the components, by default 360.0.

    Returns
    -------
    The component grouping the results in 3D (xyz).
    """
    sector_degree, n_sectors = get_n_sectors(self.params.n_TF.value, degree)
    # I doubt this is floating-point safe to collisions...
    xz_components = self.build_xz(shape)
    components = []
    for c in xz_components:
        shape = revolve_shape(c.shape, degree=sector_degree * n_sectors)
        c_xyz = PhysicalComponent(c.name, shape)
        apply_component_display_options(
            c_xyz, color=c.plot_options.face_options["color"]
        )
        components.append(c_xyz)
    return components | [56, 2846] |
def METHOD_NAME(lst):
    lst = sorted(lst)
    mid, odd = divmod(len(lst), 2)
    if odd:
        return lst[mid]
    else:
        return (lst[mid - 1] + lst[mid]) / 2.0 | [6778] |
def METHOD_NAME(self):
    app = flask.Flask(__name__)
    app.config.update(
        SERVER_NAME='localhost.localdomain:5000'
    )
    @app.route('/')
    def index():
        return None
    @app.route('/', subdomain='foo')
    def sub():
        return None
    with app.test_request_context('/'):
        self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
    with app.test_request_context('/'):
        self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
    try:
        with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
            pass
    except Exception as e:
        self.assert_true(isinstance(e, ValueError))
        self.assert_equal(str(e), "the server name provided " +
                          "('localhost.localdomain:5000') does not match the " + \
                          "server name from the WSGI environment ('localhost')")
    try:
        app.config.update(SERVER_NAME='localhost')
        with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
            pass
    except ValueError as e:
        raise ValueError(
            "No ValueError exception should have been raised \"%s\"" % e
        )
    try:
        app.config.update(SERVER_NAME='localhost:80')
        with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
            pass
    except ValueError as e:
        raise ValueError(
            "No ValueError exception should have been raised \"%s\"" % e
        ) | [9, 3736, 9, 377, 198] |
def METHOD_NAME(self, filename):
    name = os.path.basename(filename)
    if self.editor.protoPalette.findItem(name):
        item = self.tree.traverse(self.tree.root, name)
        if item:
            self.tree.DeleteItem(item)
    modelname = Filename.fromOsSpecific(filename).getFullpath()
    if modelname.endswith('.mb') or\
       modelname.endswith('.ma'):
        self.editor.convertMaya(modelname, self.addNewItem)
        return
    itemData = ObjectBase(name=name, model=modelname, actor=True)
    self.editor.protoPalette.add(itemData)
    newItem = self.tree.AppendItem(self.editor.ui.protoPaletteUI.tree.root, name)
    self.tree.SetItemPyData(newItem, itemData)
    self.tree.ScrollTo(newItem) | [-1, 171] |
def METHOD_NAME(self, statement, channel_name=None): | [539] |
def METHOD_NAME(self, base_path: Union[str, PathLike]) -> None:
    if not self.docker:
        return
    if not self.docker.get(self.BUILD) or not self.docker[self.BUILD].get(self.DOCKERFILE):
        return
    dockerfile_file = self.docker[self.BUILD][self.DOCKERFILE]
    if not dockerfile_file.startswith(FILE_PREFIX):
        return
    dockerfile_file = self._parse_file_path(dockerfile_file)
    with open(Path(base_path) / dockerfile_file, "r", encoding=DefaultOpenEncoding.READ) as f:
        self.docker[self.BUILD][self.DOCKERFILE] = f.read()
    self._docker_file_resolved = True
    return | [1014, 223, 1287] |
def METHOD_NAME(platform):
    return f"brainarray-test/{platform}.json" | [19, 607, 59] |
def METHOD_NAME(
    model: Input[BQMLModel],
    advanced_weights: Output[Artifact],
    gcp_resources: OutputPath(str),
    location: str = 'us-central1',
    query_parameters: List[str] = [],
    job_configuration_query: Dict[str, str] = {},
    labels: Dict[str, str] = {},
    project: str = _placeholders.PROJECT_ID_PLACEHOLDER, | [4701, 4702, 284, 733, 202] |
def METHOD_NAME(font_key):
    """Return a previously cached font object appropriate to the backend
    based on a key generated by the backend.

    Will return a KeyError in the case of a missing font.
    """
    font = font_cache[font_key]
    return font | [19, 596] |
def METHOD_NAME(self, days=30):
    """
    Remove expired elements from history

    :param days: number of days to keep
    """
    date = datetime.today() - timedelta(days)
    self.db.action(
        'DELETE '
        'FROM history '
        'WHERE date < ?',
        [date.strftime(History.date_format)]
    ) | [1896] |
def METHOD_NAME(self, request):
    return TestPrefixHandler.COMMANDS[: request.param] | [2458] |
def METHOD_NAME(
    self,
    molecule,
    partial_charge_method=None,
    use_conformers=None,
    strict_n_conformers=False,
    normalize_partial_charges=True,
    _cls=None,
):
    """
    Compute partial charges with the built-in toolkit using simple arithmetic operations,
    and assign the new values to the partial_charges attribute.

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    molecule : openff.toolkit.topology.Molecule
        Molecule for which partial charges are to be computed
    partial_charge_method: str, optional, default=None
        The charge model to use. One of ['zeros', 'formal_charge']. If None, 'formal_charge'
        will be used.
    use_conformers : iterable of unit-wrapped numpy arrays, each with shape
        (n_atoms, 3) and dimension of distance. Optional, default = None
        Coordinates to use for partial charge calculation. If None, an appropriate number
        of conformers will be generated.
    strict_n_conformers : bool, default=False
        Whether to raise an exception if an invalid number of conformers is provided for the
        given charge method.
        If this is False and an invalid number of conformers is found, a warning will be raised
        instead of an Exception.
    normalize_partial_charges : bool, default=True
        Whether to offset partial charges so that they sum to the total formal charge of the molecule.
        This is used to prevent accumulation of rounding errors when the partial charge generation method has
        low precision.
    _cls : class
        Molecule constructor

    Raises
    ------
    ChargeMethodUnavailableError if this toolkit cannot handle the requested charge method
    IncorrectNumConformersError if strict_n_conformers is True and use_conformers is provided
        and specifies an invalid number of conformers for the requested method
    ChargeCalculationError if the charge calculation is supported by this toolkit, but fails
    """
    PARTIAL_CHARGE_METHODS = {
        "zeros": {"rec_confs": 0, "min_confs": 0, "max_confs": 0},
        "formal_charge": {"rec_confs": 0, "min_confs": 0, "max_confs": 0},
    }
    if partial_charge_method is None:
        partial_charge_method = "formal_charge"
    if _cls is None:
        from openff.toolkit.topology.molecule import Molecule
        _cls = Molecule
    # Make a temporary copy of the molecule, since we'll be messing with its conformers
    mol_copy = _cls(molecule)
    partial_charge_method = partial_charge_method.lower()
    if partial_charge_method not in PARTIAL_CHARGE_METHODS:
        raise ChargeMethodUnavailableError(
            f'Partial charge method "{partial_charge_method}"" is not supported by '
            f"the Built-in toolkit. Available charge methods are "
            f"{list(PARTIAL_CHARGE_METHODS.keys())}"
        )
    if use_conformers is None:
        # Note that this refers back to the GLOBAL_TOOLKIT_REGISTRY by default, since
        # BuiltInToolkitWrapper can't generate conformers
        mol_copy.generate_conformers(
            n_conformers=PARTIAL_CHARGE_METHODS[partial_charge_method]["rec_confs"]
        )
    else:
        mol_copy._conformers = None
        for conformer in use_conformers:
            mol_copy._add_conformer(conformer)
    self._check_n_conformers(
        mol_copy,
        partial_charge_method=partial_charge_method,
        min_confs=0,
        max_confs=0,
        strict_n_conformers=strict_n_conformers,
    )
    if partial_charge_method == "zeros":
        partial_charges = [0.0] * molecule.n_atoms
    elif partial_charge_method == "formal_charge":
        partial_charges = [float(atom.formal_charge.m) for atom in molecule.atoms]
    molecule.partial_charges = unit.Quantity(
        partial_charges, unit.elementary_charge
    )
    if normalize_partial_charges:
        molecule._normalize_partial_charges() | [1283, 2351, 10231] |
def METHOD_NAME(self): | [9, 1385, 1640, 4961] |
def METHOD_NAME(self):
    if self._var_cache is None:
        script = self.var.get('_prepare', _eval=False, default='') + \
            self.var.get('_run', _eval=False, default='')
        self._var_cache = [
            (key, None) for key in self._extract_assignments(script)]
    return super().METHOD_NAME() + self._var_cache | [486, 100] |
def METHOD_NAME(self):
    return MPTCPFlags.MPTCP_PM_ADDR_FLAG_BACKUP in self.flags | [137, 1001] |
def METHOD_NAME(apps, schema_editor):
    """Create profiles for all existing test users, with some defaults for required fields"""
    User = apps.get_model("users", "User")
    Profile = apps.get_model("users", "Profile")
    for user in User.objects.all().iterator():
        Profile.objects.create(
            user=user,
            gender="o",
            birth_year="2000",
            company="MIT",
            job_title="Employee",
        ) | [238, 1348] |
def METHOD_NAME(queue, transfer_type, block_size, timer_factory=None):
    """Measures one-sided bandwidth."""
    transfer = transfer_type(queue, block_size)
    return block_size / _get_time(queue, transfer.do, timer_factory=timer_factory) | [1286, 3490] |
def METHOD_NAME(workflow_execution: PredictorEvaluationExecution, session,
                example_cv_result_dict):
    # Given
    session.set_response(example_cv_result_dict)
    # When
    results = workflow_execution["Example Evaluator"]
    # Then
    assert results.evaluator == PredictorEvaluationResult.build(example_cv_result_dict).evaluator
    expected_path = '/projects/{}/predictor-evaluation-executions/{}/results'.format(
        workflow_execution.project_id,
        workflow_execution.uid,
    )
    assert session.last_call == FakeCall(method='GET', path=expected_path, params={"evaluator_name": "Example Evaluator"}) | [9, 3855, 2046, 51] |
def METHOD_NAME(element):
    return type(element) == nltk.Tree | [137, 151] |
def METHOD_NAME(which: int) -> None:
    write(f'{CSI}{which}#Q') | [5099, 760, 424] |
def METHOD_NAME(self, app, **kwargs):
    self.actions.append(("create", app.app_name, kwargs.copy()))
    # Remove arguments consumed by the underlying call to create_app()
    kwargs.pop("test_mode", None)
    return full_options({"create_state": app.app_name}, kwargs) | [129, 462] |
def METHOD_NAME():
    with pytest.raises(ValueError):
        Rotator(coord="CE", rot=[(0, 0, 90)]) | [9, 11740, 362, 44] |
def METHOD_NAME():
    # parse args and config
    parser = argparse.ArgumentParser(description="Synthesize with JETS")
    # model
    parser.add_argument(
        '--config', type=str, default=None, help='Config of JETS.')
    parser.add_argument(
        '--ckpt', type=str, default=None, help='Checkpoint file of JETS.')
    parser.add_argument(
        "--phones_dict", type=str, default=None, help="phone vocabulary file.")
    parser.add_argument(
        "--speaker_dict", type=str, default=None, help="speaker id map file.")
    parser.add_argument(
        "--voice-cloning",
        type=str2bool,
        default=False,
        help="whether training voice cloning model.")
    # other
    parser.add_argument(
        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
    parser.add_argument("--test_metadata", type=str, help="test metadata.")
    parser.add_argument("--output_dir", type=str, help="output dir.")
    args = parser.METHOD_NAME()
    return args | [214, 335] |
def METHOD_NAME(self):
    self.set_http_response(status_code=200)
    response = self.ec2.get_all_instance_types()
    self.assertEqual(len(response), 18)
    instance_type = response[0]
    self.assertEqual(instance_type.name, 'm1.small')
    self.assertEqual(instance_type.cores, '1')
    self.assertEqual(instance_type.disk, '5')
    self.assertEqual(instance_type.memory, '256')
    instance_type = response[17]
    self.assertEqual(instance_type.name, 'hs1.8xlarge')
    self.assertEqual(instance_type.cores, '48')
    self.assertEqual(instance_type.disk, '24000')
    self.assertEqual(instance_type.memory, '119808') | [9, 19, 89, 119] |
def METHOD_NAME():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="""
            Check for new lines in diff that introduce trailing whitespace
            or tab characters instead of spaces in unstaged changes, the
            previous n commits, or a commit-range.
        """,
        epilog=f"""
            You can manually set the commit-range with the COMMIT_RANGE
            environment variable (e.g. "COMMIT_RANGE='47ba2c3...ee50c9e'
            {sys.argv[0]}"). Defaults to current merge base when neither
            prev-commits nor the environment variable is set.
        """)
    parser.add_argument("--prev-commits", "-p", required=False, help="The previous n commits to check")
    return parser.METHOD_NAME() | [214, 335] |
def METHOD_NAME(n_jobs, monkeypatch):
    """Try building all the dependency graph, without the actual build operations"""
    class MockPackage(buildall.Package):
        def build(self, args: Any) -> None:
            raise ValueError("Failed build")
    monkeypatch.setattr(buildall, "Package", MockPackage)
    pkg_map = buildall.generate_dependency_graph(RECIPE_DIR, {"pkg_1"})
    with pytest.raises(ValueError, match="Failed build"):
        buildall.build_from_graph(
            pkg_map, BuildArgs(), n_jobs=n_jobs, force_rebuild=True
        ) | [9, 56, 168] |
def METHOD_NAME(cls, *args, **kwargs):
    if cls._args_schema is not None:
        return cls._args_schema
    cls._args_schema = super().METHOD_NAME(*args, **kwargs)
    # define Arg Group ""
    _args_schema = cls._args_schema
    _args_schema.resource_group = AAZResourceGroupNameArg(
        required=True,
    )
    _args_schema.table_name = AAZStrArg(
        options=["-n", "--name", "--table-name"],
        help="The name of the table.",
        required=True,
        id_part="child_name_1",
    )
    _args_schema.workspace_name = AAZStrArg(
        options=["--workspace-name"],
        help="The name of the workspace.",
        required=True,
        id_part="name",
        fmt=AAZStrArgFormat(
            pattern="^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$",
            max_length=63,
            min_length=4,
        ),
    )
    return cls._args_schema | [56, 134, 135] |
def METHOD_NAME(self, spec, prefix):
    corge_cc = """#include <iostream> | [428] |
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
    """Runs the network request through the client's chained policies.

    >>> from azure.core.rest import HttpRequest
    >>> request = HttpRequest("GET", "https://www.example.org/")
    <HttpRequest [GET], url: 'https://www.example.org/'>
    >>> response = client._send_request(request)
    <HttpResponse: 200 OK>

    For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

    :param request: The network request you want to make. Required.
    :type request: ~azure.core.rest.HttpRequest
    :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
    :return: The response of your network call. Does not do error handling on your response.
    :rtype: ~azure.core.rest.HttpResponse
    """
    request_copy = deepcopy(request)
    request_copy.url = self._client.format_url(request_copy.url)
    return self._client.send_request(request_copy, **kwargs) | [353, 377] |
def METHOD_NAME(solc_version_string: str) -> str:
    solc_version_match = re.search(SOLC_FULL_VERSION_REGEX, solc_version_string)
    if solc_version_match is None:
        raise RuntimeError(f"Solc version could not be found in: {solc_version_string}.")
    return solc_version_match.group(1) | [214, -1, 281] |
def METHOD_NAME(self):
    m = Memory(width=8, depth=4)
    self.assertEqual(m.width, 8)
    self.assertEqual(m.depth, 4) | [9, 1525] |
def METHOD_NAME(Lmax, m, s, cos_theta, **kwargs):
    """
    Gives spin-weighted spherical harmonic functions on the Gauss quadrature grid.
    Returns an array with
        shape = ( Lmax - Lmin(m,s) + 1, len(z) )
    or (Lmax - Lmin(m,s) + 1,) if z is a single point.

    Parameters
    ----------
    Lmax: int >=0; spherical-harmonic degree.
    m,s : int
        spherical harmonic parameters.
    cos_theta: np.ndarray or float.
    dtype: output dtype. internal dtype = 'longdouble'.
    """
    n, a, b = spin2Jacobi(Lmax, m, s)
    init = np.exp(0.5 * Jacobi.measure(a, b, cos_theta, log=True))
    init *= ((-1.) ** max(m, -s))
    return Jacobi.polynomials(n, a, b, cos_theta, init, **kwargs) | [6998] |
def METHOD_NAME(self):
    # tuple iterator (like dict.items())
    for item in self.REGISTRY.getBuiltins():
        self.assertIsInstance(item, tuple)
        self.assertEqual(len(item), 2)
        self.assertIsInstance(item[0], STRING_TYPES)
        self.assertIsInstance(item[1], STRING_TYPES)
    # tuple unpacking support
    for style, desc in self.REGISTRY.getBuiltins():
        # __getitem__ has correct result
        self.assertEqual(self.REGISTRY[style], desc)
        self.assertIsInstance(style, STRING_TYPES)
        self.assertIsInstance(desc, STRING_TYPES)
    # Test iterator instance
    iterator = self.REGISTRY.getBuiltins()
    self.assertIsInstance(iterator, Iterable)
    # Iterator size is available
    self.assertEqual(len(iterator), len(self.REGISTRY))
    # Iterator supports range-based loop and indexing
    values = list(iterator)
    for i in range(len(iterator)):
        # Item at index matches list result
        self.assertEqual(iterator[i], values[i])
        self.assertIsInstance(iterator[i], tuple)
        self.assertEqual(len(iterator[i]), 2)
        self.assertIsInstance(iterator[i][0], STRING_TYPES)
        self.assertIsInstance(iterator[i][1], STRING_TYPES) | [9, 19, 4298] |
def METHOD_NAME(testrepo):
    blame = testrepo.blame(PATH)
    assert len(blame) == 3
    for i, hunk in enumerate(blame):
        assert hunk.lines_in_hunk == 1
        assert HUNKS[i][0] == hunk.final_commit_id
        assert HUNKS[i][1] == hunk.final_start_line_number
        assert HUNKS[i][2] == hunk.final_committer
        assert HUNKS[i][0] == hunk.orig_commit_id
        assert hunk.orig_path == PATH
        assert HUNKS[i][1] == hunk.orig_start_line_number
        assert HUNKS[i][2] == hunk.orig_committer
        assert HUNKS[i][3] == hunk.boundary | [9, 4016, 724] |
def METHOD_NAME(self):
    self.coefficient_drag_area = self.drag_coeffiecient * (
        (0.0293 * math.pow(self.height, 0.725)) * (math.pow(self.weight, 0.425)) + 0.0604
    ) | [86, -1] |
def METHOD_NAME(self, frame):
    try:
        return self._material_ids.index(frame)
    except ValueError:
        raise KeyError(f"material id {frame} not among results!") from None | [711, 896] |
def METHOD_NAME(obj):
    try:
        obj < obj
        return obj
    except TypeError:
        return repr(obj) | [93, 12751] |
def METHOD_NAME(data):
    if len(data) == 0:
        return
    _data = {}
    for key, value in data.items():
        _data[key] = pd.read_csv(StringIO(value))
    merged_df = pd.concat([df.assign(key=key) for key, df in _data.items()])
    col = merged_df.pop("key")
    if "Unnamed: 0" in merged_df.columns:
        merged_df.drop("Unnamed: 0", axis=1, inplace=True)
    merged_df.insert(0, "System", col)
    print(merged_df.to_string(index=False, justify="left")) | [38, 410] |
def METHOD_NAME(name):
    # we need to postfix the directory name with .c to trick Bazel into thinking this is a valid input
    # Related GH issue: https://github.com/bazelbuild/bazel/issues/10552
    return name + ".c" | [19, 1190, 156] |
def METHOD_NAME(self):
    with environ({"MAX_RUNS": "10", "DEQUEUE_INTERVAL": "7"}):
        with instance_for_test(
            overrides={
                "run_coordinator": {
                    "module": "dagster._core.run_coordinator",
                    "class": "QueuedRunCoordinator",
                    "config": {
                        "max_concurrent_runs": {
                            "env": "MAX_RUNS",
                        },
                        "tag_concurrency_limits": [
                            {
                                "key": "foo",
                                "value": {"applyLimitPerUniqueValue": True},
                                "limit": 3,
                            },
                            {"key": "backfill", "limit": 2},
                        ],
                        "dequeue_interval_seconds": {
                            "env": "DEQUEUE_INTERVAL",
                        },
                    },
                }
            }
        ) as _:
            pass | [9, 200, 2768, 99] |
def METHOD_NAME(self) -> None:
    sorted_tiles = visualization_registry.Registry.get_visualization_class(
        'SortedTiles')
    option_names = {
        'header': 'Pretty Tiles!',
        'use_percentages': 'invalid_value'
    }
    sorted_tiles_instance = sorted_tiles(
        'AnswerFrequencies', option_names, True)
    with self.assertRaisesRegex(
        Exception, 'Expected bool, received invalid_value'):
        sorted_tiles_instance.validate() | [9, 5721, 2, 41, 532, 1335, 99] |
def METHOD_NAME():
    underlying_future = cf.Future()
    wrapper_future = FluxFutureWrapper()
    wrapper_future._flux_future = underlying_future
    underlying_future.add_done_callback(
        lambda fut: _complete_future(".fluxexecutortest.txt", wrapper_future, fut)
    )
    underlying_future.set_exception(ValueError())
    assert wrapper_future.done()
    assert isinstance(wrapper_future.exception(), ValueError) | [9, 3637, 1076, 2158, 442] |
def METHOD_NAME(local_seed):
    np.random.seed(local_seed)
    upstream_gradients = self._randomDataOp(output_shape, dtypes.float32)
    with backprop.GradientTape(persistent=True) as tape:
        tape.watch(input_image)
        output_image = image_ops.resize_bilinear(
            input_image, output_shape[1:3], align_corners=align_corners,
            half_pixel_centers=half_pixel_centers)
        gradient_injector_output = output_image * upstream_gradients
    return tape.gradient(gradient_injector_output, input_image) | [1128, 2488, 1784] |
def METHOD_NAME(monkeypatch):
    query = "What is the Earth?"
    monkeypatch.setattr('builtins.input', lambda _: query)
    from src.gen import main
    all_generations = main(base_model='h2oai/h2ogpt-oig-oasst1-512-6_9b', cli=True, cli_loop=False, score_model='None')
    assert len(all_generations) == 1
    assert "The Earth is a planet in the Solar System" in all_generations[0] or \
           "The Earth is the third planet" in all_generations[0] | [9, 615, -1] |
def METHOD_NAME(self):
    self.create_resolver(data.sepasswd_resolver) | [9, 1509, 8052, 1836, 581] |
def METHOD_NAME(self, content):
    if len(content) == 0:
        print "Result is empty"
        sys.exit(1)
    xmldoc = parseString(content)
    itemlist = xmldoc.getElementsByTagName('aa')
    if itemlist.length <= 0:
        print "Result is empty"
        sys.exit(1)
    for item in itemlist:
        print "PDB_SITE" + ':' + item.getAttribute("pos") + item.getAttribute("aa") + ';' | [214, 146] |
def METHOD_NAME(url_root):
    """
    Show documentation about campaignListRetrieve (No CDN)
    """
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    optional_query_parameter_list = [
    ]
    potential_status_codes_list = [
        {
            'code': 'VALID_VOTER_DEVICE_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code': 'VALID_VOTER_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_id was not found.',
        },
    ]
    try_now_link_variables_dict = {
        # 'campaignx_we_vote_id': 'wv85org1',
    }
    api_response = '{\n' \
        ' "status": string,\n' \
        ' "success": boolean,\n' \
        ' "campaignx_list": list\n' \
        ' [\n' \
        ' "campaign_description": string,\n' \
        ' "campaign_title": string,\n' \
        ' "campaignx_we_vote_id": string,\n' \
        ' "final_election_date_as_integer": integer,\n' \
        ' "final_election_date_in_past": boolean,\n' \
        ' "in_draft_mode": boolean,\n' \
        ' "is_blocked_by_we_vote": boolean,\n' \
        ' "is_blocked_by_we_vote_reason": string,\n' \
        ' "is_supporters_count_minimum_exceeded": boolean,\n' \
        ' "seo_friendly_path": string,\n' \
        ' "supporters_count": integer,\n' \
        ' "supporters_count_next_goal": integer,\n' \
        ' "supporters_count_victory_goal": integer,\n' \
        ' "visible_on_this_site": boolean,\n' \
        ' "voter_can_send_updates_to_campaignx": boolean,\n' \
        ' "voter_is_campaignx_owner": boolean,\n' \
        ' "voter_signed_in_with_email": boolean,\n' \
        ' "voter_we_vote_id": string,\n' \
        ' "we_vote_hosted_campaign_photo_large_url": string,\n' \
        ' "we_vote_hosted_campaign_photo_medium_url": string,\n' \
        ' "we_vote_hosted_campaign_photo_small_url": string,\n' \
        ' "campaignx_owner_list": list\n' \
        ' [\n' \
        ' "feature_this_profile_image": boolean,\n' \
        ' "organization_name": string,\n' \
        ' "organization_we_vote_id": string,\n' \
        ' "we_vote_hosted_profile_image_url_medium": string,\n' \
        ' "we_vote_hosted_profile_image_url_tiny": string,\n' \
        ' "visible_to_public": boolean,\n' \
        ' ],\n' \
        ' "campaignx_politician_list": list\n' \
        ' [\n' \
        ' "campaignx_politician_id": integer,\n' \
        ' "politician_name": string,\n' \
        ' "politician_we_vote_id": string,\n' \
        ' "state_code": string,\n' \
        ' "we_vote_hosted_profile_image_url_large": string,\n' \
        ' "we_vote_hosted_profile_image_url_medium": string,\n' \
        ' "we_vote_hosted_profile_image_url_tiny": string,\n' \
        ' ],\n' \
        ' "campaignx_politician_list_exists": boolean,\n' \
        ' "campaignx_politician_starter_list": list\n' \
        ' [\n' \
        ' "value": string,\n' \
        ' "label": string,\n' \
        ' ],\n' \
        ' "seo_friendly_path_list": list\n' \
        ' [],\n' \
        ' ],\n' \
        ' "campaign_list_found": boolean,\n' \
        ' "promoted_campaignx_list_returned": boolean,\n' \
        ' "promoted_campaignx_we_vote_ids": list [],\n' \
        ' "voter_campaignx_supporter": {\n' \
        ' "id": integer,\n' \
        ' "campaign_supported": boolean,\n' \
        ' "campaignx_we_vote_id": string,\n' \
        ' "chip_in_total": string,\n' \
        ' "date_last_changed": string,\n' \
        ' "date_supported": string,\n' \
        ' "organization_we_vote_id": string,\n' \
        ' "supporter_endorsement": string,\n' \
        ' "supporter_name": string,\n' \
        ' "visible_to_public": boolean,\n' \
        ' "voter_signed_in_with_email": boolean,\n' \
        ' "voter_we_vote_id": string,\n' \
        ' "we_vote_hosted_profile_image_url_tiny": string,\n' \
        ' },\n' \
        ' "voter_can_send_updates_campaignx_we_vote_ids": list [],\n' \
        ' "voter_can_vote_for_politicians_list_returned": boolean,\n' \
        ' "voter_can_vote_for_politician_we_vote_ids": list [],\n' \
        ' "voter_owned_campaignx_list_returned": boolean,\n' \
        ' "voter_owned_campaignx_we_vote_ids": list [],\n' \
        ' "voter_started_campaignx_list_returned": boolean,\n' \
        ' "voter_started_campaignx_we_vote_ids": list [],\n' \
        ' "voter_supported_campaignx_list_returned": boolean,\n' \
        ' "voter_supported_campaignx_we_vote_ids": list [],\n' \
        '}'
    template_values = {
        'api_name': 'campaignListRetrieve',
        'api_slug': 'campaignListRetrieve',
        'api_introduction':
            "",
        'try_now_link': 'apis_v1:campaignListRetrieveView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values | [8852, 245, 404, 366, 671, 199] |
def METHOD_NAME(progress):
    """When an event of compilation completes, this function will be called, and
    will update the progress bar indication.

    Args:
        progress: Number of tasks completed
    """
    self.update(progress) | [86, 3064, 681] |
def METHOD_NAME(self, repo, actor=None):
    pass | [34, 1230] |
def METHOD_NAME(router, source, group, local, rp, spt_setup):
    "Expect MSDP SA."
    logger.info("waiting MSDP SA on router {}".format(router))
    test_func = partial(
        topotest.router_json_cmp,
        tgen.gears[router],
        "show ip msdp sa json",
        {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}},
    )
    _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
    assertmsg = '"{}" MSDP SA failure'.format(router)
    assert result is None, assertmsg | [1297, 12391, 160] |
def METHOD_NAME(*args, **kwargs):
    if args[3] in ["train", "test", "valid", "test_1", "test_2"]:
        data_path = args1.joinpath(f"rnn_input_{args[3]}.pkl")
        kwargs['data_path'] = data_path
        if args2:
            dataset = torch.load(data_path)
            return dataset
    return func(*args, **kwargs) | [291] |
def METHOD_NAME(self, pattern, name=None):
    name = name or pattern
    return CombinedTagStat(pattern, name, self.get_doc(name),
                           self.get_links(name)) | [19, 1832, 1813] |
def METHOD_NAME(self, size=None):
    X_train_new = self.featureTransform(self.X_train, self.shapelets)
    model = LogisticRegression(support_size=size)
    model.fit(X_train_new, self.y_train)
    self.index = np.nonzero(model.coef_)[0]
    X_test_new = self.featureTransform(
        self.X_test, self.shapelets, self.index)
    y_pred = model.predict(X_test_new)
    return y_pred | [90, 2103] |
def METHOD_NAME(self) -> float:
    """The energy of the entry."""
    raise NotImplementedError | [5121] |
def METHOD_NAME():
    plugin = PythonPlugin(part_name="my-part", options=lambda: None)
    assert plugin.get_build_packages() == {"findutils", "python3-venv", "python3-dev"} | [9, 19, 56, 2975] |
def METHOD_NAME(self):
    cwd = os.getcwd()
    pack_path = os.path.join(cwd, 'exported_withTar')
    os.makedirs(name=pack_path)
    tar_arch = self.arch_dir_comp + '.tar.gz'
    pack_path_comp = os.path.join(pack_path, tar_arch)
    pack_path_csv = os.path.join(pack_path, 'export.csv')
    self.pr.pack(destination_path=pack_path_comp, csv_file_name=pack_path_csv, compress=True)
    pr = self.pr.open("nested2")
    pr_imp = pr.open("imported2")
    pr_imp.unpack(origin_path=pack_path_comp, csv_file_name=pack_path_csv, compress=True)
    # here the 7 is the length of '.tar.gz' string
    extract_archive(pack_path_comp[:-7])
    compare_obj = dircmp(pack_path_comp[:-7], pr_imp.path)
    self.assertEqual(len(compare_obj.diff_files), 0)
    pr.remove(enable=True)
    try:
        rmtree(pack_path)
    except Exception as err_msg:
        print(f"deleting unsuccessful: {err_msg}") | [9, 512, 41, -1, 2916] |
def METHOD_NAME(self):
    return len(self.w) | [799] |
def METHOD_NAME(colorer, s, i):
    return colorer.match_plain_seq(s, i, kind="comment3", seq="--") | [397, 14909] |