text (stringlengths 15-7.82k) | ids (sequencelengths 1-7) |
---|---|
def METHOD_NAME(self):
self.producer_fifo.append(None) | [
1462,
1646,
1658
] |
def METHOD_NAME(self):
"""test case set up"""
gLogger.setLevel("INFO")
self.file = File()
self.file.LFN = "/lhcb/user/c/cibak/testFile"
self.file.Checksum = "123456"
self.file.ChecksumType = "ADLER32"
self.file2 = File()
self.file2.LFN = "/lhcb/user/f/fstagni/testFile"
self.file2.Checksum = "654321"
self.file2.ChecksumType = "ADLER32"
self.operation = Operation()
self.operation.Type = "ReplicateAndRegister"
self.operation.TargetSE = "CERN-USER"
self.operation.addFile(self.file)
self.operation.addFile(self.file2)
proxyInfo = getProxyInfo()["Value"]
self.request = Request()
self.request.RequestName = "RequestManagerHandlerTests"
self.request.Owner = proxyInfo["username"]
self.request.OwnerGroup = proxyInfo["group"]
self.request.JobID = 123
self.request.addOperation(self.operation)
# # JSON representation of a whole request
self.jsonStr = self.request.toJSON()["Value"]
# # request client
self.requestClient = ReqClient() | [
0,
1
] |
def METHOD_NAME(*args):
parser = get_parser()
(options, args) = parser.parse_args(*args)
load_config_or_exit(options.configfile)
log_to_stream(sys.stderr)
interface.start(config)
reservation_expiry = options.reservation_expiry
reservation_length = options.reservation_length
waiting_recipe_age = options.waiting_recipe_age
delayed_job_age = options.delayed_job_age
testing = options.testing
if testing:
print 'Dry run only, nothing will be sent\n'
for user in User.query:
beaker_usage = BeakerUsage(user, reservation_expiry, reservation_length,
waiting_recipe_age, delayed_job_age)
expiring_reservations = beaker_usage.expiring_reservations()
open_in_demand_systems = beaker_usage.open_in_demand_systems()
delayed_jobs = beaker_usage.delayed_jobs()
if (expiring_reservations or open_in_demand_systems or delayed_jobs):
data = {
'user_name': user.user_name,
'current_date': datetime.utcnow().strftime("%Y-%m-%d"),
'beaker_fqdn': absolute_url('/'),
'reservation_expiry': reservation_expiry,
'reservation_length': reservation_length,
'waiting_recipe_age': waiting_recipe_age,
'delayed_job_age': delayed_job_age,
'expiring_reservations': expiring_reservations,
'open_reservations': open_in_demand_systems,
'delayed_jobs': delayed_jobs
}
mail.send_usage_reminder(user, data, testing)
return | [
57
] |
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response | [
19,
243
] |
def METHOD_NAME(text):
"""Removes extra whitespace that may be in the text."""
# Ensure input is a string
text = str(text)
# Remove whitespace from the start and end of the text
text = text.strip()
# Deduplicate spaces in the string
text = re.sub(r' +', ' ', text)
return text | [
188,
1967,
1859
] |
def METHOD_NAME(cls, process_instance_id: int) -> ProcessInstanceQueueModel | None:
ctx = cls.get_thread_local_locking_context()
return ctx["locks"].pop(process_instance_id, None) # type: ignore | [
1365,
2671
] |
def METHOD_NAME(nw_name, ingress=False):
print "Verifying LB programming for containers on network %s" % nw_name
data = cli.inspect_network(nw_name, verbose=True)
if "Services" in data.keys():
services = data["Services"]
else:
print "Network %s has no services. Skipping check" % nw_name
return
fwmarks = {str(service): str(svalue["LocalLBIndex"]) for service, svalue in services.items()}
stasks = {}
for service, svalue in services.items():
if service == "":
continue
tasks = []
for task in svalue["Tasks"]:
tasks.append(str(task["EndpointIP"]))
stasks[fwmarks[str(service)]] = tasks
# for services in ingress network verify the iptables rules
# that direct ingress (published port) to backend (target port)
if ingress is True:
check_iptables(service, svalue["Ports"])
containers = get_namespaces(data, ingress)
for container, namespace in containers.items():
print "Verifying container %s..." % container
ipvs = subprocess.check_output([which("nsenter","/usr/bin/nsenter"), '--net=%s' % namespace, which("ipvsadm","/usr/sbin/ipvsadm"), '-ln'])
mark = ""
realmark = {}
for line in ipvs.splitlines():
if "FWM" in line:
mark = re.findall("[0-9]+", line)[0]
realmark[str(mark)] = []
elif "->" in line:
if mark == "":
continue
ip = ipv4match.search(line)
if ip is not None:
realmark[mark].append(format(ip.group(0)))
else:
mark = ""
for key in realmark.keys():
if key not in stasks:
print "LB Index %s" % key, "present in IPVS but missing in docker daemon"
del realmark[key]
for key in stasks.keys():
if key not in realmark:
print "LB Index %s" % key, "present in docker daemon but missing in IPVS"
del stasks[key]
for key in realmark:
service = "--Invalid--"
for sname, idx in fwmarks.items():
if key == idx:
service = sname
if len(set(realmark[key])) != len(set(stasks[key])):
print "Incorrect LB Programming for service %s" % service
print "control-plane backend tasks:"
for task in stasks[key]:
print task
print "kernel IPVS backend tasks:"
for task in realmark[key]:
print task
else:
print "service %s... OK" % service | [
250,
1228
] |
def METHOD_NAME(value, size, seed=None):
"""
Return a given sample size from a list. By default, the random number
generator uses the current system time unless given a seed value.
.. versionadded:: 3005
value
A list to be used as input.
size
The sample size to return.
seed
Any value which will be hashed as a seed for random.
CLI Example:
.. code-block:: bash
salt '*' random.sample '["one", "two"]' 1 seed="something"
"""
return salt.utils.data.METHOD_NAME(value, size, seed=seed) | [
734
] |
def METHOD_NAME(self):
def f(a, b):
return torch.mm(a, b)
inp = (
T(10, 10),
T(10, 10),
)
self.assertNotZero(calculate_runtime(f, *inp)) | [
9,
3074
] |
def METHOD_NAME(self, h, name):
"""Quick fix for handler methods that are currently left out!"""
def delegate(*args, **kw):
method = getattr(h, name)
return method(*args, **kw)
return delegate | [
129,
4666
] |
def METHOD_NAME (self, line): | [
214,
224,
534
] |
def METHOD_NAME(self):
"""Check that the output is created"""
# run the import module
self.assertModule(
"t.rast.import.netcdf",
flags="fo",
input=self.input_sentinel[0],
output=self.output_sentinel,
memory=2048,
nprocs=2,
nodata=-1,
)
# check to see if output is in mapset
# Adjust to STRDS
# self.assertRasterExists(self.output, msg="Output was not created") | [
9,
4038,
2602,
654,
6123,
636
] |
def METHOD_NAME(self):
network_1 = self.create_network()
network_1.peers['node_2'] = Peer()
network_1.peers['node_3'] = Peer(dealer_running=False)
self.assertEqual(1, network_1.num_of_peers_connected()) | [
9,
181,
47,
8869,
2261
] |
def METHOD_NAME(self, vpn_gateway_id: str) -> VpnGateway:
deleted = self.vpn_gateways.get(vpn_gateway_id, None)
if not deleted:
raise InvalidVpnGatewayIdError(vpn_gateway_id)
deleted.state = "deleted"
return deleted | [
34,
8938,
14
] |
def METHOD_NAME( self ) : | [
9
] |
def METHOD_NAME():
for category in os.listdir(event_root):
cat_dir = os.path.join(event_root, category)
if not os.path.isdir(cat_dir):
continue
for event in os.listdir(cat_dir):
evt_dir = os.path.join(cat_dir, event)
if os.path.isdir(evt_dir):
print_tpoint(category, event) | [
38,
-1
] |
def METHOD_NAME(
cfg, virtualname, __salt__, _options, profile_attr, profile_attrs
):
"""
Fetches profile specific options if applicable
@see :func:`get_returner_options`
:return: a options dict
"""
if (not profile_attr) or (profile_attr not in _options):
return {}
# Using a profile and it is in _options
creds = {}
profile = _options[profile_attr]
if profile:
log.debug("Using profile %s", profile)
if "config.option" in __salt__:
creds = cfg(profile)
else:
creds = cfg.get(profile)
if not creds:
return {}
return {
pattr: creds.get("{}.{}".format(virtualname, profile_attrs[pattr]))
for pattr in profile_attrs
} | [
1047,
337,
2766
] |
def METHOD_NAME(self):
self.dps[HVACMODE_DPS] = True
self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT)
self.dps[HVACMODE_DPS] = False
self.assertEqual(self.subject.hvac_mode, HVACMode.OFF) | [
9,
7162,
854
] |
def METHOD_NAME(self, data, tags, nds):
if "name" not in tags or "highway" not in tags or "ref" in tags:
return
ref = self.ReRefRoute1.match(tags["name"])
if ref:
ref_src = ref.group(1)
ref_dest = ref.group(2)
if " la {0}".format(ref_src) in tags["name"] or " de {0}".format(ref_src) in tags["name"] or " du {0}".format(ref_src) in tags["name"]:
return
if "ancienne" in tags["name"]:
return {"class": 904}
name = re.sub(self.MultipleSpace, " ", tags["name"].replace(ref_src, "").strip())
if name == "":
fix = {"-":["name"], "+":{"ref": ref_dest}}
else:
fix = {"~":{"name": name}, "+":{"ref": ref_dest}}
return {"class": 904, "fix": fix}
if self.ReRefRoute2.match(tags["name"]):
return {"class": 904, "subclass": 1, "text": {"en": "name={0}".format(tags["name"])}} | [
3504
] |
def METHOD_NAME(self):
return self.__itemModifiedAttributes | [
1024,
680,
177
] |
def METHOD_NAME(self, include_gripper: bool = False) -> list[Joint]:
return rest.call(rest.Method.GET, f"{self.settings.url}/joints", list_return_type=Joint) | [
4705,
4502
] |
def METHOD_NAME(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in Tabular tokens using the vocab."""
tokens = []
sizes = self.code_column.sizes
ids_size = sum(sizes)
cindex = 0
eor_pos = find_index_of(ids, self.eor)
eod_pos = find_index_of(ids, self.eod)
if eor_pos >= 0 and eod_pos >= 0:
idd = min(eor_pos, eod_pos)
cindex = (ids_size - idd) % ids_size
elif eor_pos >= 0 and eod_pos < 0:
idd = eor_pos
cindex = (ids_size - idd) % ids_size
elif eod_pos >= 0 and eor_pos < 0:
idd = eod_pos
cindex = (ids_size - idd) % ids_size
cum_sizes = numpy.cumsum(sizes)
old_column_index = -1
token_ids = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
index = cindex % ids_size
column_index = numpy.where(index < cum_sizes)[0][0]
column = self.code_column.columns[column_index]
if old_column_index != column_index:
token_ids = [i]
old_column_index = column_index
else:
token_ids.append(i)
if len(token_ids) == sizes[column_index]:
tokens.append(self.code_column.decode(column, token_ids))
cindex += 1
return tokens | [
308,
24,
1735
] |
def METHOD_NAME(
predicate: Callable[[_PartitionMember], int | bool], values: list[_PartitionMember]
) -> tuple[list[_PartitionMember], list[_PartitionMember]]: ... | [
2312
] |
async def METHOD_NAME(self):
msgf = await self.bboxsub.get_next_message()
msg = msgf.objects[0]
# print msg
c1 = rosmsg_to_numpy(msg.points[0])
c2 = rosmsg_to_numpy(msg.points[1])
tmp = (((c1 + c2) / 2.0), msgf, msg.name)
return tmp | [
19,
1262,
896
] |
def METHOD_NAME(self, data):
"""
Call management command with `data` as contents of input file.
"""
with patch(
_COMMAND_PATH + '.py3_open',
mock_open(read_data=data)
) as _:
call_command(
self.command,
uid_mapping='./foo.json',
saml_provider_slug=self.provider_slug
) | [
128,
462
] |
def METHOD_NAME(self):
@cuda.jit
def foo(r, x):
r[0] = x + 1
N = 10
arr_f32 = np.zeros(N, dtype=np.float32)
with override_config('CUDA_WARN_ON_IMPLICIT_COPY', 1):
with warnings.catch_warnings(record=True) as w:
foo[1, N](arr_f32, N)
self.assertEqual(w[0].category, NumbaPerformanceWarning)
self.assertIn('Host array used in CUDA kernel will incur',
str(w[0].message))
self.assertIn('copy overhead', str(w[0].message)) | [
9,
883,
69,
1806,
877
] |
def METHOD_NAME(self):
return self.end - self.start | [
3229
] |
def METHOD_NAME(events, **kw):
events.write(fname, 'CSZ', **kw)
assert iocsv._is_csz(fname)
events2 = read_events(fname, check_compression=check_compression)
assert len(events2) == len(events)
for ev1, ev2 in zip(events, events2):
assert len(ev2.origins[0].arrivals) == \
len(ev1.origins[0].arrivals)
assert len(ev2.picks) == \
len(ev1.picks) | [
9,
77,
203
] |
def METHOD_NAME(self) -> None:
config = {"root_restrict_files": [".git", ".foo"]}
expect = [
("directory", ".git", True),
("file", ".foo", True),
("directory", ".foo", True),
(None, None, False),
("directory", ".svn", False),
("file", "baz", False),
]
self.runWatchTests(config=config, expect=expect) | [
9,
1563,
12482
] |
def METHOD_NAME():
if len(timer.TAPE) > 1:
return timer.TAPE.pop()
else:
return -1 | [
19
] |
def METHOD_NAME(con, df):
"""
Return a SpatiaLite connection containing the nybb table.
Parameters
----------
`con`: ``sqlite3.Connection``
`df`: ``GeoDataFrame``
"""
with con:
geom_col = df.geometry.name
srid = get_srid(df)
con.execute(
"CREATE TABLE IF NOT EXISTS nybb "
"( ogc_fid INTEGER PRIMARY KEY"
", borocode INTEGER"
", boroname TEXT"
", shape_leng REAL"
", shape_area REAL"
")"
)
con.execute(
"SELECT AddGeometryColumn(?, ?, ?, ?)",
("nybb", geom_col, srid, df.geom_type.dropna().iat[0].upper()),
)
con.execute("SELECT CreateSpatialIndex(?, ?)", ("nybb", geom_col))
sql_row = "INSERT INTO nybb VALUES(?, ?, ?, ?, ?, GeomFromText(?, ?))"
con.executemany(
sql_row,
(
(
None,
row.BoroCode,
row.BoroName,
row.Shape_Leng,
row.Shape_Area,
row.geometry.wkt if row.geometry else None,
srid,
)
for row in df.itertuples(index=False)
),
) | [
129,
-1
] |
def METHOD_NAME(self):
"""Extend $GEM_PATH in module file."""
txt = super(RubyGem, self).METHOD_NAME()
# for stand-alone Ruby gem installs, $GEM_PATH needs to be updated
if not self.is_extension or self.master.name != 'Ruby':
txt += self.module_generator.prepend_paths('GEM_PATH', [''])
return txt | [
93,
298,
1967
] |
def METHOD_NAME(check_name, version_string):
"""
Compose a string to use for release tags
"""
if check_name == 'ddev':
version_string = f'v{version_string}'
if check_name:
return f'{check_name}-{version_string}'
else:
return version_string | [
19,
586,
82,
144
] |
async def METHOD_NAME(self):
url = self.get_image_url("image.jpg")
image_bytes = self.get_image_bytes("image.jpg")
storage = Storage(self.context)
await storage.put(url, image_bytes)
result = await storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
expect(result).to_equal(image_bytes) | [
9,
427,
1276
] |
def METHOD_NAME(self):
with mock.patch(GET_CLIENT_BY_USER, return_value=MockClient()):
result = self.get_job_result(TEST_DATA, TEST_PARENT_DATA)
self.assertTrue(result) | [
9,
19,
202,
351,
1571
] |
def METHOD_NAME():
format = '{} {}: {};'
for component, symbols in component_map.iteritems():
print 'COMPONENT:', component
for key in symbols.keys():
for symbol in symbols[key]: print format.format(component, key, symbol)
print | [
38,
339
] |
def METHOD_NAME(self, observation, state, step_type):
"""Update the goal if the episode just beginned; otherwise keep using
the goal in ``state``.
Args:
observation (nested Tensor): the observation at the current time step
state (nested Tensor): state of this goal generator
step_type (StepType):
Returns:
Tensor: a batch of one-hot tensors representing the updated goals.
"""
new_goal_mask = torch.unsqueeze((step_type == StepType.FIRST), dim=-1)
generated_goal = self._generate_goal(observation, state)
new_goal = torch.where(new_goal_mask, generated_goal, state.goal)
return new_goal | [
86,
1523
] |
def METHOD_NAME(empty_msgid_store: MessageIdStore) -> None:
empty_msgid_store.add_msgid_and_symbol("W1234", "warning-symbol")
with pytest.raises(InvalidMessageError) as error:
empty_msgid_store.check_msgid_and_symbol("W1234", "other-symbol")
assert (
"Message id 'W1234' cannot have both 'other-symbol' and 'warning-symbol' as symbolic name."
in str(error.value)
) | [
9,
1119,
1608
] |
def METHOD_NAME(self):
current = Current(1e4)
current_str = json.dumps(SIMSON(current), cls=GSONEncoder)
current_regen = json.loads(current_str, cls=GSONDecoder)
self.assertAlmostEqual(current.get_value(), current_regen.get_value()) | [
9,
1056,
2109
] |
def METHOD_NAME(self, expr: str, globals: dict[str, Any] | None = None, locals: Mapping[str, Any] | None = None) -> None: ... | [
13745
] |
def METHOD_NAME(self):
return str(self.as_dict()) | [
947,
455
] |
def METHOD_NAME(self, username):
endpoint = "/v1/user/" + username
return self._call(endpoint) | [
21
] |
def METHOD_NAME(config):
return _relocate_fields_to_subfolder(config, CONF_TRACES, GRAPH_TRACE_SCHEMA) | [
12851,
2576
] |
def METHOD_NAME():
"""
testWithNoKwargs illustrates using the launchHubServer function with no
parameters at all. Considerations:
* A Keyboard, Mouse, Monitor, and Experiment device are created by default.
* All devices use their default parameter settings. Therefore,
not very useful in real studies.
* ioHub DataStore is not enabled.
"""
io = launchHubServer()
# Get the default keyboard device created.
keyboard = io.devices.keyboard
print()
print(" ** PRESS A KEY TO CONTINUE.....")
# Check for new events every 1/4 second.
# By using the io.wait() function, the ioHub Process is checked for
# events every 50 msec or so, and they are cached in the PsychoPy process
# until the next getEvents() call is made. On Windows, messagePump() is also
# called periodically so that any Window you have created does not lock up.
#
while not keyboard.getEvents():
io.wait(0.25)
print("A Keyboard Event was Detected; exiting Test.")
io.quit() | [
9,
41,
654,
1475
] |
def METHOD_NAME(nw: NodeWrangler, base_hue):
roughness = uniform(.5, .8)
color = *colorsys.hsv_to_rgb(base_hue, uniform(.4, .5), log_uniform(.02, .1)), 1
principled_bsdf = nw.new_node(Nodes.PrincipledBSDF,
input_kwargs={'Base Color': color, 'Roughness': roughness})
return principled_bsdf | [
1871,
-1
] |
def METHOD_NAME(self, **kwargs):
context = super().METHOD_NAME(**kwargs)
invitation = self.get_object()
from_user = invitation.from_user
context["requestor_name"] = from_user.get_full_name() or from_user.username
context["requestor_url"] = reverse(
"profiles_profile_detail", args=[from_user.username]
)
context["target_name"] = invitation.object_name
context["target_type"] = invitation.object._meta.verbose_name
return context | [
19,
198,
365
] |
def METHOD_NAME():
import sfepy
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh, FEDomain, Field
mesh = Mesh.from_file('meshes/2d/rectangle_tri.mesh',
prefix_dir=sfepy.data_dir)
domain = FEDomain('domain', mesh)
dim = domain.shape.dim
min_x, max_x = domain.get_mesh_bounding_box()[:,0]
eps = 1e-8 * (max_x - min_x)
omega = domain.create_region('Omega', 'all')
gamma1 = domain.create_region('Gamma1',
'vertices in x < %.10f' % (min_x + eps),
'facet')
gamma2 = domain.create_region('Gamma2',
'vertices in x > %.10f' % (max_x - eps),
'facet')
field = Field.from_args('fu', nm.float64, 'vector', omega,
approx_order=2)
return Struct(dim=dim, omega=omega, gamma1=gamma1, gamma2=gamma2,
field=field) | [
365
] |
def METHOD_NAME(self, value, expected):
""" """
assert plugin_common.convert_to_type(value) == expected | [
9,
197,
24,
44,
3026
] |
def METHOD_NAME(self):
"""Construct docstring for instances
Lists the public top-level paths inside the location, where
non-public means has a `.` or `_` prefix or is a 'tests'
directory.
"""
top_level = sorted(
os.path.relpath(p, self.files) + "/"[: p.is_dir()]
for p in self.files.iterdir()
if p.name[0] not in (".", "_") and p.name != "tests"
)
doclines = [
f"Load package files relative to ``{self._anchor}``.",
"",
"This package contains the following (top-level) files/directories:",
"",
*(f"* ``{path}``" for path in top_level),
]
return "\n".join(doclines) | [
366
] |
def METHOD_NAME(
tr_doc: Document, rd: str, rp: str | None = None, ra: str | None = None
) -> tuple[Document | None, str | None]:
"""
Find document and paragraph id from cited source Translation whose language matches
the Translation we are currently creating.
Note some elements of the return value may be None.
:param rd: source document id
:param rp: source document paragraph id
:param ra: source document paragraph area name
:param tr_doc: Translation that is citing the source document
:return: the matched source Translation and paragraph id as a tuple.
"""
matched_doc = None
par_id = None
source_docinfo = DocEntry.find_by_id(int(rd)) if rd else None
if source_docinfo:
for source_tr in source_docinfo.translations:
# Documents might be missing a lang_id, or even a DocInfo
if (
source_tr.lang_id
and tr_doc.docinfo
and source_tr.lang_id == tr_doc.docinfo.lang_id
):
matched_doc = source_tr
# Find matching paragraph hash for translated citation par
for p in source_tr.document:
if (rp and p.get_attr("rp") == rp) or (
ra and p.get_attr("area") == ra
):
par_id = p.id
break
break
if not matched_doc:
matched_doc = source_docinfo.document
par_id = rp
return matched_doc, par_id | [
416,
1767,
3626,
1254,
1458
] |
def METHOD_NAME(command, silent=False, env=None, cwd=None):
"""
Execute the the build command and continuously write
the output from the process to the standard output.
"""
proc = subprocess.Popen(
command,
bufsize=-1,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
shell=True,
universal_newlines=True,
encoding="utf-8",
errors="ignore")
while True:
line = proc.stdout.readline()
if not line and proc.poll() is not None:
break
if not silent:
sys.stdout.write(line)
return proc.returncode | [
750,
-1
] |
def METHOD_NAME(args):
# 1. load pre-tained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
# because the model is trained by multiple gpus, prefix module should be removed
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
# tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
# 2. parse images list and landmark
lmk_file = args.lmk_file
ts = time.time()
rank_land, rank_img_list, start, end = parse_quality_list_part(lmk_file, args.world_size, args.rank,
args.resume_idx)
print('parse land file in {:.3f} seconds'.format(time.time() - ts))
# for batch processing
print('World size {}, rank {}, start from {}, end with {}'.format(args.world_size, args.rank, start, end))
dataset = McDataset(rank_img_list, rank_land, transform=transform, std_size=STD_SIZE)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
for img_idx, (inputs, ori_imgs, img_fps, roi_boxes) in enumerate(tqdm(dataloader)):
# forward: one step
with torch.no_grad():
if args.mode == 'gpu':
inputs = inputs.cuda()
params = model(inputs)
params = params.cpu().numpy()
roi_boxes = roi_boxes.numpy()
outputs_roi_boxes = roi_boxes
if args.bbox_init == 'two':
step_two_ori_imgs = []
step_two_roi_boxes = []
ori_imgs = ori_imgs.numpy()
for ii in range(params.shape[0]):
# 68 pts
pts68 = predict_68pts(params[ii], roi_boxes[ii])
# two-step for more accurate bbox to crop face
roi_box = parse_roi_box_from_landmark(pts68)
img_step2 = crop_img(ori_imgs[ii], roi_box)
img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
# input = transform(img_step2).unsqueeze(0)
step_two_ori_imgs.append(transform(img_step2))
step_two_roi_boxes.append(roi_box)
with torch.no_grad():
step_two_ori_imgs = torch.stack(step_two_ori_imgs, dim=0)
inputs = step_two_ori_imgs
if args.mode == 'gpu':
inputs = inputs.cuda()
params = model(inputs)
params = params.cpu().numpy()
outputs_roi_boxes = step_two_roi_boxes
# dump results
if args.dump_param:
for img_fp, param, roi_box in zip(img_fps, params, outputs_roi_boxes):
split = img_fp.split('/')
save_name = os.path.join(args.save_dir, '{}.txt'.format(os.path.splitext(split[-1])[0]))
this_param = param * param_std + param_mean
this_param = np.concatenate((this_param, roi_box))
this_param.tofile(save_name, sep=' ') | [
57
] |
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return self._output() | [
1519
] |
def METHOD_NAME(self):
"""
Testing methods in the solverwriter factory registration process
"""
SolverFactory.unregister('stest3')
self.assertTrue('stest3' not in SolverFactory)
SolverFactory.register('stest3')(MockSolver)
self.assertTrue('stest3' in SolverFactory)
self.assertTrue('_mock_cbc' in SolverFactory) | [
9,
2644,
2213
] |
def METHOD_NAME():
if os.name == 'nt':
logger.trace('Symlinks not supported on Windows. Skipping Symlink plugin register.')
return
plugin.register(Symlink, 'symlink', api_ver=2) | [
372,
2793
] |
def METHOD_NAME(self, adapters):
for adapter in adapters:
full_class = adapter.get("adapter")
adapter_prefix = adapter.get("prefix")
if full_class is None or adapter_prefix is None:
raise HttpSessionAdapterConfigException(
"Invalid http session adapter config, prefix: {} or class: {} "
"not defined correctly".format(adapter_prefix, full_class)
)
module, class_name = full_class.rsplit(".", 1)
adapter_module = importlib.import_module(module)
adapter_class = getattr(adapter_module, class_name)
self._session.mount(adapter_prefix, adapter_class()) | [
0,
721,
240,
5645
] |
def METHOD_NAME(self):
"""Test the run method."""
with patch.object(
ConnectTaskRunner, "connect", side_effect=invalid_login
) as mock_connect:
status = self.runner.run(Value("i", ScanJob.JOB_RUN))
self.assertEqual(ScanTask.FAILED, status[1])
mock_connect.assert_called_once_with() | [
9,
1423,
22
] |
def METHOD_NAME(self, train_data, device, args):
model = self.model
model.to(device)
model.train()
# train and update
criterion = nn.CrossEntropyLoss().to(device) # pylint: disable=E1102
if args.client_optimizer == "sgd":
optimizer = torch.optim.SGD(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=args.learning_rate,
)
else:
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=args.learning_rate,
weight_decay=args.weight_decay,
amsgrad=True,
)
epoch_loss = []
current_steps = 0
current_epoch = 0
while current_steps < args.local_iterations:
batch_loss = []
for batch_idx, (x, labels) in enumerate(train_data):
x, labels = x.to(device), labels.to(device)
model.zero_grad()
log_probs = model(x)
labels = labels.long()
loss = criterion(log_probs, labels) # pylint: disable=E1102
loss.backward()
# Uncomment the following line to avoid nan loss
# torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
optimizer.step()
# logging.info(
# "Update Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
# epoch,
# (batch_idx + 1) * args.batch_size,
# len(train_data) * args.batch_size,
# 100.0 * (batch_idx + 1) / len(train_data),
# loss.item(),
# )
# )
batch_loss.append(loss.item())
current_steps += 1
if current_steps == args.local_iterations:
break
current_epoch += 1
epoch_loss.append(sum(batch_loss) / len(batch_loss))
logging.info(
"Client Index = {}\tEpoch: {}\tLoss: {:.6f}".format(
self.id, current_epoch, sum(epoch_loss) / len(epoch_loss)
)
) | [
849,
4244
] |
def METHOD_NAME(self, routes):
self.routes.update(routes) | [
238,
3968
] |
def METHOD_NAME(G: Graph) -> None:
assert set(G.node_labels()) == {"Node", "Node2"} | [
9,
303,
1716,
415
] |
def METHOD_NAME(reference, hypothesis, ignore_case=False, delimiter=' '):
"""Calculate word error rate (WER). WER compares reference text and
hypothesis text at the word level. WER is defined as:
.. math::
WER = (Sw + Dw + Iw) / Nw
where
.. code-block:: text
Sw is the number of words substituted,
Dw is the number of words deleted,
Iw is the number of words inserted,
Nw is the number of words in the reference
We can use levenshtein distance to calculate WER. Please draw an attention
that empty items will be removed when splitting sentences by delimiter.
Args:
reference (str): The reference sentence.
hypothesis (str): The hypothesis sentence.
ignore_case (bool): Whether case-sensitive or not.
delimiter (char): Delimiter of input sentences.
Returns:
float: Word error rate.
Raises:
ValueError: If word number of reference is zero.
"""
edit_distance, ref_len = word_errors(reference, hypothesis, ignore_case,
delimiter)
if ref_len == 0:
raise ValueError("Reference's word number should be greater than 0.")
METHOD_NAME = float(edit_distance) / ref_len
return METHOD_NAME | [
3140
] |
def METHOD_NAME(self, order_list: listOfOrders):
data_lock = dataLocks(self.data)
for order in order_list:
# try:
# we allow existing orders to be modified
log = order.log_with_attributes(self.log)
log.debug("Required order %s" % str(order))
instrument_locked = data_lock.is_instrument_locked(order.instrument_code)
if instrument_locked:
log.debug("Instrument locked, not submitting")
continue
self.submit_order(order) | [
579,
852,
245
] |
def METHOD_NAME(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final) | [
2376,
1268
] |
def METHOD_NAME(self, msg_obj, *args, **kwargs):
if not self.phone_number_is_valid(msg_obj.phone_number):
msg_obj.set_system_error(SMS.ERROR_INVALID_DESTINATION_NUMBER)
return
params = self.populate_params(msg_obj)
resp = requests.get(self.url, params=params, timeout=settings.SMS_GATEWAY_TIMEOUT)
self.handle_response(msg_obj, resp.status_code, resp.text) | [
353
] |
def METHOD_NAME(self):
with gdbm.open(filename, 'c') as db:
db[b'bytes key \xbd'] = b'bytes value \xbd'
with gdbm.open(filename, 'r') as db:
self.assertEqual(list(db.keys()), [b'bytes key \xbd'])
self.assertTrue(b'bytes key \xbd' in db)
self.assertEqual(db[b'bytes key \xbd'], b'bytes value \xbd') | [
9,
321
] |
def METHOD_NAME(self):
return getattr(self, "settings_build", self.settings) | [
817,
56
] |
def METHOD_NAME(patch):
module = patch("h.views.admin.users.models")
module.Annotation = Annotation
return module | [
379
] |
def METHOD_NAME(self, block: dict) -> None:
block_num = block.get('number')
proofs = block.get('proofs')
for proof in proofs:
vk = proof.get('signer')
assert self.block_storage.is_member_at_block_height(block_num=block_num, vk=vk), f"block number {block_num} did not pass block consensus." | [
187,
1810
] |
def METHOD_NAME(self, source):
"""Simple helper to return the file specified name"""
target = source.get("target")
if target:
return target
return os.path.basename(source["artifact"]) | [
1147
] |
def METHOD_NAME(self, score_map: np.ndarray,
poly_pts: np.ndarray) -> float:
"""Compute the average score over the area of the bounding box of the
polygon.
Args:
score_map (np.ndarray): The score map.
poly_pts (np.ndarray): The polygon points.
Returns:
float: The average score.
"""
h, w = score_map.shape[:2]
poly_pts = poly_pts.copy()
xmin = np.clip(
np.floor(poly_pts[:, 0].min()).astype(np.int32), 0, w - 1)
xmax = np.clip(
np.ceil(poly_pts[:, 0].max()).astype(np.int32), 0, w - 1)
ymin = np.clip(
np.floor(poly_pts[:, 1].min()).astype(np.int32), 0, h - 1)
ymax = np.clip(
np.ceil(poly_pts[:, 1].max()).astype(np.int32), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
poly_pts[:, 0] = poly_pts[:, 0] - xmin
poly_pts[:, 1] = poly_pts[:, 1] - ymin
cv2.fillPoly(mask, poly_pts.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(score_map[ymin:ymax + 1, xmin:xmax + 1], mask)[0] | [
19,
2739,
747
] |
def METHOD_NAME(cls):
super().METHOD_NAME()
cls.loader = FakeModelLoader(cls.env, cls.__module__)
cls.loader.backup_registry()
from .fake_models import MisKpiDataTestItem
cls.loader.update_registry((MisKpiDataTestItem,))
report = cls.env["mis.report"].create(dict(name="test report"))
cls.kpi1 = cls.env["mis.report.kpi"].create(
dict(
report_id=report.id,
name="k1",
description="kpi 1",
expression="AccountingNone",
)
)
cls.expr1 = cls.kpi1.expression_ids[0]
cls.kpi2 = cls.env["mis.report.kpi"].create(
dict(
report_id=report.id,
name="k2",
description="kpi 2",
expression="AccountingNone",
)
)
cls.expr2 = cls.kpi2.expression_ids[0]
cls.kd11 = cls.env["mis.kpi.data.test.item"].create(
dict(
kpi_expression_id=cls.expr1.id,
date_from="2017-05-01",
date_to="2017-05-10",
amount=10,
)
)
cls.kd12 = cls.env["mis.kpi.data.test.item"].create(
dict(
kpi_expression_id=cls.expr1.id,
date_from="2017-05-11",
date_to="2017-05-20",
amount=20,
)
)
cls.kd13 = cls.env["mis.kpi.data.test.item"].create(
dict(
kpi_expression_id=cls.expr1.id,
date_from="2017-05-21",
date_to="2017-05-25",
amount=30,
)
)
cls.kd21 = cls.env["mis.kpi.data.test.item"].create(
dict(
kpi_expression_id=cls.expr2.id,
date_from="2017-06-01",
date_to="2017-06-30",
amount=3,
)
) | [
0,
1,
2
] |
def METHOD_NAME():
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
expected_failures = {}
last_exp_tmp = None
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "h:p:e:x:X:n:", ["help"])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-x':
expected_failures[arg] = None
last_exp_tmp = str(arg)
elif opt == '-X':
if not last_exp_tmp:
raise ValueError("-x has to be specified before -X")
expected_failures[last_exp_tmp] = str(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
else:
raise ValueError("Unknown option: {0}".format(opt))
if args:
run_only = set(args)
else:
run_only = None
#
# Test if server aborts connection upon receiving application data
# before Finished
#
conversations = {}
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions=None))
node = node.add_child(ExpectServerHello())
node = node.add_child(Close())
conversations["sanity"] = conversation
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={}))
node = node.add_child(ExpectServerHello())
node = node.add_child(Close())
conversations["empty extensions"] = conversation
# run the conversation
good = 0
bad = 0
xfail = 0
xpass = 0
failed = []
xpassed = []
if not num_limit:
num_limit = len(conversations)
# make sure that sanity test is run first and last
# to verify that server was running and kept running throughout
sanity_tests = [('sanity', conversations['sanity'])]
if run_only:
if num_limit > len(run_only):
num_limit = len(run_only)
regular_tests = [(k, v) for k, v in conversations.items() if
k in run_only]
else:
regular_tests = [(k, v) for k, v in conversations.items() if
(k != 'sanity') and k not in run_exclude]
sampled_tests = sample(regular_tests, min(num_limit, len(regular_tests)))
ordered_tests = chain(sanity_tests, sampled_tests, sanity_tests)
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(c_test)
res = True
exception = None
try:
runner.run()
except Exception as exp:
exception = exp
print("Error while processing")
print(traceback.format_exc())
res = False
if c_name in expected_failures:
if res:
xpass += 1
xpassed.append(c_name)
print("XPASS-expected failure but test passed\n")
else:
if expected_failures[c_name] is not None and \
expected_failures[c_name] not in str(exception):
bad += 1
failed.append(c_name)
print("Expected error message: {0}\n"
.format(expected_failures[c_name]))
else:
xfail += 1
print("OK-expected failure\n")
else:
if res:
good += 1
print("OK\n")
else:
bad += 1
failed.append(c_name)
print("Test end")
print(20 * '=')
print("version: {0}".format(version))
print(20 * '=')
print("TOTAL: {0}".format(len(sampled_tests) + 2*len(sanity_tests)))
print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
print("PASS: {0}".format(good))
print("XFAIL: {0}".format(xfail))
print("FAIL: {0}".format(bad))
print("XPASS: {0}".format(xpass))
print(20 * '=')
sort = sorted(xpassed ,key=natural_sort_keys)
if len(sort):
print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
sort = sorted(failed, key=natural_sort_keys)
if len(sort):
print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
if bad or xpass:
sys.exit(1) | [
57
] |
def METHOD_NAME(
model: Model,
left: EntityProxy,
right: EntityProxy,
weights: Weights = COMPARE_WEIGHTS,
) -> float:
"""Compare two entities and return a match score."""
scores = compare_scores(model, left, right)
return _compare(scores, weights) | [
979
] |
def METHOD_NAME(self) -> dict:
return {
"name": f"aws_mock-{self.test_id}",
"tty": True,
"environment": {
"AWS_MOCK_HOSTS": " ".join(chain(
DEFAULT_MOCKED_HOSTS,
(f"ec2.{region}.amazonaws.com" for region in self.regions if region != "eu-west-2"),
)),
},
"pull": True,
} | [
874,
248,
224,
22,
335
] |
def METHOD_NAME(
staff_api_client,
permission_group_manage_orders,
channel_PLN,
channel_USD,
product,
shipping_method,
graphql_address_data,
):
"""Ensure that staff user is able to create and complete the draft order
in the channel he has access to."""
# given
permission_group_manage_orders.restricted_access_to_channels = True
permission_group_manage_orders.save(update_fields=["restricted_access_to_channels"])
permission_group_manage_orders.channels.add(channel_USD)
permission_group_manage_orders.user_set.add(staff_api_client.user)
# when
# create order
draft_order_create_variables = {
"input": {
"userEmail": "[email protected]",
"billingAddress": graphql_address_data,
"shippingAddress": graphql_address_data,
"channelId": graphene.Node.to_global_id("Channel", channel_USD.id),
}
}
response = staff_api_client.post_graphql(
DRAFT_ORDER_CREATE_MUTATION, draft_order_create_variables
)
draft_order_id = get_graphql_content(response)["data"]["draftOrderCreate"]["order"][
"id"
]
# add lines to order
variant = product.variants.first()
order_lines_create_variables = {
"orderId": draft_order_id,
"variantId": graphene.Node.to_global_id("ProductVariant", variant.id),
"quantity": 2,
}
staff_api_client.post_graphql(
ORDER_LINES_CREATE_MUTATION, order_lines_create_variables
)
# update order with available product and shipping method
draft_order_update_variables = {
"id": draft_order_id,
"input": {
"shippingMethod": graphene.Node.to_global_id(
"ShippingMethod", shipping_method.id
),
},
}
staff_api_client.post_graphql(
DRAFT_ORDER_UPDATE_MUTATION, draft_order_update_variables
)
# complete order
staff_api_client.post_graphql(DRAFT_ORDER_COMPLETE_MUTATION, {"id": draft_order_id})
# then
# query created order
response = staff_api_client.post_graphql(QUERY_ORDER_BY_ID, {"id": draft_order_id})
content = get_graphql_content(response)
order_data = content["data"]["order"]
assert order_data["status"] == OrderStatus.UNFULFILLED.upper() | [
9,
129,
852,
604,
1045,
623,
473
] |
def METHOD_NAME(s: str):
""" Lower text and remove punctuation, articles and extra whitespace """
return QAMetrics.white_space_fix(QAMetrics.remove_articles(QAMetrics.remove_punc(s.lower()))) | [
1137,
3485
] |
def METHOD_NAME(self, name): | [
1798
] |
def METHOD_NAME(self) -> str:
"""
The on-premises integration runtime host URL.
"""
return pulumi.get(self, "host_service_uri") | [
1806,
549,
354
] |
def METHOD_NAME(base_enderecos_novos: pd.DataFrame) -> Union[str, Path]:
"""
Merge the previously cataloged addresses with the new ones and create a CSV.
"""
# Fix the column order
cols_order = [
"endereco_completo",
"pais",
"estado",
"municipio",
"bairro",
"id_logradouro",
"logradouro",
"numero_porta",
"latitude",
"longitude",
]
base_enderecos_novos[cols_order] = base_enderecos_novos[cols_order]
# Current time in YYYY-MM-DD format, used to create partitions
current_day = pendulum.now("America/Sao_Paulo").strftime("%Y-%m-%d")
ano = current_day[:4]
mes = str(int(current_day[5:7]))
partitions = os.path.join(
f"ano_particao={ano}", f"mes_particao={mes}", f"data_particao={current_day}"
)
base_path = os.path.join(os.getcwd(), "tmp", "geolocator")
partition_path = os.path.join(base_path, partitions)
if not os.path.exists(partition_path):
os.makedirs(partition_path)
filename = os.path.join(partition_path, "base_enderecos.csv")
log(f"File is saved on: {filename}")
base_enderecos_novos.to_csv(
filename,
index=False,
)
return base_path | [
-1,
732
] |
def METHOD_NAME(self, k8s, app_spec_teams_and_tags):
actual = k8s._make_labels(app_spec_teams_and_tags)
assert actual["teams.fiaas/order-produkt-betaling"] == "true"
assert actual["tags.fiaas/hoeyt-i-stacken"] == "true"
assert actual["tags.fiaas/ad-in"] == "true"
assert actual["tags.fiaas/anonnseinnlegging"] == "true" | [
9,
93,
415,
41,
1041
] |
def METHOD_NAME(self):
"""Return the current View."""
return self._view() | [
1179
] |
def METHOD_NAME(iri):
iri = iri.replace(":"," ")
iri = iri.replace(",", " ")
iri = iri.replace(" ","")
return iri | [
275,
6081
] |
def METHOD_NAME(self):
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = models.Sequential(
(
layers.Dense(1, activation="relu"),
layers.Dense(1, activation="relu"),
)
)
model.compile(
optimizer="sgd",
loss="mae",
metrics=["mse"],
)
weights = model.get_weights()
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
stopper = callbacks.EarlyStopping(monitor="mse", patience=patience)
hist = model.fit(
data, labels, callbacks=[stopper], verbose=0, epochs=20
)
assert len(hist.epoch) >= patience | [
9,
5040,
8449,
1772
] |
def METHOD_NAME(self, message_bytes: bytes) -> str:
return self._do_sign(message_bytes, SIGN_MESSAGE_INS) | [
2452,
277
] |
def METHOD_NAME(self):
with self.assertRaisesRegex(
AssertionError,
"Parent layer must be given if no flavor config is given",
):
self.compile(include_flavor_config=False) | [
9,
654,
3032,
200,
894,
935,
94
] |
def METHOD_NAME():
filename = os.path.join(SAMPLES_DIR, "array_test_data.avro")
data = [
[34, 556, 12],
[34, 556, 12],
[34, 532, 657],
[236, 568, 12],
[34, 556, 12],
[34, 54, 967],
[34, 556, 12],
[34, 647, 12],
]
assert ak.from_avro_file(file=filename).to_list() == data | [
9,
2378,
962
] |
def METHOD_NAME(self): | [
9,
22,
200,
187
] |
def METHOD_NAME(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values)) | [
2471,
437
] |
def METHOD_NAME(word):
return word in _words | [
2236,
1985
] |
def METHOD_NAME(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state") | [
1994,
551
] |
def METHOD_NAME(self) -> str:
"""
Amazon Resource Name (ARN) of the certificate.
"""
return pulumi.get(self, "certificate_arn") | [
1548,
1059
] |
def METHOD_NAME(csv_asset: CSVAsset) -> tuple[re.Pattern, str]:
regex = re.compile(
r"(?P<name>.+)_(?P<ssn>\d{9})_(?P<timestamp>.+)_(?P<price>\d{4})\.csv"
)
data_connector: DBFSDataConnector = cast(
DBFSDataConnector, csv_asset._data_connector
)
test_connection_error_message = f"""No file at base_directory path "{data_connector._base_directory.resolve()}" matched regular expressions pattern "{data_connector._batching_regex.pattern}" and/or glob_directive "**/*" for DataAsset "csv_asset"."""
return regex, test_connection_error_message | [
1068,
211,
200
] |
def METHOD_NAME(self):
x = c_int()
self.assertEqual(x._objects, None)
x.value = 42
self.assertEqual(x._objects, None)
x = c_int(99)
self.assertEqual(x._objects, None) | [
9,
11116
] |
def METHOD_NAME(self):
"""Sets up resources for tests.
"""
self.bq_client = bigquery.Client()
self.dataset_id = 'bq_benchmark_test_dataset'
self.dataset_ref = self.bq_client.dataset(self.dataset_id)
dataset = bigquery.Dataset(self.dataset_ref)
self.dataset = self.bq_client.create_dataset(dataset)
self.table_id = 'test_table'
abs_path = os.path.abspath(os.path.dirname(__file__))
json_schema_filename = os.path.join(abs_path,
'test_schemas/test_schema.json')
self.table_util = table_util.TableUtil(
table_id=self.table_id,
dataset_id=self.dataset_id,
json_schema_filename=json_schema_filename,
) | [
0,
1
] |
def METHOD_NAME(self):
cache = WebDriverCache()
driver0, driver1, driver2 = mock(), mock(), mock()
when(driver0).quit().thenRaise(WebDriverException("stuff."))
when(driver1).quit().thenRaise(ValueError("stuff."))
when(driver2).quit().thenRaise(TimeoutException("timeout."))
cache.register(driver0, "bar0")
cache.register(driver1, "bar1")
cache.register(driver2, "bar2")
with self.assertRaises(TimeoutException):
cache.close_all()
self.verify_cache(cache) | [
9,
1462,
75,
596,
130,
7450,
168
] |
def METHOD_NAME(self): | [
9,
19,
277,
128,
9934
] |
def METHOD_NAME(filename, mode='t'):
"""Returns the type of the model in file `filename`. Returns None
if type is not known.
mode: 'b' for binary, 't' for text. Not used currently.
"""
mtype = None
msubtype = None
if mode == 't':
for typename, typefunc in list(typeChecks.items()):
if typefunc(filename):
return typename
return None | [
19,
44
] |
def METHOD_NAME(client, userdata, msg): # type: (mqtt.Client, tuple, mqtt.client.MQTTMessage) -> None
_ = userdata
global message_log
payload = msg.payload.decode()
if not event_client_received_correct.is_set() and payload == 'data':
client.publish('/topic/qos0', 'data_to_esp32')
if msg.topic == '/topic/qos0' and payload == 'data':
event_client_received_correct.set()
message_log += 'Received data:' + msg.topic + ' ' + payload + '\n' | [
69,
277
] |
def METHOD_NAME(source, target=None, interpreter=None, main=None,
filter=None, compressed=False):
"""Create an application archive from SOURCE.
The SOURCE can be the name of a directory, or a filename or a file-like
object referring to an existing archive.
The content of SOURCE is packed into an application archive in TARGET,
which can be a filename or a file-like object. If SOURCE is a directory,
TARGET can be omitted and will default to the name of SOURCE with .pyz
appended.
The created application archive will have a shebang line specifying
that it should run with INTERPRETER (there will be no shebang line if
INTERPRETER is None), and a __main__.py which runs MAIN (if MAIN is
not specified, an existing __main__.py will be used). It is an error
to specify MAIN for anything other than a directory source with no
__main__.py, and it is an error to omit MAIN if the directory has no
__main__.py.
"""
# Are we copying an existing archive?
source_is_file = False
if hasattr(source, 'read') and hasattr(source, 'readline'):
source_is_file = True
else:
source = pathlib.Path(source)
if source.is_file():
source_is_file = True
if source_is_file:
_copy_archive(source, target, interpreter)
return
# We are creating a new archive from a directory.
if not source.exists():
raise ZipAppError("Source does not exist")
has_main = (source / '__main__.py').is_file()
if main and has_main:
raise ZipAppError(
"Cannot specify entry point if the source has __main__.py")
if not (main or has_main):
raise ZipAppError("Archive has no entry point")
main_py = None
if main:
# Check that main has the right format.
mod, sep, fn = main.partition(':')
mod_ok = all(part.isidentifier() for part in mod.split('.'))
fn_ok = all(part.isidentifier() for part in fn.split('.'))
if not (sep == ':' and mod_ok and fn_ok):
raise ZipAppError("Invalid entry point: " + main)
main_py = MAIN_TEMPLATE.format(module=mod, fn=fn)
if target is None:
target = source.with_suffix('.pyz')
elif not hasattr(target, 'write'):
target = pathlib.Path(target)
with _maybe_open(target, 'wb') as fd:
_write_file_prefix(fd, interpreter)
compression = (zipfile.ZIP_DEFLATED if compressed else
zipfile.ZIP_STORED)
with zipfile.ZipFile(fd, 'w', compression=compression) as z:
for child in source.rglob('*'):
arcname = child.relative_to(source)
if filter is None or filter(arcname):
z.write(child, arcname.as_posix())
if main_py:
z.writestr('__main__.py', main_py.encode('utf-8'))
if interpreter and not hasattr(target, 'write'):
target.chmod(target.stat().st_mode | stat.S_IEXEC) | [
129,
1622
] |
def METHOD_NAME(collection):
from drf_spectacular.settings import spectacular_settings
if not collection and 'GisFeatureEnum' not in spectacular_settings.ENUM_NAME_OVERRIDES:
spectacular_settings.ENUM_NAME_OVERRIDES['GisFeatureEnum'] = ('Feature',)
if collection and 'GisFeatureCollectionEnum' not in spectacular_settings.ENUM_NAME_OVERRIDES:
spectacular_settings.ENUM_NAME_OVERRIDES['GisFeatureCollectionEnum'] = ('FeatureCollection',) | [
1592,
1206,
1766,
1112
] |