text stringlengths 15–7.82k | ids sequencelengths 1–7 |
---|---|
def METHOD_NAME(self):
return "int" | [
865,
285,
724
] |
def METHOD_NAME(snow_fraction, phase, expected):
"""Test specific values give expected results, including all meta-data"""
input_cube = set_up_variable_cube(
np.full((2, 2), fill_value=snow_fraction, dtype=np.float32),
name="snow_fraction",
units="1",
standard_grid_metadata="uk_ens",
)
result = SignificantPhaseMask()(input_cube, phase)
assert isinstance(result, iris.cube.Cube)
assert result.name() == f"{phase}_mask"
assert str(result.units) == "1"
assert result.dtype == np.int8
assert (result.data == expected).all() | [
9,
199
] |
def METHOD_NAME():
output = io.BytesIO()
im = hopper()
im.save(output, "BMP")
output.seek(0)
with Image.open(output) as reloaded:
assert im.mode == reloaded.mode
assert im.size == reloaded.size
assert reloaded.format == "BMP" | [
9,
73,
24,
321
] |
def METHOD_NAME(self):
if self.geom_gcn_preprocess:
for filename in self.raw_file_names[:2]:
url = f'{self.processed_url}/new_data/{self.name}/{filename}'
download_url(url, self.raw_dir)
for filename in self.raw_file_names[2:]:
url = f'{self.processed_url}/splits/{filename}'
download_url(url, self.raw_dir)
else:
download_url(f'{self.raw_url}/{self.name}.npz', self.raw_dir) | [
136
] |
def METHOD_NAME(self, bitChainF):
fmap = self.getFileMapF()
return self._getCodes(fmap, bitChainF) | [
19,
171,
1114,
474
] |
def METHOD_NAME(self, client):
result = client.get_service_statistics()
assert isinstance(result, dict)
assert set(result.keys()) == {"counters", "limits"} | [
9,
19,
549,
68
] |
def METHOD_NAME(self, env):
# We don't want an AMREX_HOME the user may have set already
env.unset("AMREX_HOME")
# Help force Amrvis to not pick up random system compilers
if "+mpi" in self.spec:
env.set("MPI_HOME", self.spec["mpi"].prefix)
env.set("CC", self.spec["mpi"].mpicc)
env.set("CXX", self.spec["mpi"].mpicxx)
env.set("F77", self.spec["mpi"].mpif77)
env.set("FC", self.spec["mpi"].mpifc)
# Set CONFIG_FILEPATH so Amrvis can find the configuration
# file, amrvis.defaults.
env.set("CONFIG_FILEPATH", self.spec.prefix.etc) | [
102,
56,
1027
] |
def METHOD_NAME(data_fixture):
user = data_fixture.create_user()
builder = data_fixture.create_builder_application()
page = data_fixture.create_builder_page(builder=builder)
with pytest.raises(UserNotInWorkspace):
PageService().update_page(user, page, name="test") | [
9,
86,
1174,
21,
130,
623,
1976
] |
def METHOD_NAME(self, cdb: CDB, count: int) -> None:
"""Save the CDB as the latest checkpoint.
Args:
cdb (medcat.CDB):
The MedCAT CDB object to be checkpointed.
count (int):
The number of finished steps.
"""
ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
while len(self._file_paths) >= self._max_to_keep:
to_remove = self._file_paths.pop(0)
os.remove(to_remove)
cdb.METHOD_NAME(ckpt_file_path)
logger.debug("Checkpoint saved: %s", ckpt_file_path)
self._file_paths.append(ckpt_file_path)
self._count = count | [
73
] |
def METHOD_NAME(*args):
"""Join paths to create a real path."""
return osp.abspath(osp.join(*args)) | [
-1
] |
def METHOD_NAME(data: bytes) -> List[bool]:
"""Create bit list out of a bytestring.
:param data: The modbus data packet to decode
example::
bytes = "bytes to decode"
result = unpack_bitstring(bytes)
"""
byte_count = len(data)
bits = []
for byte in range(byte_count):
value = int(int(data[byte]))
for _ in range(8):
bits.append((value & 1) == 1)
value >>= 1
return bits | [
789,
7691
] |
def METHOD_NAME(self):
self.client.login(username=self.username, password=self.password)
with domain_fixture("public", allow_domain_requests=True):
response = self.client.get(reverse("domain_homepage", args=["public"]), follow=True)
self.assertEqual(response.status_code, 200) | [
9,
2569,
1674,
311
] |
def METHOD_NAME(self, spec, prefix):
return ["--cpp_implementation"] | [
428,
1881
] |
def METHOD_NAME(self, scan_settings: model.ScanSettings):
return self.client.METHOD_NAME(scan_settings) | [
447,
793
] |
def METHOD_NAME(self):
sfp_quantity = 8
sfp_i2c_start_bus = 10
#Celestica Blacklist file
blacklist_file_path="/etc/modprobe.d/celestica-blacklist.conf"
# Blacklist the unused modules.
if os.path.exists(blacklist_file_path):
os.system("rm {0}".format(blacklist_file_path))
os.system("touch {0}".format(blacklist_file_path))
cel_paths = "/lib/modules/{0}/onl/celestica/".format(os.uname()[2])
cel_dirs = os.listdir(cel_paths)
for dir in cel_dirs:
full_cel_path=cel_paths+dir
if os.path.isdir(full_cel_path):
modules=os.listdir(full_cel_path)
for module in modules:
os.system("echo 'blacklist {0}' >> {1}".format(module[0:-3],blacklist_file_path))
print("Initialize and Install the driver here")
self.insmod("platform_cpld.ko")
self.insmod("platform_psu.ko")
self.insmod("platform_fan.ko")
# Add drivers
os.system("modprobe i2c-ismt")
os.system("modprobe optoe")
self.new_i2c_device('24c64', 0x52, 1)
# os.system("i2cset -y 1 0x70 0x10 0x00 0x01 i") # reset pca9548
self.new_i2c_device('pca9548', 0x70, 1)
self.new_i2c_device('fan', 0x32, 2)
self.new_i2c_device('24c64', 0x53, 2)
self.new_i2c_device('24c64', 0x50, 3)
self.new_i2c_device('24c02', 0x50, 4)
self.new_i2c_device('24c02', 0x51, 4)
# reinstall fsp550 because it may fail to create fanx_input node if there's error 0x80 in 0x7e register when driver probes
ret = self.register_hwdevice_multi_times('fsp550', 4, 0x58, 6, 'fan1_input')
if ret is False:
print("*** # Fail to register fsp550 on 4-0058, please check...")
ret = self.register_hwdevice_multi_times('fsp550', 4, 0x59, 6, 'fan1_input')
if ret is False:
print("*** # Fail to register fsp550 on 4-0059, please check...")
self.new_i2c_device('lm75b', 0x48, 5)
self.new_i2c_device('lm75b', 0x49, 5)
self.new_i2c_device('lm75b', 0x49, 6)
self.new_i2c_device('lm75b', 0x4a, 6)
self.new_i2c_device('pca9548', 0x71, 9)
# initialize SFP devices name
for actual_i2c_port in range(sfp_i2c_start_bus, sfp_i2c_start_bus+sfp_quantity):
self.new_i2c_device('optoe2', 0x50, actual_i2c_port)
port_number = actual_i2c_port - (sfp_i2c_start_bus-1)
os.system("echo 'SFP{1}' > /sys/bus/i2c/devices/i2c-{0}/{0}-0050/port_name".format(actual_i2c_port,port_number))
# get fan direction from FRU eeprom
os.system("i2cset -y 1 0x52 0x00 0xb0")
status,fan_direction = self.run_command("i2cget -y 0x1 0x52")
# set fan direction, B2F/F2B
if fan_direction.strip() == "0xbf":
os.system("i2cset -y -f 2 0x32 0x88 0x7")
else:
os.system("i2cset -y -f 2 0x32 0x88 0x0")
return True | [
6068
] |
def METHOD_NAME(show):
"""Build NFO file for TV Show"""
tags = {
'title': show['title'],
'showtitle': show['title'],
'plot': show.get('synopsis'),
'id': show['id'],
'mpaa': show.get('rating')
}
# Try get the year from the first season
year = show.get('seasons', [{}])[0].get('year')
if year:
# Since we have the year only, so we hardcode the month/day
tags['premiered'] = f'{year}-01-01'
root = _build_root_node('tvshow', tags)
_add_poster(root, show)
_add_fanart(root, show)
return root | [
129,
697,
6723
] |
def METHOD_NAME(self, show, episode):
self.prompt_for_add.emit(show, episode) | [
2995,
43,
238
] |
def METHOD_NAME(index_name, doc_dir):
if args.search_engine == "milvus":
document_store = MilvusDocumentStore(
embedding_dim=args.embedding_dim,
host=args.host,
index=args.index_name,
port=args.port,
index_param={"M": 16, "efConstruction": 50},
index_type="HNSW",
)
else:
launch_es()
document_store = ElasticsearchDocumentStore(
host=args.host,
port=args.port,
username="",
password="",
embedding_dim=args.embedding_dim,
index=index_name,
)
if args.embedding_type == "image":
docs = [
Document(content=f"./{args.doc_dir}/{filename}", content_type="image")
for filename in os.listdir(args.doc_dir)
]
elif args.embedding_type == "text":
docs = convert_files_to_dicts(dir_path=args.doc_dir, split_paragraphs=True, encoding="utf-8")
else:
raise NotImplementedError
print(docs[:3])
# Write the document data into the database
document_store.write_documents(docs)
if args.embedding_type == "image":
# Text-to-image search: embed the images
retriever_mm = MultiModalRetriever(
document_store=document_store,
query_embedding_model=args.query_embedding_model,
query_type="text",
document_embedding_models={"image": args.document_embedding_model},
)
else:
# Image-to-text search: embed the texts
retriever_mm = MultiModalRetriever(
document_store=document_store,
query_embedding_model=args.query_embedding_model,
query_type="image",
document_embedding_models={"text": args.document_embedding_model},
)
# Build the index
document_store.update_embeddings(retriever_mm) | [
8024,
5165
] |
def METHOD_NAME(media_file):
# This attr was added for 1.8. It is not computed for older projects.
if (not hasattr(media_file, "info")):
media_file.info = None
# We need this in all media files, used only by img seq media.
if not hasattr(media_file, "ttl"):
media_file.ttl = None
# Add container data if not found.
if not hasattr(media_file, "container_data"):
media_file.container_data = None
# ContainerClipMediaItem objects were missing this but it mostly didn't cause problems.
if type(media_file).__name__ == "ContainerClipMediaItem":
if not hasattr(media_file, "current_frame"):
media_file.current_frame = 0 | [
7279,
1104,
7280,
4197,
-1,
385,
16461
] |
def METHOD_NAME(self, obj):
# serialize the data before acquiring the lock
obj = dumps(obj, reducers=self._reducers)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj) | [
1276
] |
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id") | [
147
] |
def METHOD_NAME(self):
self.precision = 1.e-6
self.beta = 50.0 | [
0,
1
] |
def METHOD_NAME(self):
"""
Generates ryu requests for subscriber flows
Based on the provided ip create 2 flows, that are matched based on
dst/src ip and set value of the direction register.
Additional reg values are set from set_reg_value
"""
uplink = copy.deepcopy(self._request)
downlink = copy.deepcopy(self._request)
uplink["instructions"].append({
"type": "APPLY_ACTIONS",
"actions": self._reg_sets + [self._ulink_action],
})
downlink["instructions"].append({
"type": "APPLY_ACTIONS",
"actions": self._reg_sets + [self._dlink_action],
})
ip_addr = convert_ip_str_to_ip_proto(self._ip)
if ip_addr.version == IPAddress.IPV4:
uplink["match"].update(
{"ipv4_src": self._ip},
)
downlink["match"].update(
{"ipv4_dst": self._ip},
)
else:
uplink["match"].update(
{"ipv6_src": self._ip},
)
downlink["match"].update(
{"ipv6_dst": self._ip},
)
return [uplink, downlink] | [
129,
2042,
1213,
311
] |
def METHOD_NAME(coords): return int(np.average(coords, weights=mask * grey_image)) | [
4430
] |
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict()) | [
24,
3
] |
def METHOD_NAME():
frr = MagicMock()
frr.get_config = MagicMock(return_value = """!
text1
! comment
text2
text3
! comment
text4
""")
c = ConfigMgr(frr)
c.update()
assert c.current_config_raw == [' text1', ' text2', ' text3', ' text4', ' ', ' ']
assert c.current_config == [['text1'], ['text2'], ['text3'], ['text4']] | [
9,
86
] |
def METHOD_NAME(self, num_classes, global_pool='avg'):
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.num_classes = num_classes
self.classifier = nn.Linear(
self.num_features * self.global_pool.feat_mult(),
num_classes) if self.num_classes else None | [
656,
810
] |
def METHOD_NAME(self, newBuildCallback, flushErrors=False):
newConsumer = yield self.master.mq.startConsuming(
newBuildCallback,
('builds', None, 'new'))
build = yield self.doForceBuild(wantSteps=True,
useChange=self.change,
wantLogs=True)
self.assertBuildIsCancelled(build)
newConsumer.stopConsuming()
builds = yield self.master.data.get(("builds",))
for b in builds:
self.assertBuildIsCancelled(b)
if flushErrors:
self.flushLoggedErrors() | [
22,
9
] |
def METHOD_NAME(self) -> str:
"""
the ARN of the custom plugin.
"""
return pulumi.get(self, "arn") | [
1059
] |
def METHOD_NAME(
get_ocr_analyzer_results
):
ocr_result, text, recognizer_result = get_ocr_analyzer_results
mapped_entities = ImageAnalyzerEngine.map_analyzer_results_to_bounding_boxes(
recognizer_result, ocr_result, text, allow_list=["Katie", "Cromley."]
)
assert len(mapped_entities) == 0 | [
9,
1393,
2569,
245,
868,
422,
2224
] |
def METHOD_NAME(self, hash):
hash_str = hash.hex().lower()
return self._cache_dir / hash_str[:2] / hash_str[2:] | [
1161,
157
] |
def METHOD_NAME(self):
"""Verify the responded queryset."""
# Create a question, there shouldn't be any responded yet.
q = QuestionFactory()
self.assertEqual(0, Question.objects.responded().count())
# Add an answer, there should be one responded.
a = AnswerFactory(question=q)
self.assertEqual(1, Question.objects.responded().count())
# Add an answer by the creator, there should be none responded.
a = AnswerFactory(creator=q.creator, question=q)
self.assertEqual(0, Question.objects.responded().count())
# Add another answer, there should be one responded.
a = AnswerFactory(question=q)
self.assertEqual(1, Question.objects.responded().count())
# Lock it, there should be none responded.
q.is_locked = True
q.save()
self.assertEqual(0, Question.objects.responded().count())
# Unlock it and mark solved, there should be none responded.
q.is_locked = False
q.solution = a
q.save()
self.assertEqual(0, Question.objects.responded().count()) | [
9,
-1
] |
def METHOD_NAME(self):
"""Print memory usage average over time. We would like this value
to be as high as possible."""
assert self.track_usage, 'You need to enable track usage.'
if torch.distributed.get_rank() == 0:
print(' > usage of {} memory buffer: {:.2f} %'.format(
self.name, self.in_use_value * 100.0 / self.total_value),
flush=True) | [
38,
1867,
558
] |
def METHOD_NAME(self, mock_query):
# Create a job that appears to be in progress
job = HarvesterBackgroundTask.prepare("testuser")
job.start()
job.save(blocking=True)
job2 = HarvesterBackgroundTask.prepare("testuser")
task = HarvesterBackgroundTask(job2)
BackgroundApi.execute(task)
assert not mock_query.called, "mock_query was called when it shouldn't have been"
time.sleep(1)
job3 = models.BackgroundJob.pull(job2.id)
assert job3.status == "error", "expected 'error', got '{x}'".format(x=job3.status) | [
9,
447,
107
] |
def METHOD_NAME(self, size=None):
if self._remaining == 0:
return b""
if size is None:
e = self._base_data_length
else:
e = size
e = min(e, self._remaining)
if e > self._base_data_length:
self._base_data = urandom(e)
self._base_data_length = e
self._remaining = self._remaining - e
return self._base_data[:e] | [
203
] |
def METHOD_NAME(self, event):
builtins.width = int(self.size[0])
builtins.height = int(self.size[1])
p5.renderer.reset_view()
with p5.renderer.draw_loop():
p5.renderer.clear() | [
69,
1128
] |
def METHOD_NAME(self, data):
self.stack[-1][2].append(data) | [
238,
5859,
365
] |
def METHOD_NAME(self):
results = []
todo = list(providedBy(self.context))
done = []
while todo:
interface = todo.pop()
done.append(interface)
for base in interface.__bases__:
if base not in todo and base not in done:
todo.append(base)
markers = self._getDirectMarkersOf(interface)
for interface in markers:
if interface not in results and \
not interface.providedBy(self.context):
results.append(interface)
todo += markers
return tuple(results) | [
19,
1272,
703
] |
def METHOD_NAME(self):
return list(map(self.generate_data_file, self.src_img_tags)) | [
370,
2405,
1537
] |
def METHOD_NAME(self):
"""
For data blocks, convert known Relion columns to sensible types.
"""
_blocks = OrderedDict()
for block_name, block in self.blocks.items():
_blocks[block_name] = dict_to_relion_types(block)
self.blocks = _blocks | [
197,
4303
] |
def METHOD_NAME(self): | [
9,
1950,
1770,
573,
45
] |
def METHOD_NAME(header: str,
content: str,
node_link: str,
header_index_offset: int) -> str:
# Downgrade headers and use HTML to avoid creating sidebar items for them
for header_index in range(4, 0, -1):
markdown_header_prefix = '#' * header_index
parts = re.split(f'\n{markdown_header_prefix} ([^\n]*)\n', content)
# Nothing to do
if len(parts) == 1:
continue
# We could start a section without a header
if len(parts) % 2:
section_headers = ['', *parts[1::2]]
section_contents = parts[0::2]
else:
section_headers = parts[0::2]
section_contents = parts[1::2]
content = ''
for section_header, section_content in zip(section_headers, section_contents):
if not section_header:
content += section_content
continue
header_html = build_header(
header=section_header,
header_index=(header_index + header_index_offset),
node_link=node_link,
)
content += f'\n{header_html}\n{section_content}'
return content | [
369,
2131
] |
def METHOD_NAME(
self,
render_map: Map,
) -> List[Tuple[Layer, BaseMapServerLayerFactory]]:
layers_plus_factories = []
type_to_layer_factory = {}
for layer in render_map.layers:
layer_type = type(layer)
if layer_type in type_to_layer_factory:
factory = type_to_layer_factory[layer_type]
else:
factory = self._get_layer_factory(layer_type)
type_to_layer_factory[layer_type] = factory
layers_plus_factories.append((layer, factory()))
return layers_plus_factories | [
19,
2315,
222,
7291
] |
def METHOD_NAME(self):
self.assertEqual(
available_namespaces(), ("hdmf-common", "hdmf-experimental", "core")
) | [
9,
1272,
6824
] |
def METHOD_NAME(self, string, message=None, policy=None):
if policy is None:
policy = self.policy
if message is None:
message = self.message
return email.message_from_string(string, message, policy=policy) | [
3,
169
] |
def METHOD_NAME(self):
self.pkgdesc = f"{pkgdesc} (demo applications)"
return [
"usr/bin/gtk4-demo",
"usr/bin/gtk4-widget-factory",
"usr/bin/gtk4-demo-application",
"usr/bin/gtk4-print-editor",
"usr/bin/gtk4-node-editor",
"usr/share/man/man1/gtk4-demo.1",
"usr/share/man/man1/gtk4-widget-factory.1",
"usr/share/man/man1/gtk4-demo-application.1",
"usr/share/man/man1/gtk4-node-editor.1",
"usr/share/metainfo/org.gtk.Demo4.appdata.xml",
"usr/share/metainfo/org.gtk.WidgetFactory4.appdata.xml",
"usr/share/metainfo/org.gtk.gtk4.NodeEditor.appdata.xml",
"usr/share/gtk-4.0/gtk4builder.rng",
"usr/share/glib-2.0/schemas/org.gtk.Demo4.gschema.xml",
"usr/share/applications/org.gtk.Demo4.desktop",
"usr/share/applications/org.gtk.PrintEditor4.desktop",
"usr/share/applications/org.gtk.WidgetFactory4.desktop",
"usr/share/applications/org.gtk.gtk4.NodeEditor.desktop",
"usr/share/icons/hicolor/scalable/apps/org.gtk.Demo4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.Demo4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.PrintEditor4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.PrintEditor4.Devel.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.WidgetFactory4.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.WidgetFactory4-symbolic.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.gtk4.NodeEditor.Devel.svg",
"usr/share/icons/hicolor/scalable/apps/org.gtk.gtk4.NodeEditor.svg",
"usr/share/icons/hicolor/symbolic/apps/org.gtk.gtk4.NodeEditor-symbolic.svg",
] | [
2660
] |
def METHOD_NAME(process):
from FWCore.ParameterSet.Config import Task
l = [ p for p in process.producers.values()]
l.extend( (f for f in process.filters.values()) )
return Task(*l) | [
129,
758,
41,
75,
9145,
61,
469
] |
def METHOD_NAME(self): | [
9,
1195,
747,
2282,
1234
] |
def METHOD_NAME(src_obj_metadata,
progress=0,
user_project=None):
"""Returns download serialization data.
There are five entries:
auto_transfer: JSON-specific field, always False.
progress: How much of the download has already been completed.
total_size: Total object size.
url: Implementation-specific field used for saving a metadata get call.
For JSON, this is the download URL of the object.
For XML, this is a pickled boto key.
user_project: Project to be billed to, added as query param.
Args:
src_obj_metadata: Object to be downloaded.
progress: See above.
user_project: User project to add to query string.
Returns:
Serialization data for use with Cloud API GetObjectMedia.
"""
url = src_obj_metadata.mediaLink
if user_project:
url = AddQueryParamToUrl(url, 'userProject', user_project)
if six.PY3:
if isinstance(url, bytes):
url = url.decode('ascii')
serialization_dict = {
'auto_transfer': 'False',
'progress': progress,
'total_size': src_obj_metadata.size,
'url': url
}
return json.dumps(serialization_dict) | [
19,
136,
2109,
365
] |
def METHOD_NAME(xml_file):
try:
doc = xml.dom.minidom.parse(xml_file)
except:
print("Error: invalid xml file", file=sys.stderr)
sys.exit(-1)
invocation_labels = []
for method in doc.getElementsByTagName("method"):
invocation_labels.append((str(method.getAttribute("id")),
str(condition_to_cpp(method.getElementsByTagName("condition")))))
return invocation_labels | [
214,
399
] |
def METHOD_NAME(self):
mapping = {0.5: 1, 1.5: 0}
a_result = np.array([[6.0, 1.0], [1.0, 0.0]])
a = nx.attribute_mixing_matrix(
self.F, "margin", mapping=mapping, normalized=False
)
np.testing.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping)
np.testing.assert_equal(a, a_result / a_result.sum()) | [
9,
309,
11704,
430,
1819
] |
def METHOD_NAME(freqs, decays, amplitudes, phases, time_step):
t = np.arange(NTIME)
complex_amplitudes = amplitudes * np.exp(1j * phases)
complex_freqs = 2 * np.pi * freqs - 1j * decays
signal = np.zeros(len(t), dtype=complex)
for i in range(len(freqs)):
signal += complex_amplitudes[i] * np.exp(-1j * complex_freqs[i] * t * time_step)
return signal | [
567,
900
] |
def METHOD_NAME(self, scheduler, alias):
super(ZooKeeperJobStore, self).METHOD_NAME(scheduler, alias)
if not self.client.connected:
self.client.METHOD_NAME() | [
447
] |
def METHOD_NAME(service_allowed_endpoint):
"""Check correct headers for every endpoint."""
methods, request, svc_client = service_allowed_endpoint
method = request["allowed_method"]
client_method = methods.pop(method)
response = client_method(request["url"], headers=request["headers"])
assert_rpc_response(response, "error")
assert UserAnonymousError.code == response.json["error"]["code"] | [
9,
2433,
2131,
242
] |
def METHOD_NAME(request):
if request.param == "interval":
base = UnitIntervalMesh(4)
elif request.param == "square":
base = UnitSquareMesh(5, 4)
elif request.param == "quad-square":
base = UnitSquareMesh(4, 6, quadrilateral=True)
return ExtrudedMesh(base, layers=10, layer_height=0.1,
extrusion_type="uniform", name=mesh_name) | [
4431,
1949
] |
def METHOD_NAME(self, value):
"""
:type value: file-like object
:param value: A file-like object containing the content
of the message. The actual content will be stored
in S3 and a link to the S3 object will be stored in
the message body.
"""
bucket_name, key_name = self._get_bucket_key(self.s3_url)
if bucket_name and key_name:
return self.s3_url
key_name = uuid.uuid4()
s3_conn = boto.connect_s3()
s3_bucket = s3_conn.get_bucket(bucket_name)
key = s3_bucket.new_key(key_name)
key.set_contents_from_file(value)
self.s3_url = 's3://%s/%s' % (bucket_name, key_name)
return self.s3_url | [
421
] |
def METHOD_NAME():
headers = make_headers("unsupported-type", "predict")
data = "aW1hZ2UgYnl0ZXM="
for MODEL_NAME in MODEL_NAMES:
response = requests.post(INVOCATION_URL.format(MODEL_NAME), data=data, headers=headers)
assert 500 == response.status_code
assert "unsupported content type" in response.text | [
9,
1950,
459,
44
] |
def METHOD_NAME(gcs_client, target_bucket_name, log_table_id) -> bool:
"""
Function to check file exist in gcs bucket
"""
bucket = gcs_client.bucket(target_bucket_name)
return storage.Blob(bucket=bucket, name=log_table_id).exists(gcs_client) | [
171,
1985
] |
def METHOD_NAME(self):
self._backup_platform = sys.platform
self._backup_get_config_var = sysconfig.get_config_var
class CompilerWrapper(UnixCCompiler):
def rpath_foo(self):
return self.runtime_library_dir_option('/foo')
self.cc = CompilerWrapper() | [
0,
1
] |
def METHOD_NAME(self):
# Mine some blocks and have them mature.
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]["txid"]
vout = utxo[0]["vout"]
value = utxo[0]["amount"]
fee = Decimal("0.0002")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for _ in range(4):
(txid, sent_value) = self.chain_transaction(
self.nodes[0], [txid], [vout], value, fee, 2
)
vout = 0
value = sent_value
chain.append([txid, value])
for _ in range(MAX_ANCESTORS - 4):
(txid, sent_value) = self.chain_transaction(
self.nodes[0], [txid], [0], value, fee, 1
)
value = sent_value
chain.append([txid, value])
(second_chain, second_chain_value) = self.chain_transaction(
self.nodes[0],
[utxo[1]["txid"]],
[utxo[1]["vout"]],
utxo[1]["amount"],
fee,
1,
)
# Check mempool has MAX_ANCESTORS + 1 transactions in it
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 1)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(
-26,
"too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]",
self.chain_transaction,
self.nodes[0],
[txid],
[0],
value,
fee,
1,
)
# ...even if it chains on from some point in the middle of the chain.
assert_raises_rpc_error(
-26,
"too-long-mempool-chain, too many descendants",
self.chain_transaction,
self.nodes[0],
[chain[2][0]],
[1],
chain[2][1],
fee,
1,
)
assert_raises_rpc_error(
-26,
"too-long-mempool-chain, too many descendants",
self.chain_transaction,
self.nodes[0],
[chain[1][0]],
[1],
chain[1][1],
fee,
1,
)
# ...even if it chains on to two parent transactions with one in the chain.
assert_raises_rpc_error(
-26,
"too-long-mempool-chain, too many descendants",
self.chain_transaction,
self.nodes[0],
[chain[0][0], second_chain],
[1, 0],
chain[0][1] + second_chain_value,
fee,
1,
)
# ...especially if its > 40k weight
assert_raises_rpc_error(
-26,
"too-long-mempool-chain, too many descendants",
self.chain_transaction,
self.nodes[0],
[chain[0][0]],
[1],
chain[0][1],
fee,
350,
)
# But not if it chains directly off the first transaction
self.chain_transaction(self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 1)
# and the second chain should work just fine
self.chain_transaction(
self.nodes[0], [second_chain], [0], second_chain_value, fee, 1
)
# Finally, check that we added two transactions
assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 3) | [
22,
9
] |
def METHOD_NAME(self, g, indices):
"""Sampling function
Parameters
----------
g : DGLGraph
The graph to sample from.
indices : Tensor
Placeholder not used.
Returns
-------
DGLGraph
The sampled subgraph.
"""
node_ids = self.sampler(g)
sg = g.subgraph(
node_ids, relabel_nodes=True, output_device=self.output_device
)
set_node_lazy_features(sg, self.prefetch_ndata)
set_edge_lazy_features(sg, self.prefetch_edata)
return sg | [
734
] |
async def METHOD_NAME(self, request, response): | [
69,
123,
17
] |
def METHOD_NAME():
status = namedtuple('status', 'status color code sort_value')
return status | [
176,
452
] |
def METHOD_NAME(self):
"""Configure build: <single-line description how this deviates from standard configure>"""
# set generic make options
self.cfg.update('buildopts', 'CC="%s" OPTFLAGS="%s"' % (os.getenv('MPICC'), os.getenv('CFLAGS')))
if LooseVersion(self.version) >= LooseVersion("3.2"):
# set correct start_dir dir, and change into it
# test whether it already contains 'src', since a reprod easyconfig would
if os.path.basename(self.cfg['start_dir']) != 'src':
self.cfg['start_dir'] = os.path.join(self.cfg['start_dir'], 'src')
try:
os.chdir(self.cfg['start_dir'])
except OSError as err:
raise EasyBuildError("Failed to change to correct source dir %s: %s", self.cfg['start_dir'], err)
# run autoconf to generate configure script
cmd = "autoconf"
run_cmd(cmd)
# set config opts
beagle = get_software_root('beagle-lib')
if beagle:
self.cfg.update('configopts', '--with-beagle=%s' % beagle)
else:
if get_software_root('BEAGLE'):
self.log.nosupport('BEAGLE module as dependency, should be beagle-lib', '2.0')
raise EasyBuildError("beagle-lib module not loaded?")
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--enable-mpi')
# configure
super(EB_MrBayes, self).METHOD_NAME()
else:
# no configure script prior to v3.2
self.cfg.update('buildopts', 'MPI=yes') | [
111,
367
] |
def METHOD_NAME(
self, filename_or_file: str | IO[AnyStr] | None = None, indent: str = " ", newl: str = ..., encoding: str = "UTF-8"
) -> None: ... | [
13556
] |
def METHOD_NAME(line):
features = tf.decode_csv(line, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, features))
labels = features.pop(LABEL_COLUMN)
return features, labels | [
1268,
732
] |
def METHOD_NAME(filename):
top_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
script_file = os.path.join(top_dir, filename)
with open(script_file, "r") as f:
out = f.read()
return out | [
203,
782
] |
def METHOD_NAME(self):
escrow_address = create_escrow(self.w3)
with patch("src.chain.escrow.get_web3") as mock_function:
with patch("src.chain.escrow.EscrowClient") as mock_client:
mock_escrow_client = mock_client.return_value
mock_escrow_client.get_manifest_url.return_value = "invalid_url"
mock_function.return_value = self.w3
with self.assertRaises(StorageClientError) as error:
get_escrow_manifest(self.w3.eth.chain_id, escrow_address)
self.assertEqual(f"Invalid URL: invalid_url", str(error.exception)) | [
9,
19,
9913,
1220,
532,
274
] |
def METHOD_NAME(self, request):
"""
View to handle final steps of OAuth based authentication where the user
gets redirected back to from the service provider
"""
login_done_url = reverse(self.adapter.provider_id + "_callback")
client = self._get_client(request, login_done_url)
if not client.is_valid():
if "denied" in request.GET:
error = AuthError.CANCELLED
else:
error = AuthError.UNKNOWN
extra_context = dict(oauth_client=client)
return render_authentication_error(
request,
self.adapter.provider_id,
error=error,
extra_context=extra_context,
)
app = self.adapter.get_provider().app
try:
access_token = client.get_access_token()
token = SocialToken(
app=app,
token=access_token["oauth_token"],
# .get() -- e.g. Evernote does not feature a secret
token_secret=access_token.get("oauth_token_secret", ""),
)
login = self.adapter.complete_login(
request, app, token, response=access_token
)
login.token = token
login.state = SocialLogin.unstash_state(request)
return complete_social_login(request, login)
except OAuthError as e:
return render_authentication_error(
request, self.adapter.provider_id, exception=e
) | [
2506
] |
def METHOD_NAME(self):
# use this method to get the object from the whole organization instead of the current team
pk = self.kwargs["pk"]
organization = self.request.auth.organization
try:
obj = organization.webhooks.filter(*self.available_teams_lookup_args).distinct().get(public_primary_key=pk)
except ObjectDoesNotExist:
raise NotFound
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj | [
19,
279,
280,
1044
] |
def METHOD_NAME(self):
self._build_with_qmake()
self._build_with_meson()
self._build_with_cmake_find_package_multi() | [
56
] |
def METHOD_NAME(cls, **cfg) -> Container:
cfg = Container(cfg)
workspace = Workspace(cfg.workspace)
fix_random_seeds()
upstream = cfg.upstream()
stats = Container(
feat_frame_shift=upstream.downsample_rate,
)
logger.info("Preparing corpus")
train_data, valid_data, test_data, corpus_stats = cfg.corpus().split(3)
stats = corpus_stats.add(stats)
logger.info("Preparing train data")
train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
train_sampler = cfg.train_sampler(train_dataset)
stats.override(train_dataset.all_tools())
workspace.environ.update(stats)
logger.info("Preparing valid data")
valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(
valid_data, **dict(workspace.environ)
)
valid_sampler = cfg.valid_sampler(valid_dataset)
logger.info("Preparing test data")
test_dataset = cfg.test_datapipe(**dict(workspace.environ))(
test_data, **dict(workspace.environ)
)
test_sampler = cfg.test_sampler(test_dataset)
logger.info("Preparing model and task")
downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
model = UpstreamDownstreamModel(upstream, downstream)
task = cfg.task(model, **dict(workspace.environ))
workspace["train_data"] = train_data
workspace["valid_data"] = valid_data
workspace["test_data"] = test_data
workspace["train_dataset"] = train_dataset
workspace["train_sampler"] = train_sampler
workspace["valid_dataset"] = valid_dataset
workspace["valid_sampler"] = valid_sampler
workspace["test_dataset"] = test_dataset
workspace["test_sampler"] = test_sampler
workspace["task"] = task | [
102
] |
def METHOD_NAME(
user: AnyUser,
payload: Payload,
type: str, # pylint: disable=redefined-builtin
permission: Permission,
kwargs: Dict[str, Any]
) -> Iterable[Any]:
"""
Gets the objects corresponding to a permission.
Raises AuthenticationFailed if the user doesn't have permission.
Create permissions do not have objects, so they should return empty lists if kwargs are otherwise ok.
"""
global _jwt_accessible_managers # pylint: disable=global-variable-not-assigned
cls = _jwt_accessible_managers.get(type)
if cls is None:
# Fail by default
logger.info(f"Missing jwt class for type {type}") # pylint: disable=logging-fstring-interpolation
raise AuthenticationFailed(format_lazy(
_("NO_JWT_PERMISSION -- {permission}, {type}, {kwargs}"),
permission=permission,
type=type,
kwargs=kwargs
))
items = cls.from_jwt_permission(user, payload, permission, kwargs, auth_settings().DISABLE_LOGIN_CHECKS)
if items is None:
logger.info( # pylint: disable=logging-fstring-interpolation
f"{payload.sub} (signed by {payload.iss}) tried to get {permission} access to {type} with {kwargs}"
)
raise AuthenticationFailed(format_lazy(
_("NO_JWT_PERMISSION -- {permission}, {type}, {kwargs}"),
permission=permission,
type=type,
kwargs=kwargs
))
return items | [
19,
635,
280,
204
] |
def METHOD_NAME(data, fractions_desired, only_upper_part=False, _buffer_size=10):
"""
Computes for a given sampled distribution the highest density region
of the desired fractions.
Does not assume anything on the normalisation of the data.
:param data: Sampled distribution
:param fractions_desired: numpy.array Area/probability for which
the hdr should be computed.
:param _buffer_size: Size of the result buffer. The size is
equivalent to the maximal number of allowed intervals.
:param only_upper_part: Boolean, if true only computes
area/probability between maximum and current height.
:return: two arrays: The first one stores the start and inclusive
endindex of the highest density region. The second array holds
the amplitude for which the desired fraction was reached.
Note:
Also goes by the name highest posterior density. Please note,
that the right edge corresponds to the right side of the sample.
Hence the corresponding index is -= 1.
"""
fi = 0 # number of fractions seen
# Buffer for the result; if we find more than _buffer_size edges the function fails.
# User can then manually increase the buffer if needed.
res = np.zeros((len(fractions_desired), 2, _buffer_size), dtype=np.int32)
res_amp = np.zeros(len(fractions_desired), dtype=np.float32)
area_tot = np.sum(data)
if area_tot <= 0:
raise ValueError('Highest density regions are not defined for distributions '
'with a total probability of less-equal 0.')
# Need an index which sorted by amplitude
max_to_min = np.argsort(data, kind='mergesort')[::-1]
lowest_sample_seen = np.inf
for j in range(1, len(data)):
# Loop over indices compute fractions from max to min
if lowest_sample_seen == data[max_to_min[j]]:
# We saw this sample height already, so no need to repeat
continue
lowest_sample_seen = data[max_to_min[j]]
lowest_sample_seen *= int(only_upper_part)
sorted_data_max_to_j = data[max_to_min[:j]]
fraction_seen = np.sum(sorted_data_max_to_j - lowest_sample_seen) / area_tot
# Check if this height step exceeded at least one of the desired
# fractions
m = fractions_desired[fi:] <= fraction_seen
if not np.any(m):
# If we do not exceed go to the next sample.
continue
for fraction_desired in fractions_desired[fi:fi + np.sum(m)]:
# Since we loop always to the height of the next highest sample
# it might happen that we overshoot the desired fraction. Similar
# to the area deciles algorithm we have now to figure out at which
# height we actually reached the desired fraction and store the
# corresponding height:
g = fraction_desired / fraction_seen
# The following gives the true height, to get here one has to
# solve for h:
# 1. fraction_seen = sum_{i=0}^j (y_i - y_j) / a_total
# 2. fraction_desired = sum_{i=0}^j (y_i - h) / a_total
# 3. g = fraction_desired/fraction_seen
# j == number of seen samples
# n == number of total samples in distribution
true_height = (1 - g) * np.sum(sorted_data_max_to_j) / j + g * lowest_sample_seen
res_amp[fi] = true_height
# Find gaps and get edges of hdr intervals:
ind = np.sort(max_to_min[:j])
gaps = np.arange(1, len(ind) + 1)
g0 = 0
g_ind = -1
diff = ind[1:] - ind[:-1]
gaps = gaps[:-1][diff > 1]
if len(gaps) > _buffer_size:
# This signal has more boundaries than the buffer can hold
# hence set all entries to -1 instead.
res[fi, 0, :] = -1
res[fi, 1, :] = -1
fi += 1
else:
for g_ind, g in enumerate(gaps):
# Loop over all gaps and get outer edges:
interval = ind[g0:g]
res[fi, 0, g_ind] = interval[0]
res[fi, 1, g_ind] = interval[-1] + 1
g0 = g
# Now we have to do the last interval:
interval = ind[g0:]
res[fi, 0, g_ind + 1] = interval[0]
res[fi, 1, g_ind + 1] = interval[-1] + 1
fi += 1
if fi == (len(fractions_desired)):
# Found all fractions so we are done
return res, res_amp
# If we end up here this might be due to an offset
# of the distribution with respect to zero. In that case it can
# happen that we do not find all desired fractions.
# Hence we have to enforce to compute the last step from the last
# lowest height we have seen to zero.
# Left and right edge is by definition 0 and len(data):
res[fi:, 0, 0] = 0
res[fi:, 1, 0] = len(data)
# Now we have to compute the heights for the fractions we have not
# seen yet, since lowest_sample_seen == 0 and j == len(data)
# the formula above reduces to:
for ind, fraction_desired in enumerate(fractions_desired[fi:]):
res_amp[fi+ind] = (1-fraction_desired) * np.sum(data)/len(data)
return res, res_amp | [
1211,
2915,
1216
] |
async def METHOD_NAME(api: OT3API, report: CSVReport, section: str) -> None:
"""Run."""
ax = Axis.P_L
mount = OT3Mount.LEFT
settings = helpers_ot3.get_gantry_load_per_axis_motion_settings_ot3(api, ax)
default_current = settings.run_current
default_speed = settings.max_speed
_, _, blow_out, _ = helpers_ot3.get_plunger_positions_ot3(api, mount)
async def _save_result(tag: str) -> bool:
est, enc, aligned = await _is_plunger_still_aligned_with_encoder(api)
print(f"Estimate: {est}")
print(f"Encoder: {enc}")
result = CSVResult.from_bool(aligned)
report(section, tag, [est, enc, result])
return aligned
await api.home_z(OT3Mount.LEFT)
slot_5 = helpers_ot3.get_slot_calibration_square_position_ot3(5)
home_pos = await api.gantry_position(OT3Mount.LEFT)
await api.move_to(OT3Mount.LEFT, slot_5._replace(z=home_pos.z))
# LOOP THROUGH CURRENTS + SPEEDS
currents = list(CURRENTS_SPEEDS.keys())
for current in sorted(currents, reverse=True):
speeds = CURRENTS_SPEEDS[current]
for speed in sorted(speeds, reverse=False):
ui.print_header(f"CURRENT: {current}, SPEED: {speed}")
# HOME
print("homing...")
await api.home([ax])
print(f"lowering run-current to {current} amps")
await helpers_ot3.set_gantry_load_per_axis_current_settings_ot3(
api,
ax,
run_current=current,
)
await helpers_ot3.set_gantry_load_per_axis_motion_settings_ot3(
api, ax, default_max_speed=speed
)
# MOVE DOWN
print(f"moving down {blow_out} mm at {speed} mm/sec")
await _save_result(_get_test_tag(current, speed, "down", "start"))
await helpers_ot3.move_plunger_absolute_ot3(
api, mount, blow_out, speed=speed, motor_current=current
)
down_passed = await _save_result(
_get_test_tag(current, speed, "down", "end")
)
# MOVE UP
print(f"moving up {blow_out} mm at {speed} mm/sec")
await _save_result(_get_test_tag(current, speed, "up", "start"))
await helpers_ot3.move_plunger_absolute_ot3(
api, mount, 0, speed=speed, motor_current=current
)
up_passed = await _save_result(_get_test_tag(current, speed, "up", "end"))
# RESET CURRENTS AND HOME
print("homing...")
await helpers_ot3.set_gantry_load_per_axis_current_settings_ot3(
api, ax, run_current=default_current
)
await helpers_ot3.set_gantry_load_per_axis_motion_settings_ot3(
api, ax, default_max_speed=default_speed
)
await api._backend.set_active_current({Axis.P_L: default_current})
await api.home([ax])
if not down_passed or not up_passed and not api.is_simulator:
print(f"current {current} failed")
print("skipping any remaining speeds at this current")
break | [
22
] |
async def METHOD_NAME(self, body):
posters = await self.service.get_posters_presented_by_user(
user_id=body["user_id"]
)
await self.consumer.send_success(posters) | [
-1,
604,
21
] |
def METHOD_NAME(host):
res = host.command("ntpstat", module_ignore_errors=True)
if res['rc'] != 0:
return False
return True | [
250,
10604,
452
] |
def METHOD_NAME():
devs = GLOBALCONFIG["DEVS"]
for index in range(len(devs) - 1, -1, -1):
removeDev(devs[index]["bus"], devs[index]["loc"]) | [
-1
] |
def METHOD_NAME():
mock_entrypoint = mock.Mock()
mock_entrypoint.name = "mock-scheme"
with mock.patch("entrypoints.get_group_all", return_value=[mock_entrypoint]):
# Entrypoints are registered at import time, so we need to reload the
# module to register the entrypoint given by the mocked
# entrypoints.get_group_all
reload(artifact_repository_registry)
expected_artifact_repository_registry = {
"",
"s3",
"gs",
"wasbs",
"ftp",
"sftp",
"dbfs",
"mock-scheme",
}
assert expected_artifact_repository_registry.issubset(
artifact_repository_registry._artifact_repository_registry._registry.keys()
) | [
9,
2356,
1831,
510
] |
def METHOD_NAME(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1))) | [
9,
1161
] |
def METHOD_NAME(self):
modules = []
for i in self.tree_node.iter_imports():
if i.is_star_import():
name = i.get_paths()[-1][-1]
new = infer_import(self, name)
for module in new:
if isinstance(module, ModuleContext):
modules += module.METHOD_NAME()
modules += new
return modules | [
4889,
2460
] |
def METHOD_NAME(self, tool_consumer_instance_guid, resource_link_id):
"""Get an assignment by resource_link_id."""
return (
self._db.query(Assignment)
.filter_by(
tool_consumer_instance_guid=tool_consumer_instance_guid,
resource_link_id=resource_link_id,
)
.one_or_none()
) | [
19,
776
] |
def METHOD_NAME(signatures,
examples_batch,
single_feature_name="feature"):
"""Creates example parser from given signatures.
Args:
signatures: Dict of `TensorSignature` objects or single `TensorSignature`.
examples_batch: string `Tensor` of serialized `Example` proto.
single_feature_name: string, single feature name.
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
"""
feature_spec = {}
if not isinstance(signatures, dict):
feature_spec[single_feature_name] = signatures.get_feature_spec()
else:
feature_spec = {
key: signatures[key].get_feature_spec() for key in signatures
}
features = parsing_ops.parse_example(examples_batch, feature_spec)
if not isinstance(signatures, dict):
# Returns single feature, casts if needed.
features = features[single_feature_name]
if not signatures.dtype.is_compatible_with(features.dtype):
features = math_ops.cast(features, signatures.dtype)
return features
# Returns dict of features, casts if needed.
for name in features:
if not signatures[name].dtype.is_compatible_with(features[name].dtype):
features[name] = math_ops.cast(features[name], signatures[name].dtype)
return features | [
129,
1441,
1319,
280,
621
] |
def METHOD_NAME(
get_with_offset: Callable[[int], T],
offset: int = 0,
pages: PageOpts = PageOpts.all,
) -> Iterator[T]:
while True:
resp = get_with_offset(offset)
pagination = resp.pagination
assert pagination is not None
assert pagination.endIndex is not None
assert pagination.total is not None
yield resp
if pagination.endIndex >= pagination.total or pages == PageOpts.single:
break
assert pagination.endIndex is not None
offset = pagination.endIndex | [
203,
5029
] |
def METHOD_NAME(client):
"""Test raising an exception if the index UID is None."""
with pytest.raises(Exception):
client.get_index(uid=None) | [
9,
19,
724,
41,
98,
3668
] |
def METHOD_NAME(self, attr, v):
self.graph_attrs[attr] = v | [
80,
303,
864
] |
def METHOD_NAME(self):
query = f"""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name LIKE '{self._act_prefix}form_%' OR sequence_name LIKE '{self._act_prefix}workflow_%';"""
with connection.cursor() as cursor:
cursor.execute(query, params=None)
return [row[0] for row in cursor.fetchall()] | [
19,
771,
245
] |
def METHOD_NAME(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', {'id': 'torrentsTable'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
try:
torrent_items = row.find_all('td')
title = torrent_items[1].find('a').get_text(strip=True)
download_url = torrent_items[2].find('a')['href']
if not all([title, download_url]):
continue
download_url = urljoin(self.url, download_url)
seeders = try_int(torrent_items[5].get_text(strip=True))
leechers = try_int(torrent_items[6].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed:
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
' minimum seeders: {0}. Seeders: {1}',
title, seeders)
continue
torrent_size = torrent_items[4].get_text()
size = convert_size(torrent_size) or -1
pubdate_raw = torrent_items[1].find('div').get_text()
pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider')
return items | [
214
] |
def METHOD_NAME(
kind: str,
data: dict,
target_datetime: datetime,
) -> pd.DataFrame:
"""
Filters out correct datetimes (source data is 12 hour format)
"""
assert len(data) > 0
assert kind != ""
dt_12_hour = arrow.get(target_datetime.strftime("%Y-%m-%d %I:%M")).datetime
datetime_col = KIND_MAPPING[kind]["datetime_column"]
filtered_data = pd.DataFrame(
[item for item in data if item[datetime_col].hour == dt_12_hour.hour]
)
return filtered_data | [
527,
772,
365
] |
def METHOD_NAME(self):
self.with_xspec = False
self.xspec_version = '12.12.0'
self.xspec_include_dirs = ''
self.xspec_lib_dirs = ''
# This is set up for how CIAO builds XSPEC; other users may require more libraries
self.xspec_libraries = 'XSFunctions XSUtil XS'
self.cfitsio_include_dirs = ''
self.cfitsio_lib_dirs = ''
self.cfitsio_libraries = ''
self.ccfits_include_dirs = ''
self.ccfits_lib_dirs = ''
self.ccfits_libraries = ''
self.wcslib_include_dirs = ''
self.wcslib_lib_dirs = ''
self.wcslib_libraries = ''
self.gfortran_include_dirs = ''
self.gfortran_lib_dirs = ''
self.gfortran_libraries = '' | [
15,
1881
] |
def METHOD_NAME(self) -> None:
# pylint: disable=R0916
if not isinstance(self.lam, (int, float, tuple)) or (
isinstance(self.lam, tuple)
and (
len(self.lam) != 2
or not isinstance(self.lam[0], (int, float))
or not isinstance(self.lam[1], (int, float))
or self.lam[0] > self.lam[1]
or self.lam[0] < 0.0
)
):
raise ValueError("The argument `lam` has to be a float or tuple of two float values as (min, max).") | [
250,
434
] |
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(module):
if not HAS_JENKINS:
module.fail_json(
msg=missing_required_lib("python-jenkins",
url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
exception=JENKINS_IMP_ERR) | [
9,
2410
] |
def METHOD_NAME(self):
if hasattr(self, "table"):
self.table.destroy()
self.keypoint_reorganizer = KeypointReorganizer(
data_folder=self.data_folder.folder_path,
pose_tool=self.pose_tool_dropdown.getChoices(),
file_format=self.file_format.getChoices(),
)
self.table = LabelFrame(
self.main_frm,
text="SET NEW ORDER",
font=Formats.LABELFRAME_HEADER_FORMAT.value,
pady=5,
padx=5,
)
self.current_order = LabelFrame(self.table, text="CURRENT ORDER:")
self.new_order = LabelFrame(self.table, text="NEW ORDER:")
self.table.grid(row=1, sticky=W, pady=10)
self.current_order.grid(row=0, column=0, sticky=NW, pady=5)
self.new_order.grid(row=0, column=1, sticky=NW, padx=5, pady=5)
idx1, idx2, oldanimallist, oldbplist, self.newanimallist, self.newbplist = (
[0] * len(self.keypoint_reorganizer.bp_list) for i in range(6)
)
if self.keypoint_reorganizer.animal_list:
animal_list_reduced = list(set(self.keypoint_reorganizer.animal_list))
self.pose_tool = "maDLC"
for i in range(len(self.keypoint_reorganizer.bp_list)):
idx1[i] = Label(self.current_order, text=str(i + 1) + ".")
oldanimallist[i] = Label(
self.current_order,
text=str(self.keypoint_reorganizer.animal_list[i]),
)
oldbplist[i] = Label(
self.current_order, text=str(self.keypoint_reorganizer.bp_list[i])
)
idx1[i].grid(row=i, column=0, sticky=W)
oldanimallist[i].grid(row=i, column=1, sticky=W, ipady=5)
oldbplist[i].grid(row=i, column=2, sticky=W, ipady=5)
idx2[i] = Label(self.new_order, text=str(i + 1) + ".")
self.newanimallist[i] = DropDownMenu(
self.new_order, " ", animal_list_reduced, "10"
)
self.newbplist[i] = DropDownMenu(
self.new_order, " ", self.keypoint_reorganizer.bp_list, "10"
)
self.newanimallist[i].setChoices(
self.keypoint_reorganizer.animal_list[i]
)
self.newbplist[i].setChoices(self.keypoint_reorganizer.bp_list[i])
idx2[i].grid(row=i, column=0, sticky=W)
self.newanimallist[i].grid(row=i, column=1, sticky=W)
self.newbplist[i].grid(row=i, column=2, sticky=W)
else:
self.pose_tool = "DLC"
for i in range(len(self.keypoint_reorganizer.bp_list)):
idx1[i] = Label(self.current_order, text=str(i + 1) + ".")
oldbplist[i] = Label(
self.current_order, text=str(self.keypoint_reorganizer.bp_list[i])
)
idx1[i].grid(row=i, column=0, sticky=W, ipady=5)
oldbplist[i].grid(row=i, column=2, sticky=W, ipady=5)
idx2[i] = Label(self.new_order, text=str(i + 1) + ".")
self.newbplist[i] = StringVar()
oldanimallist[i] = OptionMenu(
self.new_order,
self.newbplist[i],
*self.keypoint_reorganizer.bp_list
)
self.newbplist[i].set(self.keypoint_reorganizer.bp_list[i])
idx2[i].grid(row=i, column=0, sticky=W)
oldanimallist[i].grid(row=i, column=1, sticky=W)
button_run = Button(
self.table,
text="Run re-organization",
command=lambda: self.run_reorganization(),
)
button_run.grid(row=2, column=1, sticky=W) | [
4695
] |
async def METHOD_NAME(self, relationship, constraints=None):
"""
Add a relationship to the internal store
:param relationship: Relationship object to add
:param constraints: optional constraints on the use of the relationship
"""
raise NotImplementedError | [
238,
2924
] |
def METHOD_NAME(self, login, password):
add_to_seclist(password)
try:
self.groups, self.org_id, self.user_id = getUserGroups(login, password)
except rhnFault:
e = sys.exc_info()[1]
if e.code == 2:
# invalid login/password; set timeout to baffle
# brute force password guessing attacks (BZ 672163)
time.sleep(2)
raise
log_debug(4, "Groups: %s; org_id: %s; user_id: %s" % (
self.groups, self.org_id, self.user_id)) | [
2433
] |
def METHOD_NAME(self, rhs):
for out in divmod(self.p, rhs):
assert_equal(out.symbol, 'z') | [
9,
16933
] |
def METHOD_NAME(parallel_mode: ParallelMode, seed: int, overwrite: bool = False):
"""Adds a seed to the seed manager for `parallel_mode`.
Args:
parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
seed (int): The seed to be added
Raises:
AssertionError: Raises an AssertionError if `parallel_mode` is not an instance of
:class:`colossalai.context.ParallelMode` or the seed for `parallel_mode` has been added.
Note:
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
_SEED_MANAGER.METHOD_NAME(parallel_mode, seed, overwrite) | [
238,
484
] |
def METHOD_NAME(self):
basic_layout(self, src_folder="src") | [
571
] |
def METHOD_NAME(mock_handler, mock_registry):
mock_registry.settings = {"request_log_name": "request_logs"}
yield request_logger.request_logger_tween_factory(
mock_handler,
mock_registry,
) | [
248,
1155
] |
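Each row above pairs a `text` field (a Python snippet with its method name masked as `METHOD_NAME`) with an `ids` sequence of integers. The following is a minimal sketch, not part of the dataset itself, of how a text/ids table with this schema could be iterated via the Hugging Face `datasets` library; the dataset path "your-org/method-name-corpus" is a hypothetical placeholder, since this dump does not name the dataset.

```python
# Minimal sketch for iterating a text/ids dataset with the schema shown above.
# NOTE: "your-org/method-name-corpus" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("your-org/method-name-corpus", split="train")

for row in ds.select(range(3)):
    snippet = row["text"]  # Python code with the method name masked as METHOD_NAME
    ids = row["ids"]       # list of integers paired with the snippet
    print(len(snippet), ids)
```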