text (stringlengths 15–7.82k) | ids (sequencelengths 1–7) |
---|---|
def METHOD_NAME(self) -> str:
"""
Type of resource. Type = Microsoft.OffAzure/VMWareSites.
"""
return pulumi.get(self, "type") | [
44
] |
def METHOD_NAME(intfspec):
"""Given an interface specification return a cmdline command name"""
if len(intfspec) > 2:
name = intfspec[2]
else:
name = intfspec[0].split('.')[-1].replace('_', '-')
return name | [
19,
4518,
462,
156
] |
def METHOD_NAME(builder, subgraphs):
builder.PrependUOffsetTRelativeSlot(
2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0) | [
578,
238,
11157
] |
def METHOD_NAME(source: str) -> str:
# Filter out any magic lines (starting with %) if in a Jupyter notebook
import re
srclines = map(lambda x: re.sub(r"^\%.*", "", x), source.split("\n"))
source = "\n".join(srclines)
return source | [
1360,
5605,
534
] |
def METHOD_NAME(self, visibility):
"""Set whether to display collision objects or not."""
pass | [
52,
8204
] |
def METHOD_NAME(obj, user=None):
# Check if this is a recipe or project
project = Project.objects.filter(uid=obj.uid).first()
if not project:
project = auth.create_project(user=user, uid=obj.uid,
name=obj.name,
text=obj.text)
project.uid = obj.uid
project.name = obj.name
project.text = obj.text
project.date = obj.date
project.privacy = obj.privacy
update_image(project, obj.image)
project.save()
for recipe, vals in obj.recipes.items():
data = parse_json(vals)
email = data.owner_email or settings.DEFAULT_FROM_EMAIL
owner = get_or_create(email, data=data)
upload_recipe(data, project=project, user=owner)
return | [
172,
155
] |
def METHOD_NAME(self):
# Test {} with many items
@njit
def foo():
return {1: 2.2, 3: 4.4, 5: 6.6}
d = foo()
self.assertEqual(d, {1: 2.2, 3: 4.4, 5: 6.6}) | [
9,
1080,
16695,
41,
-1
] |
def METHOD_NAME(r, partition_key_range):
"""Evaluates and returns r - partition_key_range
:param dict partition_key_range: Partition key range.
:param routing_range.Range r: query range.
:return: The subtract r - partition_key_range.
:rtype: routing_range.Range
"""
left = max(partition_key_range[routing_range.PartitionKeyRange.MaxExclusive], r.min)
if left == r.min:
leftInclusive = r.isMinInclusive
else:
leftInclusive = False
queryRange = routing_range.Range(left, r.max, leftInclusive, r.isMaxInclusive)
return queryRange | [
2096,
661
] |
def METHOD_NAME():
tz = pytz.timezone('Australia/Sydney')
dt = datetime.datetime(2002, 2, 20, 13, 37, 42, 7, tzinfo=tz)
w = DatetimePicker(value=dt)
assert w.value == dt
# tzinfo only changes upon input from user
assert w.value.tzinfo == tz | [
9,
884,
11155
] |
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_location.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request | [
123,
377
] |
def METHOD_NAME(self):
"""
:avocado: tags=machine:pseries
:avocado: tags=accel:tcg
"""
self.require_accelerator("tcg")
self.vm.add_args("-accel", "tcg")
self.launch_and_wait(set_up_ssh_connection=False) | [
9,
-1,
6305
] |
def METHOD_NAME(bot: str, raise_exception=True):
"""
Checks if daily event triggering limit exceeded.
@param bot: bot id.
@param raise_exception: Raise exception if event is in progress.
@return: boolean flag
"""
today = datetime.today()
today_start = today.replace(hour=0, minute=0, second=0)
doc_count = ValidationLogs.objects(
bot=bot, start_timestamp__gte=today_start
).count()
if doc_count >= BotSettings.objects(bot=bot).get().data_importer_limit_per_day:
if raise_exception:
raise AppException("Daily limit exceeded.")
else:
return True
else:
return False | [
137,
1467,
5663
] |
def METHOD_NAME(self):
return self.args.command == "about" | [
137,
2296,
462
] |
def METHOD_NAME(self):
return reverse(self.urlname, args=[self.domain, self.program_id]) | [
1174,
274
] |
def METHOD_NAME(self, transpiler_level):
transpile(
self.qv_14_x_14, self.melbourne, seed_transpiler=0, optimization_level=transpiler_level
) | [
104,
6263,
15315,
5708,
1104,
5708
] |
def METHOD_NAME(self):
# https://mxnet.apache.org/get_started/build_from_source
args = [
self.define_from_variant("USE_CUDA", "cuda"),
self.define_from_variant("USE_CUDNN", "cudnn"),
self.define_from_variant("USE_OPENCV", "opencv"),
self.define_from_variant("USE_OPENMP", "openmp"),
self.define_from_variant("USE_LAPACK", "lapack"),
self.define("BLAS_LIBRARIES", self.spec["blas"].libs[0]),
]
if self.spec.satisfies("@:1"):
args.append(self.define_from_variant("USE_MKLDNN", "mkldnn"))
elif self.spec.satisfies("@2:"):
args.append(self.define_from_variant("USE_ONEDNN", "mkldnn"))
args.append(self.define("USE_CUTENSOR", False))
if "+cuda" in self.spec:
if "cuda_arch=none" not in self.spec:
cuda_arch = ";".join(
"{0:.1f}".format(float(i) / 10.0)
for i in self.spec.variants["cuda_arch"].value
)
args.append(self.define("MXNET_CUDA_ARCH", cuda_arch))
args.extend(
[
self.define_from_variant("USE_NCCL", "nccl"),
# Workaround for bug in GCC 8+ and CUDA 10 on PowerPC
self.define("CMAKE_CUDA_FLAGS", self.compiler.cxx11_flag),
# https://github.com/apache/mxnet/issues/21193
# https://github.com/spack/spack/issues/36922
self.define(
"CMAKE_CXX_FLAGS",
"-L" + join_path(self.spec["cuda"].libs.directories[0], "stubs"),
),
]
)
return args | [
334,
335
] |
def METHOD_NAME(addresses: Iterable[_N]) -> Iterator[_N]: ... | [
10634,
1065
] |
def METHOD_NAME(self):
"fills the widgets"
index_crosssectiontype = self.parameterWidget.cb_crosssectiontype.findText(
self.SectionType
)
self.parameterWidget.cb_crosssectiontype.setCurrentIndex(index_crosssectiontype)
self.parameterWidget.if_rec_height.setText(self.RectHeight.UserString)
self.parameterWidget.if_rec_width.setText(self.RectWidth.UserString)
self.parameterWidget.if_circ_diameter.setText(self.CircDiameter.UserString)
self.parameterWidget.if_pipe_diameter.setText(self.PipeDiameter.UserString)
self.parameterWidget.if_pipe_thickness.setText(self.PipeThickness.UserString) | [
86,
511,
706
] |
def METHOD_NAME(
self,
) -> DashboardListListResponse:
"""Get all dashboard lists.
Fetch all of your existing dashboard list definitions.
:rtype: DashboardListListResponse
"""
kwargs: Dict[str, Any] = {}
return self._list_dashboard_lists_endpoint.call_with_http_info(**kwargs) | [
245,
3029,
50
] |
def METHOD_NAME(cls, op: "TensorRechunk"):
from ..indexing.slice import TensorSlice
from ..merge.concatenate import TensorConcatenate
if has_unknown_shape(*op.inputs):
yield
out = op.outputs[0]
tensor = astensor(op.inputs[0])
chunk_size = get_nsplits(tensor, op.chunk_size, tensor.dtype.itemsize)
if chunk_size == tensor.nsplits:
return [tensor]
rechunk_infos = gen_rechunk_infos(tensor, chunk_size)
out_chunks = []
for rechunk_info in rechunk_infos:
chunk_index = rechunk_info.out_index
shape = rechunk_info.shape
inp_chunks = rechunk_info.input_chunks
inp_chunk_slices = rechunk_info.input_slices
inp_slice_chunks = []
for inp_chunk, inp_chunk_slice in zip(inp_chunks, inp_chunk_slices):
if all(slc == slice(None) for slc in inp_chunk_slice):
inp_slice_chunks.append(inp_chunk)
else:
slc_chunk = TensorSlice(slices=list(inp_chunk_slice)).new_chunk(
[inp_chunk],
dtype=inp_chunk.dtype,
shape=tuple(
calc_sliced_size(s, slc)
for s, slc in zip(inp_chunk.shape, inp_chunk_slice)
),
index=inp_chunk.index,
)
inp_slice_chunks.append(slc_chunk)
if len(inp_slice_chunks) > 1 or inp_slice_chunks[0].index != chunk_index:
chunk_op = TensorConcatenate()
out_chunk = chunk_op.new_chunk(
inp_slice_chunks,
shape=shape,
index=chunk_index,
dtype=out.dtype,
order=out.order,
)
out_chunks.append(out_chunk)
else:
out_chunks.append(inp_slice_chunks[0])
new_op = op.copy()
params = out.params
params["nsplits"] = chunk_size
params["chunks"] = out_chunks
tensor = new_op.new_tileable(op.inputs, kws=[params])
if op.reassign_worker:
for c in tensor.chunks:
c.op.reassign_worker = True
return [tensor] | [
4161
] |
def METHOD_NAME(self):
self.assertIsNone(StatValue.from_list([])) | [
9,
35
] |
def METHOD_NAME(self, loss_function, dataset):
model_file = get_test_output_path('model{}.bin')
yc.execute([
CATBOOST_PATH,
'fit',
'--loss-function', loss_function,
'-f', dataset.train_file,
'--cd', dataset.cd_file,
'-i', '10',
'-T', '4',
'-m', model_file,
])
return model_file | [
90
] |
def METHOD_NAME(*, df_list, year, config, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
df = pd.concat(df_list, sort=False, ignore_index=True)
fips = get_all_state_FIPS_2().reset_index(drop=True)
# ensure capitalization of state names
fips['State'] = fips['State'].apply(lambda x: x.title())
fips['StateAbbrev'] = fips['State'].map(us_state_abbrev)
# pad zeroes
fips['FIPS_2'] = fips['FIPS_2'].apply(lambda x: x.ljust(3 + len(x), '0'))
df = pd.merge(
df, fips, how='left', left_on='State', right_on='StateAbbrev')
# set us location code
df.loc[df['State_x'] == 'US', 'FIPS_2'] = US_FIPS
df = df.rename(columns={'FIPS_2': "Location"})
assign_fips_location_system(df, year)
df = df.drop(columns=['StateAbbrev', 'State_x', 'State_y'])
## Extract information for SEDS codes
units = pd.read_excel(config['url']['activities_url'],
sheet_name='Codes_and_Descriptions',
header=10, usecols='B:D')
units['FuelCode'] = units['MSN'].str[0:2]
units['SectorCode'] = units['MSN'].str[2:4]
units['UnitCode'] = units['MSN'].str[4:5]
units = units.query("UnitCode not in ['D', 'K']")
# get fuel names from Total Consumption and Industrial Consumption
fuels = (units.query("SectorCode.isin(['TC', 'IC'])")
.reset_index(drop=True))
fuels['Fuel'] = (fuels.query(
"Description.str.contains('total consumption')")
.Description.str.split(' total consumption', expand=True)[0])
fuels['FuelName2'] = (fuels.query(
"Description.str.contains('consumed by')")
.Description.str.split(' consumed by', expand=True)[0])
fuels['Fuel'] = fuels['Fuel'].fillna(fuels['FuelName2'])
fuels['Fuel'] = fuels['Fuel'].str.rstrip(',')
fuels = (fuels[['Fuel','FuelCode']].dropna().sort_values(by='Fuel')
.drop_duplicates(subset='FuelCode'))
# get sector names
sectors = units.copy()
sectors['ActivityConsumedBy'] = (units.query(
"Description.str.contains('consumed by')")
.Description.str.split('consumed by the ', expand=True)[1]
.str.strip())
sectors = (sectors[['SectorCode', 'ActivityConsumedBy']].dropna()
.sort_values(by='ActivityConsumedBy')
.drop_duplicates(subset='SectorCode'))
units = units.merge(fuels, how='left', on='FuelCode')
units = units.merge(sectors, how='left', on='SectorCode')
units = units.drop(columns=['FuelCode','SectorCode','UnitCode'])
units['Description'] = units['MSN'] + ': ' + units['Description']
df = df.merge(units, how='left', on='MSN')
df = (df.rename(columns={year: "FlowAmount",
"Fuel": "FlowName"})
.drop(columns=['Data_Status'])
.dropna())
# hard code data
df['Class'] = np.where(df['Unit'].str.contains('Btu') |
df['Unit'].str.contains('watt'),
'Energy', 'Other')
df['SourceName'] = 'EIA_SEDS'
df['ActivityProducedBy'] = 'None'
df['Year'] = year
df['FlowType'] = 'TECHNOSPHERE_FLOW'
# Fill in the rest of the Flow by fields so they show
# "None" instead of nan.
df['Compartment'] = 'None'
df['MeasureofSpread'] = 'None'
df['DistributionType'] = 'None'
# Add DQ scores
df['DataReliability'] = 5 # tmp
df['DataCollection'] = 5 # tmp
return df | [
16563,
-1,
214
] |
def METHOD_NAME():
return app.send_static_file("registration-form.html") | [
2213,
1029,
382
] |
def METHOD_NAME(self):
ds = self.mkdataset(True)
ds = self.update.saveAndReturnObject(ds)
assert ds.details.externalInfo
self.assert_type(ds, "test") | [
9,
751,
100,
69,
581
] |
def METHOD_NAME(filename, silent=False):
schema = Schema()
print( "Try to read EXPRESS schema file" + filename)
with open(filename,'rt') as inp:
contents = inp.METHOD_NAME()
types = re.findall(re_match_type,contents)
for name,aggregate,equals,enums in types:
schema.types[name] = Type(name,aggregate,equals,enums)
entities = re.findall(re_match_entity,contents)
for name,parent,fields_raw in entities:
print('process entity {0}, parent is {1}'.format(name,parent)) if not silent else None
fields = re.findall(re_match_field,fields_raw)
members = [Field(name,type,opt,coll) for name, opt, coll, type in fields]
print(' got {0} fields'.format(len(members))) if not silent else None
schema.entities[name] = Entity(name,parent,members)
return schema | [
203
] |
def METHOD_NAME(signal_num):
# once Python 3.8 is the minimum supported version,
# signal.strsignal can be used here
signals = type(signal.SIGINT)
try:
signal_str = f'{signals(signal_num).name} ({signal_num})'
except ValueError:
signal_str = f'{signal_num}'
return f'Terminated by signal {signal_str}' | [
900,
277
] |
def METHOD_NAME(resource_group_name: Optional[str] = None,
trigger_name: Optional[str] = None,
workflow_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWorkflowTriggerCallbackUrlResult:
"""
Get the callback URL for a workflow trigger.
Azure REST API version: 2019-05-01.
:param str resource_group_name: The resource group name.
:param str trigger_name: The workflow trigger name.
:param str workflow_name: The workflow name.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['triggerName'] = trigger_name
__args__['workflowName'] = workflow_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:logic:listWorkflowTriggerCallbackUrl', __args__, opts=opts, typ=ListWorkflowTriggerCallbackUrlResult).value
return AwaitableListWorkflowTriggerCallbackUrlResult(
base_path=pulumi.get(__ret__, 'base_path'),
method=pulumi.get(__ret__, 'method'),
queries=pulumi.get(__ret__, 'queries'),
relative_path=pulumi.get(__ret__, 'relative_path'),
relative_path_parameters=pulumi.get(__ret__, 'relative_path_parameters'),
value=pulumi.get(__ret__, 'value')) | [
245,
3855,
2117,
1076,
274
] |
def METHOD_NAME(self):
tf = tempfile.NamedTemporaryFile(mode="w")
tf.write('TELEMETRY tgt1 pkt1 LITTLE_ENDIAN "Packet"\n')
tf.write(" LIMITS_RESPONSE\n")
tf.seek(0)
with self.assertRaisesRegex(
ConfigParser.Error, "No current item for LIMITS_RESPONSE"
):
self.pc.process_file(tf.name, "TGT1")
tf.close() | [
9,
11099,
217,
385,
1056,
1024,
137
] |
def METHOD_NAME(self, msg_builder): | [
11605,
276,
169
] |
def METHOD_NAME(dataset_path: Path, clear: bool = False) -> None:
dataset = _load_dataset(dataset_path)
if clear:
clear_store(dataset)
store = get_store(dataset, external=True)
dedupe_ui(store, url_base="https://opensanctions.org/entities/%s/") | [
3696
] |
def METHOD_NAME(self, username):
return Inventory().METHOD_NAME(username) | [
19,
1081,
43,
21
] |
def METHOD_NAME(self): | [
1210,
555
] |
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict()) | [
24,
3
] |
def METHOD_NAME():
return PESIDRepositoryEntry(
pesid='rhel8-CRB',
major_version='8',
repoid='codeready-builder-for-rhel-8-x86_64-rpms',
rhui='',
arch='x86_64',
channel='ga',
repo_type='rpm') | [
16135,
12135,
-1
] |
def METHOD_NAME(ontology):
"""Prints a list of entity types matching a list of input fields.
Args:
ontology: An instance of the OntologyWrapper class.
"""
standard_field_list = _InputFieldsFromUser()
entity_type_match_dict = {}
for i, match in enumerate(
ontology.GetEntityTypesFromFields(standard_field_list)
):
entity_type_match_dict[i] = match
for i in range(DEFAULT_MATCHED_TYPES_LIST_SIZE):
print(colored(f'{i+1}. {entity_type_match_dict[i]}', 'green'))
_PrintFieldMatchComparison(ontology, entity_type_match_dict)
match_selection = input('Would you like to see all matches? (y/n): ')
if match_selection == 'y':
for i, match in [
(index, match)
for index, match in entity_type_match_dict.items()
if match.GetMatchScore() > 0
]:
print(colored(f'{i+1}. {match}', 'green'))
_PrintFieldMatchComparison(ontology, entity_type_match_dict) | [
19,
119,
43,
101,
245
] |
def METHOD_NAME(self): | [
9,
7319,
61,
11194
] |
def METHOD_NAME():
@es_test(requires=[cats_adapter])
class TestCatsRequired(SimpleTestCase):
def test_index_exists(self):
assert_index_exists(cats_adapter)
dirty_test = TestCatsRequired()
dirty_test.setUp()
dirty_test.test_index_exists()
# dirty test never cleans up
tolerant_test = TestCatsRequired()
tolerant_test.setUp() # does not raise "index_already_exists_exception"
tolerant_test.test_index_exists()
tolerant_test.tearDown()
tolerant_test.doCleanups()
# tolerant test still cleans up
assert_not_index_exists(cats_adapter) | [
9,
102,
-1,
1153,
724
] |
def METHOD_NAME(request):
"""OSPF verify E-bit and N-bit mismatch."""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
global topo
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
input_dict = {"r3": {"ospf6": {"neighbors": []}}}
step("Configure r3 as stub router")
stub = {"r3": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "stub"}]}}}
result = create_router_ospf(tgen, topo, stub)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
# Verify r3 lost its adjacency with r2 due to E-bit mismatch
result = verify_ospf6_neighbor(tgen, topo, dut="r3", input_dict=input_dict)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r2 as stub router")
stub = {"r2": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "stub"}]}}}
result = create_router_ospf(tgen, topo, stub)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
# Verify r3 has an adjacency up with r2 again
result = verify_ospf6_neighbor(tgen, topo, dut="r3")
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r3 as NSSA router")
nssa = {"r3": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "nssa"}]}}}
result = create_router_ospf(tgen, topo, nssa)
# Verify r3 lost its adjacency with r2 due to N-bit mismatch
result = verify_ospf6_neighbor(tgen, topo, dut="r3", input_dict=input_dict)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r2 as NSSA router")
nssa = {"r2": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "nssa"}]}}}
result = create_router_ospf(tgen, topo, nssa)
# Verify r3 has an adjacency up with r2 again
result = verify_ospf6_neighbor(tgen, topo, dut="r3")
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name) | [
9,
-1,
6222,
4030
] |
def METHOD_NAME(self, item: Node, start: int = 0, stop: int = sys.maxsize) -> int: ... | [
724
] |
def METHOD_NAME(self, path, include_body=True):
"""
This is an edited method of original class so that we can show
directory listing and set correct Content-Type
"""
path = self.parse_url_path(path)
abspath = os.path.abspath(os.path.join(self.root, path))
self.absolute_path = abspath
if not os.path.exists(abspath):
raise tornado.web.HTTPError(404)
# Check if a directory if so provide listing
if os.path.isdir(abspath):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
# Just loop once to get dirnames and filenames :P
for abspath, dirnames, filenames in os.walk(abspath):
break
directory_listing_template = tornado.template.Template(
"""
<html>
<head>
<title>Directory Listing</title>
</head>
<body>
<h1>Index of</h1>
<hr>
<ul>
<li><a href="../">../</a></li>
{% if len(dirnames) > 0 %}
<h2>Directories</h2>
{% for item in dirnames %}
<li><a href="{{ url_escape(item, plus=False) }}/">{{ item }}/</a></li>
{% end %}
{% end %}
{% if len(filenames) > 0 %}
<h2>Files</h2>
{% for item in filenames %}
<li><a href="{{ url_escape(item, plus=False) }}">{{ item }}</a></li>
{% end %}
{% end %}
</ul>
</body>
</html>
"""
)
self.write(directory_listing_template.generate(dirnames=dirnames, filenames=filenames))
return
if os.path.isfile(abspath): # So file
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age={!s}".format(cache_time))
else:
self.set_header("Cache-Control", "public")
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.METHOD_NAME("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
no_of_lines = self.get_argument("lines", default="-1")
if no_of_lines != "-1":
data = subprocess.check_output(["tail", "-" + no_of_lines, abspath])
else:
with open(abspath, "rb") as file:
data = file.read()
hasher = hashlib.sha1()
hasher.update(data)
self.set_header("Etag", '"{!s}"'.format(hasher.hexdigest()))
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data)) | [
19
] |
def METHOD_NAME(self) -> None:
self._archive_download_url: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._expired: Attribute[bool] = NotSet
self._expires_at: Attribute[datetime] = NotSet
self._head_sha: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._size_in_bytes: Attribute[int] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._workflow_run: Attribute[WorkflowRun] = NotSet | [
176,
177
] |
def METHOD_NAME(self, *args, **kwargs):
"""Overrides default init by loading value from checkpoint."""
# pylint: disable=protected-access
self._old_init(*args, **kwargs)
ckpt_name = self._map_func(self._shared_name)
if ckpt_name not in self._ckpt_var_cache:
raise errors.NotFoundError(None, None,
"%s not found in checkpoint" % ckpt_name)
val = self._ckpt_var_cache.get(ckpt_name, None)
if val is not None:
self.assign(val)
# Avoid assigning for the second time.
self._ckpt_var_cache[ckpt_name] = None
# pylint: enable=protected-access | [
176,
280,
1830
] |
def METHOD_NAME(
multiprocessing_tcp_port_selector: TCPPortSelector, context: BaseContext
):
queue = context.Queue()
p1 = context.Process( # type: ignore[attr-defined]
target=get_multiprocessing_tcp_port,
args=(multiprocessing_tcp_port_selector, MULTIPROCESSING_PORT, queue),
)
p2 = context.Process( # type: ignore[attr-defined]
target=get_multiprocessing_tcp_port,
args=(multiprocessing_tcp_port_selector, MULTIPROCESSING_PORT, queue),
)
p1.start()
p2.start()
free_tcp_port_1 = queue.get()
free_tcp_port_2 = queue.get()
p1.join()
p2.join()
actual_results = [free_tcp_port_1, free_tcp_port_2]
assert MULTIPROCESSING_PORT in actual_results
assert None in actual_results | [
9,
5816,
237,
5169,
1916,
4595,
4776
] |
def METHOD_NAME(self): | [
9,
112,
5277,
472,
130,
685,
623
] |
def METHOD_NAME(self, stat_buf, stat):
def store(offset, val):
return self.state.memory.store(stat_buf + offset, val, endness=self.state.arch.memory_endness)
store(0x00, stat.st_dev)
store(0x08, stat.st_ino)
store(0x10, stat.st_mode)
store(0x14, stat.st_nlink)
store(0x18, stat.st_uid)
store(0x1C, stat.st_gid)
store(0x20, stat.st_rdev)
store(0x28, self.state.solver.BVV(0, 64))
store(0x30, stat.st_size)
store(0x38, stat.st_blksize)
store(0x3C, self.state.solver.BVV(0, 32))
store(0x40, stat.st_blocks)
store(0x48, stat.st_atime)
store(0x4C, stat.st_atimensec)
store(0x50, stat.st_mtime)
store(0x54, stat.st_mtimensec)
store(0x58, stat.st_ctime)
store(0x5C, stat.st_ctimensec)
store(0x60, self.state.solver.BVV(0, 32))
store(0x64, self.state.solver.BVV(0, 32)) | [
1308,
-1
] |
def METHOD_NAME(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
METHOD_NAME = {
key: value
for key, value in self.__dict__.items()
if key not in ("optimizer", "lr_lambdas")
}
METHOD_NAME["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
METHOD_NAME["lr_lambdas"][idx] = fn.__dict__.copy()
return METHOD_NAME | [
551,
553
] |
def METHOD_NAME(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
"""
Get a share
:param str account_name: The name of the share account.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share to retrieve.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20210801:getShare', __args__, opts=opts, typ=GetShareResult).value
return AwaitableGetShareResult(
created_at=pulumi.get(__ret__, 'created_at'),
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
share_kind=pulumi.get(__ret__, 'share_kind'),
system_data=pulumi.get(__ret__, 'system_data'),
terms=pulumi.get(__ret__, 'terms'),
type=pulumi.get(__ret__, 'type'),
user_email=pulumi.get(__ret__, 'user_email'),
user_name=pulumi.get(__ret__, 'user_name')) | [
19,
834
] |
def METHOD_NAME(sSearch=''):
oGui = cGui()
if sSearch:
sUrl = sSearch
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'class="featured-image"><a href="([^"]+)" title="([^"]+)"><img width=".+?" height=".+?" src="([^"]+)'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
oGui.addText(SITE_IDENTIFIER)
if aResult[0]:
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
# first post filter
if (str(aEntry[2]) != "https://www.mamcin.com/wp-content/uploads/2017/10/plus-belle-la-vie-episode-suivant-en-avance.jpg"):
sUrl = aEntry[0]
sTitle = aEntry[1]
sThumb = aEntry[2]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sTitle, '', sThumb, '', oOutputParameterHandler)
progress_.VSclose(progress_)
sNextPage = __checkForNextPage(sHtmlContent)
if sNextPage:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
sPaging = re.search('page/([0-9]+)', sNextPage).group(1)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
if not sSearch:
oGui.setEndOfDirectory() | [
697,
9593
] |
def METHOD_NAME(self):
has_arg, option = getopt.long_has_args('abc', ['abc='])
self.assertTrue(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abcd'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abcd')
self.assertError(getopt.long_has_args, 'abc', ['def'])
self.assertError(getopt.long_has_args, 'abc', [])
self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde']) | [
9,
524,
220,
335
] |
def METHOD_NAME(
self,
configuration: ExpectationConfiguration,
metrics,
runtime_configuration: dict = None,
execution_engine=None,
):
success = metrics.get("column_values.geometry_overlap").get("success")
indices = metrics.get("column_values.geometry_overlap").get("indices")
return {"success": success, "result": {"overlapping_indices": indices}} | [
187
] |
async def METHOD_NAME(self) -> None:
"""
Represents one loop of the service.
Users should override this method.
To actually run the service once, call `LoopService().start(loops=1)`
instead of `LoopService().run_once()`, because this method will not invoke setup
and teardown methods properly.
"""
raise NotImplementedError("LoopService subclasses must implement this method.") | [
22,
6353
] |
def METHOD_NAME(self):
sm2mm_1 = SomeM2MModel.objects.create(name='abc')
sm2mm_1.polymorphics = [self.pol_1, self.pol_2]
sm2mm_1 = SomeM2MModel.objects.get(name='abc')
sm2mm_2 = SomeM2MModel.objects.create(name='def')
sm2mm_2.polymorphics = [self.pol_2, self.pol_3]
with self.assertNumQueries(5):
# 5 queries:
# 1) SomeM2MModel
# 2) Content Types (usually cached, but turned off in tests)
# 3) PolymorphicModelBaseTest ids
# 4) PolymorphicModelTest based on 3)
# 5) PolymorphicModelTest2 based on 3)
result = {
sm.name: sm for sm in
SomeM2MModel.objects.prefetch_related(Prefetch(
lookup='polymorphics',
queryset=PolymorphicModelBaseTest.polymorphic_objects.polymorphic_filter( # noqa
some_m2m__in=SomeM2MModel.objects.all()
).all(),
)).order_by('name')
}
self.assertCountEqual(
result['abc'].polymorphics.all(),
[self.pol_1, self.pol_2]
)
self.assertCountEqual(
[inst._meta.model for inst in result['abc'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest]
)
self.assertCountEqual(
result['def'].polymorphics.all(),
[self.pol_2, self.pol_3]
)
self.assertCountEqual(
[inst._meta.model for inst in result['def'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest2]
) | [
9,
4433,
41,
518,
252,
69,
4434
] |
def METHOD_NAME(self):
"""
Notes
-----
计算所有的时间层。
"""
rdir = self.options['rdir']
step = self.options['step']
timeline = self.timeline
dt = timeline.current_time_step_length()
timeline.reset() # 时间置零
fname = rdir + '/test_'+ str(timeline.current).zfill(10) + '.vtu'
self.write_to_vtk(fname)
print(fname)
while not timeline.stop():
self.one_step_solve()
timeline.current += 1
if timeline.current%step == 0:
fname = rdir + '/test_'+ str(timeline.current).zfill(10) + '.vtu'
print(fname)
self.write_to_vtk(fname)
timeline.reset() | [
283
] |
def METHOD_NAME(path):
isolation = {'cgroup': {'path': path}}
client.load('empty', processes=1, isolation=isolation) | [
0,
11354,
157
] |
def METHOD_NAME(self) -> Node | None: ... | [
1511,
9851
] |
def METHOD_NAME() -> None:
migrate_nulls()
with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("replies", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_files", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_messages", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_replies", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False) | [
738
] |
async def METHOD_NAME(self, unit_tag: int):
"""
Override this in your bot class.
This will event will be called when a unit (or structure, friendly or enemy) dies.
For enemy units, this only works if the enemy unit was in vision on death.
:param unit_tag:
""" | [
69,
805,
11551
] |
def METHOD_NAME():
# Wrong type of b1 and bk2 power spectra
with pytest.raises(ValueError):
ccl.nl_pt.LagrangianPTCalculator(b1_pk_kind='non-linear')
# Wrong type of b1 and bk2 power spectra
with pytest.raises(ValueError):
ccl.nl_pt.LagrangianPTCalculator(bk2_pk_kind='non-linear')
# Uninitialized templates
with pytest.raises(ccl.CCLError):
ptc = ccl.nl_pt.LagrangianPTCalculator()
ptc.get_biased_pk2d(TRS['TG'])
# TODO: Discuss this test
# Wrong pair combination
with pytest.raises(ValueError):
ptc = ccl.nl_pt.LagrangianPTCalculator(cosmo=COSMO)
ptc.get_pk2d_template('b1:b3') | [
9,
6454,
8688,
45
] |
def METHOD_NAME(self, op_fn, type_fn):
x = type_fn()
f = function([x], op_fn(x))
xval = _asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
yval = f(xval)
assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype | [
9,
1506
] |
def METHOD_NAME(replay_start, timestamp):
if len(timestamp.split(".")[-1]) < 6:
for i in range(6 - len(timestamp.split(".")[-1])):
timestamp = timestamp + "0"
start = datetime.fromisoformat(replay_start)
stamp = datetime.fromisoformat(timestamp)
if start.tzinfo is None:
start = start.replace(tzinfo=timezone.utc)
if stamp.tzinfo is None:
stamp = stamp.replace(tzinfo=timezone.utc)
return ((stamp - start).total_seconds()) * 1000 | [
1407,
2443
] |
def METHOD_NAME(self, other):
"""
The equals method.
:param other: a different object.
:type other: object
:return: True if equal, otherwise False.
:rtype: bool
"""
if not isinstance(other, ASTForStmt):
return False
if self.get_variable() != other.get_variable():
return False
if not self.get_start_from().METHOD_NAME(other.get_start_from()):
return False
if not self.get_end_at().METHOD_NAME(other.get_end_at()):
return False
if self.get_step() != other.get_step():
return False
return self.get_block().METHOD_NAME(other.get_block()) | [
816
] |
def METHOD_NAME(left, right):
if diff_options:
opts = ' ' + ' '.join(diff_options)
else:
opts = ''
print('diff%s %s %s' % (opts, left, right)) | [
2443,
534
] |
def METHOD_NAME(self, pathname, setting):
"""
_addEnvironmentPath_
add a key = value1:value2:value3 environment setting to this step
"""
if getattr(self.data.environment.paths, pathname, None) == None:
setattr(self.data.environment.paths, pathname, [])
pathentry = getattr(self.data.environment.paths, pathname)
pathentry.append(setting)
return | [
238,
1027,
157
] |
def METHOD_NAME(self):
"""
Testing the exact solution which is a property of every backend.
"""
n_qubits = 8
register = range(n_qubits)
p = 1
correct_energy = -8
correct_config = [0, 1, 0, 1, 0, 1, 0, 1]
# The tests pass regardless of the value of betas and gammas is this correct?
betas = [np.pi / 8]
gammas = [np.pi / 4]
cost_hamiltonian = ring_of_disagrees(register)
mixer_hamiltonian = X_mixer_hamiltonian(n_qubits)
qaoa_descriptor = QAOADescriptor(cost_hamiltonian, mixer_hamiltonian, p)
variational_params_std = QAOAVariationalStandardParams(
qaoa_descriptor, betas, gammas
)
backend_analytical = QAOABackendAnalyticalSimulator(qaoa_descriptor)
# exact solution is defined as the property of the cost function
energy_vec, config_vec = backend_analytical.exact_solution
assert np.isclose(energy_vec, correct_energy)
config_vec = [config.tolist() for config in config_vec]
assert correct_config in config_vec | [
9,
2017,
725
] |
def METHOD_NAME(self):
cl = ConfigList(["un", "quatre", "trois"])
cl[1] = "deux"
self.assertEqual(cl.serialize(), ["un", "deux", "trois"]) | [
9,
200,
245,
5719
] |
def METHOD_NAME():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--analysis_file',
type=str,
default='sensitivity_0.data',
help="directory to save compressed model.")
parser.add_argument(
'--pruned_ratios',
nargs='+',
type=float,
default=[0.1, 0.2, 0.3, 0.4],
help="The ratios to be pruned when compute sensitivity.")
parser.add_argument(
'--target_loss',
type=float,
default=0.2,
help="use the target loss to get prune ratio of each parameter")
return parser | [
7220
] |
def METHOD_NAME(self):
return self.try_parse_helper("#") | [
1365,
214,
1161
] |
def METHOD_NAME():
if get_env("USE_FAST_LN"):
paddle.nn.LayerNorm = FastLayerNorm
elif get_env("USE_FUSED_LN"):
paddle.nn.LayerNorm = FusedLayerNorm
elif get_env("USE_FUSED_RMS_NORM"):
paddle.nn.LayerNorm = FusedRMSNorm
if get_env("USE_LINEAR_WITH_GRAD_ADD"):
paddle.nn.functional.linear = FusedLinearWithGradAdd.apply
paddle.incubate.nn.functional.fused_linear = FusedLinearWithGradAdd.apply | [
248,
2315
] |
def METHOD_NAME(self, item, spider): # noqa: C901
check_field(item, spider, "brand_wikidata", allowed_types=(str,), match_regex=self.wikidata_regex)
check_field(item, spider, "website", (str,), self.url_regex)
check_field(item, spider, "image", (str,), self.url_regex)
check_field(item, spider, "email", (str,), self.email_regex)
check_field(item, spider, "phone", (str,))
check_field(item, spider, "street_address", (str,))
check_field(item, spider, "city", (str,))
check_field(item, spider, "state", (str,))
check_field(item, spider, "postcode", (str,))
check_field(item, spider, "country", (str,), self.country_regex)
check_field(item, spider, "name", (str,))
check_field(item, spider, "brand", (str,))
if coords := get_lat_lon(item):
lat, lon = coords
if not (self.min_lat < lat < self.max_lat):
spider.crawler.stats.inc_value("atp/field/lat/invalid")
lat = None
if not (self.min_lon < lon < self.max_lon):
spider.crawler.stats.inc_value("atp/field/lon/invalid")
lon = None
if isinstance(lat, float) and isinstance(lon, float):
if math.fabs(lat) < 3 and math.fabs(lon) < 3:
spider.crawler.stats.inc_value("atp/geometry/null_island")
lat = None
lon = None
set_lat_lon(item, lat, lon)
if not (item.get("geometry") or get_lat_lon(item)):
spider.crawler.stats.inc_value("atp/field/lat/missing")
spider.crawler.stats.inc_value("atp/field/lon/missing")
if twitter := item.get("twitter"):
if not isinstance(twitter, str):
spider.crawler.stats.inc_value("atp/field/twitter/wrong_type")
elif not (self.url_regex.match(twitter) and "twitter.com" in twitter) and not self.twitter_regex.match(
twitter
):
spider.crawler.stats.inc_value("atp/field/twitter/invalid")
else:
spider.crawler.stats.inc_value("atp/field/twitter/missing")
if opening_hours := item.get("opening_hours"):
if isinstance(opening_hours, OpeningHours):
if opening_hours.day_hours:
item["opening_hours"] = opening_hours.as_opening_hours()
else:
item["opening_hours"] = None
spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
elif not isinstance(opening_hours, str):
spider.crawler.stats.inc_value("atp/field/opening_hours/wrong_type")
elif not self.opening_hours_regex.match(opening_hours) and opening_hours != "24/7":
spider.crawler.stats.inc_value("atp/field/opening_hours/invalid")
else:
spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
return item | [
356,
1024
] |
def METHOD_NAME(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2))) | [
2230,
681
] |
def METHOD_NAME(space, expected_batch_space_4):
batch_space_4 = batch_space(space, n=4)
assert batch_space_4 == expected_batch_space_4 | [
9,
2277,
173
] |
def METHOD_NAME(self):
return {
# sell
"energy_sell_rate": self._sell_energy_profile.input_energy_rate,
"energy_rate_profile": self._sell_energy_profile.input_profile,
"energy_rate_profile_uuid": self._sell_energy_profile.input_profile_uuid,
# buy
"energy_buy_rate": self._buy_energy_profile.input_energy_rate,
"buying_rate_profile": self._buy_energy_profile.input_profile,
"buying_rate_profile_uuid": self._buy_energy_profile.input_profile_uuid,
} | [
183
] |
def METHOD_NAME(
self,
start_well: AbstractWellCore,
num_channels: int = 1,
fail_if_full: bool = False,
):
"""
Removes tips from the tip tracker.
This method should be called when a tip is picked up. Generally, it
will be called with `num_channels=1` or `num_channels=8` for single-
and multi-channel respectively. If picking up with more than one
channel, this method will automatically determine which tips are used
based on the start well, the number of channels, and the geometry of
the tiprack.
:param start_well: The :py:class:`.Well` from which to pick up a tip.
For a single-channel pipette, this is the well to
send the pipette to. For a multi-channel pipette,
this is the well to send the back-most nozzle of the
pipette to.
:type start_well: :py:class:`.Well`
:param num_channels: The number of channels for the current pipette
:type num_channels: int
:param fail_if_full: for backwards compatibility
"""
# Select the column of the labware that contains the target well
target_column = [col for col in self._columns if start_well in col][0]
well_idx = target_column.index(start_well)
# Number of tips to pick up is the lesser of (1) the number of tips
# from the starting well to the end of the column, and (2) the number
# of channels of the pipette (so a 4-channel pipette would pick up a
# max of 4 tips, and picking up from the 2nd-to-bottom well in a
# column would get a maximum of 2 tips)
num_tips = min(len(target_column) - well_idx, num_channels)
target_wells = target_column[well_idx : well_idx + num_tips]
# In API version 2.2, we no longer reset the tip tracker when a tip
# is dropped back into a tiprack well. This fixes a behavior where
# subsequent transfers would reuse the dirty tip. However, sometimes
# the user explicitly wants to use a dirty tip, and this check would
# raise an exception if they tried to do so.
# An extension of work here is to have separate tip trackers for
# dirty tips and non-present tips; but until then, we can avoid the
# exception.
if fail_if_full:
assert all(
well.has_tip() for well in target_wells
), "{} is out of tips".format(str(self))
for well in target_wells:
well.set_has_tip(False) | [
1080,
7964
] |
def METHOD_NAME(administrator_name: Optional[str] = None,
managed_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedInstanceAdministratorResult:
"""
Gets a managed instance administrator.
Azure REST API version: 2021-11-01.
:param str managed_instance_name: The name of the managed instance.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
__args__ = dict()
__args__['administratorName'] = administrator_name
__args__['managedInstanceName'] = managed_instance_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql:getManagedInstanceAdministrator', __args__, opts=opts, typ=GetManagedInstanceAdministratorResult).value
return AwaitableGetManagedInstanceAdministratorResult(
administrator_type=pulumi.get(__ret__, 'administrator_type'),
id=pulumi.get(__ret__, 'id'),
login=pulumi.get(__ret__, 'login'),
name=pulumi.get(__ret__, 'name'),
sid=pulumi.get(__ret__, 'sid'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type')) | [
19,
3627,
89,
4801
] |
def METHOD_NAME():
blurb = lldb.debugger.GetVersionString()
top = blurb.split('\n')[0]
full = top.split(' ')[2]
major, minor = full.split('.')[:2]
return LldbVersion(full, int(major), int(minor)) | [
226,
-1,
2862
] |
def METHOD_NAME(module, client, backup_plan_name: str):
backup_plan_id = _list_backup_plans(client, backup_plan_name)
if not backup_plan_id:
return []
try:
result = client.get_backup_plan(BackupPlanId=backup_plan_id)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}")
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_backup_plan = []
try:
resource = result.get("BackupPlanArn", None)
tag_dict = get_backup_resource_tags(module, client, resource)
result.update({"tags": tag_dict})
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to get the backup plan tags")
snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags"))
# Remove AWS API response and add top-level plan name
for v in snaked_backup_plan:
if "response_metadata" in v:
del v["response_metadata"]
v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"]
return snaked_backup_plan | [
19,
145,
2051
] |
def METHOD_NAME(self, variable):
if isinstance(variable, VariableMatch):
return variable
match = search_variable(variable)
if not match.is_variable() or match.items:
raise DataError("Invalid variable name '%s'." % variable)
return match | [
19,
590
] |
def METHOD_NAME(end_point_name, model_name, token):
if token is None:
return False
FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
get_end_point_token(end_point_name, model_name)
if cached_token is not None and cached_token == token:
return True
return False | [
2433,
377,
466
] |
def METHOD_NAME(config, env):
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
with open(fn, "w") as f:
f.write(rtyaml.dump(config)) | [
77,
817
] |
def METHOD_NAME(self, ep_name, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
"""
Sends a Join notification when subtitles for an episode are downloaded
:param ep_name: The name of the episode subtitles were downloaded for
:param lang: The language of the downloaded subtitles
:param title: The title of the notification to send
"""
if settings.JOIN_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_join(title, "{0}: {1}".format(ep_name, lang)) | [
959,
3332,
136
] |
def METHOD_NAME(self) -> 'outputs.ResourceReferenceResponse':
"""
Reference to a resource.
"""
return pulumi.get(self, "custom_certificate") | [
343,
1548
] |
def METHOD_NAME(s: bytes) -> bytes:
"""
Serialize a byte string with Bitcoin's variable length string serialization.
:param s: The byte string to be serialized
:returns: The serialized byte string
"""
return ser_compact_size(len(s)) + s | [
12027,
144
] |
def METHOD_NAME(self, method):
"""
Teardown method
"""
try:
self.client.admin_drop_user("example-test")
time.sleep(1)
except e.InvalidUser:
pass
self.client.close() | [
1843,
103
] |
def METHOD_NAME():
import subprocess
cmd = "xdputil query | grep 'DPU Batch' | awk -F':' '{ print $2}' | awk -F',' '{ print $1}' "
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
ret = p.communicate()
if ret[0] == b'':
return 1
return int(ret[0]) | [
19,
2277
] |
def METHOD_NAME (programs):
print_test_name ("Rewrite test")
output = programs.meta_set (False, "--bext-originator \"Really, really long string\" output.wav")
output = programs.meta_set (False, "--bext-originator \"Short\" output.wav")
output = programs.meta_get (False, "--bext-originator output.wav")
if output.find ("really long") > 0:
print("\n\nError : output '%s' should not contain 'really long'." % output)
sys.exit (1)
print("ok") | [
9,
2887
] |
def METHOD_NAME(vertices, faces, root=0):
"""Unify the cycle directions of the given faces such that adjacent faces share opposite halfedges.
Parameters
----------
vertices : sequence[[float, float, float] | :class:`~compas.geometry.Point`]
A list of vertex coordinates.
faces : sequence[sequence[int]]
A list of faces with each face defined by a list of indices into the list of vertices.
root : int, optional
The starting face.
Returns
-------
list[list[int]]
A list of faces with the same orientation as the root face.
Raises
------
AssertionError
If not all faces were visited.
Notes
-----
The algorithm works by first building an adjacency dict of the faces, which can be traversed efficiently to unify all face cycles.
Although this process technically only requires the connectivity information contained in the faces,
the locations of the vertices can be used to speed up execution for very large collections of faces.
Examples
--------
>>> vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]
>>> faces = [[0, 1, 2], [0, 3, 2]]
>>> unify_cycles(vertices, faces)
[[0, 1, 2], [2, 3, 0]]
"""
def unify(node, nbr):
# find the common edge
for u, v in pairwise(faces[nbr] + faces[nbr][0:1]):
if u in faces[node] and v in faces[node]:
# node and nbr have edge u-v in common
i = faces[node].index(u)
j = faces[node].index(v)
if i == j - 1 or (j == 0 and u == faces[node][-1]):
# if the traversal of a neighboring halfedge
# is in the same direction
# flip the neighbor
faces[nbr][:] = faces[nbr][::-1]
return
adj = face_adjacency(vertices, faces)
visited = breadth_first_traverse(adj, root, unify)
assert len(list(visited)) == len(faces), "Not all faces were visited"
return faces | [
2969,
11449
] |
def METHOD_NAME(self, key: str) -> Any:
"""@brief Return the highest priority value for the option, or its default."""
for layer in self._layers:
if key in layer:
return layer[key]
return self.get_default(key) | [
19
] |
def METHOD_NAME(cls) -> dict[Any, Callable[[Any], Any]]:
if pydantic.VERSION.startswith("1"): # pragma: no cover
return {**_base_encoders, **cls._create_pydantic_v1_encoders()}
return {**_base_encoders, **cls._create_pydantic_v2_encoders()} | [
13579
] |
def METHOD_NAME(
cls,
method_,
url_,
api_key=None,
idempotency_key=None,
stripe_version=None,
stripe_account=None,
params=None,
):
params = None if params is None else params.copy()
api_key = util.read_special_variable(params, "api_key", api_key)
idempotency_key = util.read_special_variable(
params, "idempotency_key", idempotency_key
)
stripe_version = util.read_special_variable(
params, "stripe_version", stripe_version
)
stripe_account = util.read_special_variable(
params, "stripe_account", stripe_account
)
headers = util.read_special_variable(params, "headers", None)
requestor = api_requestor.APIRequestor(
api_key, api_version=stripe_version, account=stripe_account
)
if idempotency_key is not None:
headers = {} if headers is None else headers.copy()
headers.update(util.populate_headers(idempotency_key)) # type: ignore
response, _ = requestor.request_stream(method_, url_, params, headers)
return response | [
628,
377,
919
] |
def METHOD_NAME(dataloader):
latency_list = []
for idx, (inputs, labels) in enumerate(dataloader):
# dataloader should keep the order and len of inputs same with input_tensor
inputs = np.array([inputs])
feed_dict = dict(zip(input_tensor, inputs))
start = time.time()
predictions = model.sess.run(output_tensor, feed_dict)
end = time.time()
metric.update(predictions, labels)
latency_list.append(end-start)
if idx + 1 == iteration:
break
latency = np.array(latency_list).mean() / args.batch_size
return latency | [
1171,
717
] |
def METHOD_NAME( f, len ):
global Setsdrmasks, SetsdrmasksOnesCount
byteCount = (len+7)//8
Setsdrmasks = f.read( byteCount )
ls = []
SetsdrmasksOnesCount = 0
for b in Setsdrmasks:
ls.append( "%x" % ((b & 0xf0) >> 4) )
ls.append( "%x" % ( b & 0x0f ) )
for i in range(8):
if b & (1<<i):
SetsdrmasksOnesCount = SetsdrmasksOnesCount +1
return ''.join(ls) | [
203,
-1
] |
def METHOD_NAME(args: tuple[str, ...], expanded: tuple[str, ...]) -> None:
cli_alias = CliAlias.from_dict(
{
"--alias": "--flag goal",
}
)
assert cli_alias.expand_args(args) == expanded | [
9,
2450,
335,
584
] |
def METHOD_NAME(filters):
amount = 0.0
# get amounts from all the apps
for method_name in frappe.get_hooks(
"get_amounts_not_reflected_in_system_for_bank_reconciliation_statement"
):
amount += frappe.get_attr(method_name)(filters) or 0.0
return amount | [
19,
13864,
130,
13865,
623,
112
] |
def METHOD_NAME(
self,
pipeline_types: Tuple[MypyType, ...],
pipeline_kinds: List[ArgKind],
ctx: CallableContext,
) -> MypyType:
"""Pass pipeline functions to infer them one by one."""
parameter = FuncArg(None, self._instance, ARG_POS)
ret_type = ctx.default_return_type
for pipeline, kind in zip(pipeline_types, pipeline_kinds):
ret_type = self._proper_type(
analyze_call(
cast(FunctionLike, pipeline),
[parameter],
ctx,
show_errors=True,
),
)
parameter = FuncArg(None, ret_type, kind)
return ret_type | [
280,
1448,
771
] |
def METHOD_NAME(self) -> Optional['outputs.PeeringPropertiesDirectResponse']:
"""
The properties that define a direct peering.
"""
return pulumi.get(self, "direct") | [
4234
] |
def METHOD_NAME(model_name: str) -> None:
if SWIGNORE_PATH.exists():
lines = SWIGNORE_PATH.read_text().splitlines()
else:
lines = []
write_lines = []
for line in lines:
if "*/base-" in line or "*/adapter-" in line:
continue
write_lines.append(line)
for m in SUPPORTED_MODELS:
if m == model_name:
continue
write_lines.append(f"*/base-{m}/*")
write_lines.append(f"*/adapter-{m}/*")
SWIGNORE_PATH.write_text("\n".join(write_lines)) | [
86,
-1
] |
def METHOD_NAME(self, api_url):
response = requests.get(api_url, timeout=10)
response.raise_for_status()
return response.json() | [
128,
58
] |
def METHOD_NAME(band):
amp_mask = get_all_nonamp_fiberbitmask_val()
if band.lower().find('b')>=0:
amp_mask |= fmsk.BADAMPB
if band.lower().find('r')>=0:
amp_mask |= fmsk.BADAMPR
if band.lower().find('z')>=0:
amp_mask |= fmsk.BADAMPZ
return amp_mask | [
19,
75,
-1,
41,
8106
] |
def METHOD_NAME(self, fields) -> Optional[List[FieldModel]]:
"""
Recursively convert the parsed schema into required models
"""
field_models = []
for field in fields:
try:
field_models.append(
FieldModel(
name=field.name,
dataType=ProtobufDataTypes(field.type).name,
children=self.METHOD_NAME(field.message_type.fields)
if field.type == 11
else None,
)
)
except Exception as exc: # pylint: disable=broad-except
logger.debug(traceback.format_exc())
logger.warning(
f"Unable to parse the protobuf schema into models: {exc}"
)
return field_models | [
19,
5150,
342
] |