function
stringlengths 11
56k
| repo_name
stringlengths 5
60
| features
list |
---|---|---|
def config_path():
conf = None
if os.name == 'nt':
conf = os.path.expandvars("%%appdata%%/.%s/environments.json" % CONFIG_DIRNAME)
else:
conf = os.path.expanduser("~/.%s/environments.json" % CONFIG_DIRNAME)
return conf
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def config():
conf = config_path()
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def style_header(text, width = 0):
if not text:
return ''
width = max(len(text) + HEADER_JUST * 2, width)
pad = ' ' * width
output = '\n%s%s\n%s\n%s%s\n' % (HEADER_STYLE, pad, text.center(width), pad, Style.RESET_ALL)
return output
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def style_text(text, styles, ljust=0, rjust=0, cen=0, lpad=0, rpad=0, pad=0, char=' ', restore=''):
if not text:
return ''
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def style_multiline(text, styles, ljust=0, rjust=0, cen=0, lpad=0, rpad=0, pad=0, char=' '):
if not text:
return ''
lines = text.split('\n')
fmt_text = ''
for text in lines:
text = style_text(text, styles, ljust, rjust, cen, lpad, rpad, pad, char)
fmt_text += text + '\n'
return fmt_text
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def update_config(configuration=None, add=None):
"""
Update the environments configuration on-disk.
"""
existing_config = config()
conf = config_path()
print(style_header('Zookeeper Environments'))
print("")
print(style_text('config:', TITLE_STYLE, pad=2), end='')
print(style_text(conf, INPUT_STYLE))
print(style_multiline(json.dumps(existing_config, indent=4, sort_keys=True), INFO_STYLE, lpad=4))
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def clusterstate(zookeepers, all_hosts, node='clusterstate.json'):
"""
Print clusterstatus.json contents
"""
zk_hosts = parse_zk_hosts(zookeepers, all_hosts=all_hosts)
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def show_node(zookeepers, node, all_hosts=False, leader=False, debug=False, interactive=False):
"""
Show a zookeeper node on one or more servers.
If the node has children, the children are displayed,
If the node doesn't have children, the contents of the node are displayed.
If leader is specified, only the leader is queried for the node
If all_hosts is specified, each zk host provided is queried individually... if the results
are different between nodes, the child nodes that are different will be highlighted.
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def watch(zookeepers, node, leader=False):
"""
Watch a particular zookeeper node for changes.
"""
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def my_listener(state):
if state == KazooState.LOST:
# Register somewhere that the session was lost
print(style_text('Connection Lost', ERROR_STYLE, pad=2))
elif state == KazooState.SUSPENDED:
# Handle being disconnected from Zookeeper
print(style_text('Connection Suspended', ERROR_STYLE, pad=2))
else:
# Handle being connected/reconnected to Zookeeper
# what are we supposed to do here?
print(style_text('Connected/Reconnected', INFO_STYLE, pad=2))
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def watch_children(children):
global WATCH_COUNTER
WATCH_COUNTER += 1
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def watch_data(data, stat, event):
global WATCH_COUNTER
WATCH_COUNTER += 1
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def admin_command(zookeepers, command, all_hosts=False, leader=False):
"""
Execute an administrative command
"""
command = text_type(command) # ensure we have unicode py2/py3
zk_hosts = parse_zk_hosts(zookeepers, all_hosts=all_hosts, leader=leader)
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def sessions_reset(zookeepers, server_id=None, ephemeral=False, solr=False):
"""
Reset connections/sessions to Zookeeper.
"""
# TODO support --clients / --solrj option ?
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def get_all_sessions(zk_client):
# Get connection/session information
conn_results = multi_admin_command(zk_client, b'cons')
conn_data = map(parse_admin_cons, conn_results)
conn_data = list(itertools.chain.from_iterable(conn_data))
# Get a dict of all valid zookeeper sessions as integers
return {con['sid']: con['client'][0] for con in conn_data if con.get('sid')}
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def break_session(zookeepers, session):
with tlock:
s_style = style_text("%s" % str(session_id), STATS_STYLE)
print(style_text("Resetting session: %s(%s)" % (s_style, all_sessions[session_id]), INFO_STYLE, lpad=2))
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def health_check(zookeepers):
zk_client = KazooClient(zookeepers)
for check in (check_zookeeper_connectivity,
check_ephemeral_sessions_fast,
check_ephemeral_znode_consistency,
check_ephemeral_dump_consistency,
check_watch_sessions_clients,
check_watch_sessions_duplicate,
check_queue_sizes,
check_watch_sessions_valid,
check_overseer_election):
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def cli():
"""
Build the CLI menu
"""
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_json(arg):
try:
data = json.loads(arg)
except ValueError as e:
raise argparse.ArgumentTypeError("invalid json: %s" % e)
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_env(arg):
try:
env_config = config()
except ValueError as e:
raise argparse.ArgumentTypeError('Cannot read configuration %s' % e)
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_zk(arg):
hosts = arg.split('/')[0]
hosts = hosts.split(',')
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_add(arg):
if '=' not in arg:
raise argparse.ArgumentTypeError("You must use the syntax ENVIRONMENT=127.0.0.1:2181")
env, zk = arg.split('=')
verify_zk(zk)
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_node(arg):
if not arg.startswith('/'):
raise argparse.ArgumentTypeError("Zookeeper nodes start with /")
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def verify_cmd(arg):
if arg.lower() not in ZK_ADMIN_CMDS:
raise argparse.ArgumentTypeError("Invalid command '%s'... \nValid Commands: %s" % (arg, '\n '.join(ZK_ADMIN_CMDS)))
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def main(argv=None):
|
bendemott/solr-zkutil
|
[
15,
1,
15,
18,
1486525374
] |
def do_something(fLOG=None):
if fLOG:
fLOG("Did something.")
return 3
|
sdpython/pyquickhelper
|
[
21,
10,
21,
22,
1388194285
] |
def __init__(self):
"constructor"
self.buffer = StringIO()
|
sdpython/pyquickhelper
|
[
21,
10,
21,
22,
1388194285
] |
def meth(self):
pass
|
github/codeql
|
[
5783,
1304,
5783,
842,
1533054951
] |
def meth(self):
return super().meth()
|
github/codeql
|
[
5783,
1304,
5783,
842,
1533054951
] |
def meth(self):
return super().meth()
|
github/codeql
|
[
5783,
1304,
5783,
842,
1533054951
] |
def meth(self):
return super().meth()
|
github/codeql
|
[
5783,
1304,
5783,
842,
1533054951
] |
def meth(self):
return super().meth()
|
github/codeql
|
[
5783,
1304,
5783,
842,
1533054951
] |
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
|
Azure/azure-sdk-for-python
|
[
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
|
Azure/azure-sdk-for-python
|
[
3526,
2256,
3526,
986,
1335285972
] |
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
|
Azure/azure-sdk-for-python
|
[
3526,
2256,
3526,
986,
1335285972
] |
def get(
self,
location_name, # type: str
**kwargs # type: Any
|
Azure/azure-sdk-for-python
|
[
3526,
2256,
3526,
986,
1335285972
] |
def __init__(self, *args, **kwargs):
self.events = kwargs.pop('events', [])
self.raw = kwargs.pop('raw', None)
self.it_pos = 0
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def __getitem__(self, val):
if isinstance(val, int):
return self.events[val]
elif isinstance(val, slice):
return History(events=self.events[val])
raise TypeError("Unknown slice format: %s" % type(val))
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def __iter__(self):
return self
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def last(self):
"""Returns the last stored event
:rtype: swf.models.event.Event
"""
return self.events[-1]
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def first(self):
"""Returns the first stored event
:rtype: swf.models.event.Event
"""
return self.events[0]
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def finished(self):
"""Checks if the History matches with a finished Workflow
Execution history state.
"""
completion_states = (
'completed',
'failed',
'canceled',
'terminated'
)
if (isinstance(self.last, WorkflowExecutionEvent) and
self.last.state in completion_states):
return True
return False
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def reversed(self):
for i in xrange(len(self.events) - 1, -1, -1):
yield self.events[i]
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def distinct(self):
"""Extracts distinct history events based on their types
:rtype: list of swf.models.event.Event
"""
distinct_events = []
for key, group in groupby(self.events, lambda e: e.type):
g = list(group)
# Merge every WorkflowExecution events into same group
if (len(g) == 1 and
len(distinct_events) >= 1 and
g[0].type == "WorkflowExecution"):
# WorfklowExecution group will always be in first position
distinct_events[0].extend(g)
else:
distinct_events.append(list(g))
return distinct_events
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def compiled(self):
"""Compiled history version
:rtype: swf.models.history.History made of swf.models.event.CompiledEvent
"""
return self.compile()
|
botify-labs/python-simple-workflow
|
[
18,
4,
18,
15,
1364383242
] |
def annotate(
base: F,
parents: Callable[[F], List[F]],
decorate: Callable[[F], Tuple[List[L], bytes]],
diffopts: mdiff.diffopts,
skip: Optional[Callable[[F], bool]] = None,
|
facebookexperimental/eden
|
[
4737,
192,
4737,
106,
1462467227
] |
def __mapper_args__(cls):
name = cls.__name__
if name == 'ConfigurableOption':
return {
'polymorphic_on': 'type_',
'polymorphic_identity': 'configurable_option'
}
else:
return {'polymorphic_identity': camel_case_to_name(name)}
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def query(cls, *args):
query = super(ConfigurableOption, cls).query(*args)
query = query.filter(ConfigurableOption.active == True)
query = query.order_by(ConfigurableOption.order)
return query
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def move_up(self):
"""
Move the current instance up in the category's order
"""
order = self.order
if order > 0:
new_order = order - 1
self.__class__.insert(self, new_order)
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def get_next_order(cls):
"""
:returns: The next available order
:rtype: int
"""
query = DBSESSION().query(func.max(cls.order)).filter_by(active=True)
query = query.filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
)
query = query.first()
if query is not None and query[0] is not None:
result = query[0] + 1
else:
result = 0
return result
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def _query_active_items(cls):
"""
Build a query to collect active items of the current class
:rtype: :class:`sqlalchemy.Query`
"""
return DBSESSION().query(cls).filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
).filter_by(active=True)
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def insert(cls, item, new_order):
"""
Place the item at the given index
:param obj item: The item to move
:param int new_order: The new index of the item
"""
query = cls._query_active_items()
items = query.filter(cls.id != item.id).order_by(cls.order).all()
items.insert(new_order, item)
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def reorder(cls):
"""
Regenerate order attributes
"""
items = cls._query_active_items().order_by(cls.order).all()
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def test_default_disable():
from autonomie.forms.user.user import deferred_company_disable_default
companies = [Dummy(employees=range(2))]
user = Dummy(companies=companies)
req = Dummy(context=user)
assert not deferred_company_disable_default("", {'request': req})
companies = [Dummy(employees=[1])]
user = Dummy(companies=companies)
req = Dummy(context=user)
assert(deferred_company_disable_default("", {'request': req}))
|
CroissanceCommune/autonomie
|
[
21,
14,
21,
284,
1381245170
] |
def test_crisp_jaccard_sim(self):
"""Test abydos.distance.Jaccard.sim (crisp)."""
# Base cases
self.assertEqual(self.cmp_j_crisp.sim('', ''), 1.0)
self.assertEqual(self.cmp_j_crisp.sim('a', ''), 0.0)
self.assertEqual(self.cmp_j_crisp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_j_crisp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_j_crisp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_j_crisp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_j_crisp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(
self.cmp_j_crisp.sim('Nigel', 'Niall'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_j_crisp.sim('Niall', 'Nigel'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_j_crisp.sim('Colin', 'Coiln'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_j_crisp.sim('Coiln', 'Colin'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_j_crisp.sim('ATCAACGAGT', 'AACGATTAG'), 0.5
)
|
chrislit/abydos
|
[
154,
26,
154,
63,
1398235847
] |
def test_fuzzy_jaccard_sim(self):
"""Test abydos.distance.Jaccard.sim (fuzzy)."""
# Base cases
self.assertEqual(self.cmp_j_fuzzy.sim('', ''), 1.0)
self.assertEqual(self.cmp_j_fuzzy.sim('a', ''), 0.0)
self.assertEqual(self.cmp_j_fuzzy.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_j_fuzzy.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_j_fuzzy.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_j_fuzzy.sim('abc', 'abc'), 1.0)
self.assertAlmostEqual(
self.cmp_j_fuzzy.sim('abcd', 'efgh'), 0.1111111111111111
)
self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Nigel', 'Niall'), 0.5)
self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Niall', 'Nigel'), 0.5)
self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Colin', 'Coiln'), 0.6)
self.assertAlmostEqual(self.cmp_j_fuzzy.sim('Coiln', 'Colin'), 0.6)
self.assertAlmostEqual(
self.cmp_j_fuzzy.sim('ATCAACGAGT', 'AACGATTAG'), 0.68
)
self.assertEqual(sum(self.cmp_j_fuzzy._union().values()), 11.0)
self.assertAlmostEqual(
Jaccard(intersection_type='fuzzy').sim('synonym', 'antonym'),
0.3333333333333333,
)
|
chrislit/abydos
|
[
154,
26,
154,
63,
1398235847
] |
def test_token_distance(self):
"""Test abydos.distance._TokenDistance members."""
self.assertAlmostEqual(
Jaccard(intersection_type='soft', alphabet=24).sim(
'ATCAACGAGT', 'AACGATTAG'
),
0.68,
)
self.assertAlmostEqual(
Jaccard(qval=1, alphabet='CGAT').sim('ATCAACGAGT', 'AACGATTAG'),
0.9,
)
self.assertAlmostEqual(
Jaccard(tokenizer=QSkipgrams(qval=3), alphabet='CGAT').sim(
'ATCAACGAGT', 'AACGATTAG'
),
0.6372795969773299,
)
self.assertAlmostEqual(
Jaccard(alphabet=None).sim('synonym', 'antonym'),
0.3333333333333333,
)
self.assertAlmostEqual(
Jaccard(tokenizer=QSkipgrams(qval=3)).sim('synonym', 'antonym'),
0.34146341463414637,
)
src_ctr = Counter({'a': 5, 'b': 2, 'c': 10})
tar_ctr = Counter({'a': 2, 'c': 1, 'd': 3, 'e': 12})
self.assertAlmostEqual(Jaccard().sim(src_ctr, tar_ctr), 0.09375)
self.assertAlmostEqual(
SokalMichener(normalizer='proportional').sim('synonym', 'antonym'),
0.984777917351113,
)
self.assertAlmostEqual(
SokalMichener(normalizer='log').sim('synonym', 'antonym'),
1.2385752469545532,
)
self.assertAlmostEqual(
SokalMichener(normalizer='exp', alphabet=0).sim(
'synonym', 'antonym'
),
3.221246147982545e18,
)
self.assertAlmostEqual(
SokalMichener(normalizer='laplace').sim('synonym', 'antonym'),
0.98856416772554,
)
self.assertAlmostEqual(
SokalMichener(normalizer='inverse').sim('synonym', 'antonym'),
197.95790155440417,
)
self.assertAlmostEqual(
SokalMichener(normalizer='complement').sim('synonym', 'antonym'),
1.0204081632653061,
)
self.assertAlmostEqual(
SokalMichener(normalizer='base case').sim('synonym', 'antonym'),
0.9897959183673469,
)
self.assertAlmostEqual(
SokalMichener().sim('synonym', 'antonym'), 0.9897959183673469
)
sm = SokalMichener()
sm._tokenize('synonym', 'antonym') # noqa: SF01
self.assertEqual(
sm._get_tokens(), # noqa: SF01
(
Counter(
{
'$s': 1,
'sy': 1,
'yn': 1,
'no': 1,
'on': 1,
'ny': 1,
'ym': 1,
'm#': 1,
}
),
Counter(
{
'$a': 1,
'an': 1,
'nt': 1,
'to': 1,
'on': 1,
'ny': 1,
'ym': 1,
'm#': 1,
}
),
),
)
self.assertEqual(sm._src_card(), 8) # noqa: SF01
self.assertEqual(sm._tar_card(), 8) # noqa: SF01
self.assertEqual(
sm._symmetric_difference(), # noqa: SF01
Counter(
{
'$s': 1,
'sy': 1,
'yn': 1,
'no': 1,
'$a': 1,
'an': 1,
'nt': 1,
'to': 1,
}
),
)
self.assertEqual(sm._symmetric_difference_card(), 8) # noqa: SF01
self.assertEqual(sm._total_complement_card(), 772) # noqa: SF01
self.assertEqual(sm._population_card(), 788) # noqa: SF01
self.assertEqual(
sm._union(), # noqa: SF01
Counter(
{
'$s': 1,
'sy': 1,
'yn': 1,
'no': 1,
'on': 1,
'ny': 1,
'ym': 1,
'm#': 1,
'$a': 1,
'an': 1,
'nt': 1,
'to': 1,
}
),
)
self.assertEqual(sm._union_card(), 12) # noqa: SF01
self.assertEqual(
sm._difference(), # noqa: SF01
Counter(
{
'$s': 1,
'sy': 1,
'yn': 1,
'no': 1,
'on': 0,
'ny': 0,
'ym': 0,
'm#': 0,
'$a': -1,
'an': -1,
'nt': -1,
'to': -1,
}
),
)
self.assertEqual(
sm._intersection(), # noqa: SF01
Counter({'on': 1, 'ny': 1, 'ym': 1, 'm#': 1}),
)
self.assertEqual(
sm._get_confusion_table(), # noqa: SF01
ConfusionTable(tp=4, tn=772, fp=4, fn=4),
)
sm = SokalMichener(
alphabet=Counter({'C': 20, 'G': 20, 'A': 20, 'T': 20}), qval=1
)
sm._tokenize('ATCAACGAGT', 'AACGATTAG') # noqa: SF01
self.assertEqual(sm._total_complement_card(), 61) # noqa: SF01
self.assertAlmostEqual(
self.cmp_j_linkage.sim('abandonned', 'abandoned'),
0.9090909090909091,
)
self.assertAlmostEqual(
self.cmp_j_linkage.sim('abundacies', 'abundances'),
0.6923076923076923,
)
# Some additional constructors needed to complete test coverage
self.assertAlmostEqual(
Jaccard(alphabet=None, qval=range(2, 4)).sim('abc', 'abcd'),
0.42857142857142855,
)
self.assertAlmostEqual(
AverageLinkage(qval=range(2, 4)).sim('abc', 'abcd'),
0.22558922558922556,
)
self.assertAlmostEqual(
Jaccard(alphabet='abcdefghijklmnop', qval=range(2, 4)).sim(
'abc', 'abcd'
),
0.42857142857142855,
)
self.assertAlmostEqual(
Jaccard(
alphabet='abcdefghijklmnop', tokenizer=WhitespaceTokenizer()
).sim('abc', 'abcd'),
0.0,
)
self.assertAlmostEqual(
Jaccard(alphabet=list('abcdefghijklmnop')).sim('abc', 'abcd'), 0.5
)
self.assertAlmostEqual(
Jaccard(tokenizer=CharacterTokenizer()).sim('abc', 'abcd'), 0.75
)
cmp_j_soft = Jaccard(intersection_type='soft')
self.assertEqual(cmp_j_soft._src_card(), 0) # noqa: SF01
self.assertEqual(cmp_j_soft._tar_card(), 0) # noqa: SF01
self.assertEqual(cmp_j_soft._src_only(), Counter()) # noqa: SF01
self.assertEqual(cmp_j_soft._tar_only(), Counter()) # noqa: SF01
self.assertEqual(cmp_j_soft._total(), Counter()) # noqa: SF01
self.assertEqual(cmp_j_soft._union(), Counter()) # noqa: SF01
self.assertEqual(cmp_j_soft._difference(), Counter()) # noqa: SF01
cmp_j_soft.sim('abcd', 'abcde')
self.assertEqual(cmp_j_soft._src_card(), 5) # noqa: SF01
self.assertEqual(cmp_j_soft._tar_card(), 6) # noqa: SF01
self.assertEqual(
cmp_j_soft._src_only(), Counter({'#': 0.5}) # noqa: SF01
)
self.assertEqual(
cmp_j_soft._tar_only(), Counter({'e#': 1, 'e': 0.5}) # noqa: SF01
)
self.assertEqual(
cmp_j_soft._total(), # noqa: SF01
Counter(
{
'e#': 1,
'e': 0.5,
'#': 0.5,
'$a': 2,
'ab': 2,
'bc': 2,
'cd': 2,
'd': 1.0,
}
),
)
self.assertEqual(
cmp_j_soft._union(), # noqa: SF01
Counter(
{
'e#': 1,
'e': 0.5,
'#': 0.5,
'$a': 1,
'ab': 1,
'bc': 1,
'cd': 1,
'd': 0.5,
}
),
)
self.assertEqual(
cmp_j_soft._difference(), # noqa: SF01
Counter({'#': 0.5, 'e#': -1, 'e': -0.5}),
)
|
chrislit/abydos
|
[
154,
26,
154,
63,
1398235847
] |
def should_show_debug_toolbar(request): # lint-amnesty, pylint: disable=missing-function-docstring
# We always want the toolbar on devstack unless running tests from another Docker container
hostname = request.get_host()
if hostname.startswith('edx.devstack.lms:') or hostname.startswith('lms.devstack.edx:'):
return False
return True
|
eduNEXT/edx-platform
|
[
5,
3,
5,
6,
1390926698
] |
def main(argv):
del argv # Unused.
if FLAGS.use_tpu:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
if FLAGS.mode in ('train',
'train_and_eval') and FLAGS.training_file_pattern is None:
raise RuntimeError('You must specify --training_file_pattern for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('You must specify'
'--validation_file_pattern for evaluation.')
# Parse hparams
hparams = retinanet_segmentation_model.default_hparams()
hparams.parse(FLAGS.hparams)
params = dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
use_tpu=FLAGS.use_tpu,
resnet_checkpoint=FLAGS.resnet_checkpoint,
mode=FLAGS.mode,
)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master='',
model_dir=FLAGS.model_dir,
keep_checkpoint_max=3,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=contrib_tpu.TPUConfig(
FLAGS.iterations_per_loop,
FLAGS.num_shards,
per_host_input_for_training=(
contrib_tpu.InputPipelineConfig.PER_HOST_V2)))
model_fn = retinanet_segmentation_model.segmentation_model_fn
# TPU Estimator
eval_params = dict(
params,
use_tpu=FLAGS.use_tpu,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
)
if FLAGS.mode == 'train':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size),
)
if FLAGS.eval_after_training:
# Run evaluation on CPU after training finishes.
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
elif FLAGS.mode == 'eval':
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
def terminate_eval():
tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
FLAGS.eval_timeout)
return True
# Run evaluation when there's a new checkpoint
for ckpt in contrib_training.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout,
timeout_fn=terminate_eval):
tf.logging.info('Starting to evaluate.')
try:
# Note that if the eval_samples size is not fully divided by the
# eval_batch_size. The remainder will be dropped and result in
# differet evaluation performance than validating on the full set.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
total_step = int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size)
if current_step >= total_step:
tf.logging.info('Evaluation finished after training step %d' %
current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info('Checkpoint %s no longer exists, skipping checkpoint' %
ckpt)
elif FLAGS.mode == 'train_and_eval':
train_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
eval_estimator = contrib_tpu.TPUEstimator(
model_fn=retinanet_segmentation_model.segmentation_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
for cycle in range(0, FLAGS.num_epochs):
tf.logging.info('Starting training cycle, epoch: %d.' % cycle)
train_estimator.train(
input_fn=dataloader.SegmentationInputReader(
FLAGS.training_file_pattern, is_training=True),
steps=int(FLAGS.num_examples_per_epoch / FLAGS.train_batch_size))
tf.logging.info('Starting evaluation cycle, epoch: {:d}.'.format(
cycle + 1))
# Run evaluation after training finishes.
eval_results = eval_estimator.evaluate(
input_fn=dataloader.SegmentationInputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples//FLAGS.eval_batch_size)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Mode not found.')
|
tensorflow/tpu
|
[
5035,
1773,
5035,
290,
1499817279
] |
def __init__(self, syntax, value=None, subtrees=None):
"""Initializer.
Args:
syntax: string representation of syntax
value: string representation of actual value
subtrees: list of tuple(edge_type, subtree nodes or single node)
"""
self.syntax = syntax
self.value = value
self.children = []
if subtrees is not None:
for e_type, children in subtrees:
if isinstance(children, list):
for c in children:
add_edge(parent_node=self, child_node=c, edge_type=e_type)
else:
add_edge(parent_node=self, child_node=children, edge_type=e_type)
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def add_child(self, edge_type, child_node):
self.children.append((edge_type, child_node))
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def __str__(self):
st = '(' + self.get_name()
for _, c in self.children:
st += c.__str__()
st += ')'
return st
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def pprint(self, tab_cnt=0):
if self.syntax == 'RegexTok' or self.syntax == 'ConstTok':
st = ' ' * tab_cnt + self.syntax + '('
_, p1 = self.children[0]
_, p2 = self.children[1]
_, direct = self.children[2]
name = p1.value
st += '%s, %d, %s)' % (name, p2.value, direct.value)
print(st)
return
st = ' ' * tab_cnt + self.get_name()
print(st)
for _, c in self.children:
c.pprint(tab_cnt=tab_cnt + 1)
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def add_edge(parent_node, child_node, edge_type):
parent_node.add_child(edge_type, child_node)
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def __init__(self, tree_root, node_types=RFILL_NODE_TYPES, edge_types=RFILL_EDGE_TYPES, add_rev_edge=True):
"""Initializer.
Args:
tree_root: ProgNode type; the root of tree representation
node_types: dict of nodetype to index
edge_types: dict of edgetype to index
add_rev_edge: whether add reversed edge
"""
self.tree_root = tree_root
self.add_rev_edge = add_rev_edge
self.node_types = node_types
self.edge_types = edge_types
# list of tree nodes
self.node_list = []
# node feature index
self.node_feats = []
# list of (from_idx, to_idx, etype_int) tuples
self.edge_list = []
self.last_terminal = None # used for linking terminals
self.build_graph(self.tree_root)
self.num_nodes = len(self.node_list)
self.num_edges = len(self.edge_list)
# unzipped version of edge list
# self.from_list, self.to_list, self.edge_feats = \
# [np.array(x, dtype=np.int32) for x in zip(*self.edge_list)]
self.node_feats = np.array(self.node_feats, dtype=np.int32)
self.subexpr_ids = []
for _, c in self.tree_root.children:
self.subexpr_ids.append(c.index)
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def add_bidir_edge(self, from_idx, to_idx, etype_str):
assert etype_str in self.edge_types
self.edge_list.append((from_idx, to_idx, self.edge_types[etype_str]))
if self.add_rev_edge:
# add reversed edge
rev_etype_str = 'rev-' + etype_str
assert rev_etype_str in self.edge_types
self.edge_list.append((to_idx, from_idx, self.edge_types[rev_etype_str]))
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def init_env_globals():
"""Set module level values from environment variables.
Encapsulated here to enable better testing.
"""
global C7N_SKIP_EVTERR, C7N_DEBUG_EVENT, C7N_CATCH_ERR
C7N_SKIP_EVTERR = os.environ.get(
'C7N_SKIP_ERR_EVENT', 'yes') == 'yes' and True or False
C7N_DEBUG_EVENT = os.environ.get(
'C7N_DEBUG_EVENT', 'yes') == 'yes' and True or False
C7N_CATCH_ERR = os.environ.get(
'C7N_CATCH_ERR', 'no').strip().lower() == 'yes' and True or False
|
kapilt/cloud-custodian
|
[
2,
2,
2,
8,
1461493242
] |
def setUp(self):
super(UtilTest, self).setUp()
self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def test_save_load_variable(self):
    """Round-trips several value shapes through file_util save/load.

    Covers plain and nested dicts, 2-level defaultdicts, a large list,
    a numpy array, a zip iterator, and an itertools.tee object (whose
    picklability differs between Python 2 and 3).
    """
    file_path = os.path.join(self._output_dir, 'test_output_data.pkl')
    # Case 1: Nested dictionary.
    data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 2: 2-level nested dictionary.
    data = collections.defaultdict(
        lambda: collections.defaultdict(list))
    data['first']['A'] = [1, 2, 3]
    data['first']['B'] = [1, 2, 3]
    data['second']['B'] = [1, 2, 3]
    data['second']['C'] = [1, 2, 3]
    data['third']['C'] = [1, 2, 3]
    data['third']['D'] = [1, 2, 3]
    data['path'] = 'asdfas/asdf/asdfasdf/'
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 3: Large array. If the size is too large, the test will timeout.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertListEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, list)
    # Case 4: numpy array.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
    data = np.array(data)
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    np.testing.assert_array_equal(data, actual_variable)
    self.assertIsInstance(actual_variable, np.ndarray)
    # Case 5: A list of tuples.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    # Saving zip variable does not affect the iterative variable.
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    # python2 treats `actual_variable` as a list, however, python3 treats it as
    # an iterative object.
    self.assertListEqual(list(actual_variable), list(data))
    # Case 6: In python2, the itertools.tee cannot be saved by cPickle. However,
    # in python3, it can be saved.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    data_tee, _ = itertools.tee(data)
    python_version = sys.version_info[0]
    try:
        file_util.save_variable(file_path, data_tee)
        pickle_save_correctly = True
    except cPickle.PicklingError:
        pickle_save_correctly = False
    self.assertTrue((pickle_save_correctly and python_version == 3) or
                    (not pickle_save_correctly and python_version == 2))
|
google-research/google-research
|
[
27788,
6881,
27788,
944,
1538678568
] |
def _get_snapshot(address: str):
    """Fetch the dashboard snapshot from `address` and validate its schema.

    Args:
        address: base URL of the Ray dashboard (e.g. "http://127.0.0.1:8265").

    Returns:
        The parsed snapshot JSON payload.

    Raises:
        requests.HTTPError: if the snapshot endpoint returns an error status.
        jsonschema.ValidationError: if the payload does not match the schema.
    """
    response = requests.get(f"{address}/api/snapshot")
    response.raise_for_status()
    data = response.json()
    schema_path = os.path.join(
        os.path.dirname(dashboard.__file__), "modules/snapshot/snapshot_schema.json"
    )
    pprint.pprint(data)
    # Use a context manager so the schema file handle is closed promptly;
    # the original `json.load(open(...))` leaked the descriptor.
    with open(schema_path) as schema_file:
        schema = json.load(schema_file)
    jsonschema.validate(instance=data, schema=schema)
    return data
|
ray-project/ray
|
[
24488,
4264,
24488,
2914,
1477424310
] |
def wait_for_job_to_succeed():
    """Poll helper: True once the job shows SUCCEEDED in both snapshot views.

    Relies on enclosing-scope names (address, job_id, start_time_s,
    job_sleep_time_s) — presumably bound in the surrounding test; confirm.
    The asserts validate snapshot invariants on every poll, so a malformed
    snapshot fails the test immediately rather than timing out.
    """
    data = _get_snapshot(address)
    legacy_job_succeeded = False
    job_succeeded = False
    # Test legacy job snapshot (one driver per job).
    for job_entry in data["data"]["snapshot"]["jobs"].values():
        if job_entry["status"] is not None:
            assert job_entry["config"]["metadata"]["jobSubmissionId"] == job_id
            assert job_entry["status"] in {"PENDING", "RUNNING", "SUCCEEDED"}
            assert job_entry["statusMessage"] is not None
            legacy_job_succeeded = job_entry["status"] == "SUCCEEDED"
    # Test new jobs snapshot (0 to N drivers per job).
    for job_submission_id, entry in data["data"]["snapshot"][
        "jobSubmission"
    ].items():
        if entry["status"] is not None:
            assert entry["status"] in {"PENDING", "RUNNING", "SUCCEEDED"}
            assert entry["message"] is not None
            # TODO(architkulkarni): Disable automatic camelcase.
            assert entry["runtimeEnv"] == {"envVars": {"RAYTest123": "123"}}
            assert entry["metadata"] == {"rayTest456": "456"}
            assert entry["errorType"] is None
            assert abs(entry["startTime"] - start_time_s) <= 2
            if entry["status"] == "SUCCEEDED":
                job_succeeded = True
                assert entry["endTime"] >= entry["startTime"] + job_sleep_time_s
    return legacy_job_succeeded and job_succeeded
|
ray-project/ray
|
[
24488,
4264,
24488,
2914,
1477424310
] |
def test_failed_job_status(
ray_start_with_dashboard, disable_aiohttp_cache, enable_test_module
|
ray-project/ray
|
[
24488,
4264,
24488,
2914,
1477424310
] |
def wait_for_job_to_fail():
    """Poll helper: True once the job shows FAILED in both snapshot views.

    Mirror image of wait_for_job_to_succeed. Relies on enclosing-scope names
    (address, job_id, start_time_s, job_sleep_time_s) — presumably bound in
    the surrounding test; confirm. Asserts validate snapshot invariants on
    every poll.
    """
    data = _get_snapshot(address)
    legacy_job_failed = False
    job_failed = False
    # Test legacy job snapshot (one driver per job).
    for job_entry in data["data"]["snapshot"]["jobs"].values():
        if job_entry["status"] is not None:
            assert job_entry["config"]["metadata"]["jobSubmissionId"] == job_id
            assert job_entry["status"] in {"PENDING", "RUNNING", "FAILED"}
            assert job_entry["statusMessage"] is not None
            legacy_job_failed = job_entry["status"] == "FAILED"
    # Test new jobs snapshot (0 to N drivers per job).
    for job_submission_id, entry in data["data"]["snapshot"][
        "jobSubmission"
    ].items():
        if entry["status"] is not None:
            assert entry["status"] in {"PENDING", "RUNNING", "FAILED"}
            assert entry["message"] is not None
            # TODO(architkulkarni): Disable automatic camelcase.
            assert entry["runtimeEnv"] == {"envVars": {"RAYTest456": "456"}}
            assert entry["metadata"] == {"rayTest789": "789"}
            assert entry["errorType"] is None
            assert abs(entry["startTime"] - start_time_s) <= 2
            if entry["status"] == "FAILED":
                job_failed = True
                assert entry["endTime"] >= entry["startTime"] + job_sleep_time_s
    return legacy_job_failed and job_failed
|
ray-project/ray
|
[
24488,
4264,
24488,
2914,
1477424310
] |
def __virtual__():
    """
    Load only on Mac OS
    """
    if salt.utils.platform.is_darwin():
        return __virtualname__
    return (
        False,
        "The mac_utils utility could not be loaded: "
        "utility only works on MacOS systems.",
    )
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def _check_launchctl_stderr(ret):
    """
    helper to detect failures that launchctl only reports on stderr,
    since launchctl does not always return a bad exit code on failure.
    Returns True when the stderr text indicates a disabled service.
    """
    return "service is disabled" in ret["stderr"].lower()
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def execute_return_result(cmd):
    """
    Executes the passed command. Returns the standard out if successful
    :param str cmd: The command to run
    :return: The standard out of the command if successful, otherwise returns
        an error
    :rtype: str
    :raises: Error if command fails or is not supported
    """
    ret = _run_all(cmd)
    failed = ret["retcode"] != 0 or "not supported" in ret["stdout"].lower()
    if not failed:
        return ret["stdout"]
    msg = "Command Failed: {}\n".format(cmd)
    msg += "Return Code: {}\n".format(ret["retcode"])
    msg += "Output: {}\n".format(ret["stdout"])
    msg += "Error: {}\n".format(ret["stderr"])
    raise CommandExecutionError(msg)
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def validate_enabled(enabled):
    """
    Normalize an "enabled" flag to the strings "on" or "off".

    Booleans map the obvious way; integer 0 maps to "off" and any other
    integer to "on"; strings must be one of "on"/"off"/"yes"/"no"
    (case-insensitive), anything else raises.
    :param enabled: Enabled can be boolean True or False, Integers, or string
        values "on" and "off"/"yes" and "no".
    :type: str, int, bool
    :return: "on" or "off" or errors
    :rtype: str
    """
    if isinstance(enabled, str):
        normalized = enabled.lower()
        if normalized not in ("on", "off", "yes", "no"):
            msg = (
                "\nMac Power: Invalid String Value for Enabled.\n"
                "String values must be 'on' or 'off'/'yes' or 'no'.\n"
                "Passed: {}".format(enabled)
            )
            raise SaltInvocationError(msg)
        return "on" if normalized in ("on", "yes") else "off"
    return "on" if bool(enabled) else "off"
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def launchctl(sub_cmd, *args, **kwargs):
    """
    Run a launchctl command and raise an error if it fails

    Args: additional args are passed to launchctl
        sub_cmd (str): Sub command supplied to launchctl

    Kwargs: passed to ``cmd.run_all``
        return_stdout (bool): A keyword argument. If true return the stdout of
            the launchctl command

    Returns:
        bool: ``True`` if successful
        str: The stdout of the launchctl command if requested

    Raises:
        CommandExecutionError: If command fails

    CLI Example:
    .. code-block:: bash
        import salt.utils.mac_service
        salt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')
    """
    # Whether the caller wants stdout back instead of a plain True.
    return_stdout = kwargs.pop("return_stdout", False)
    # Assemble the launchctl invocation as an argv list (no shell).
    cmd = ["launchctl", sub_cmd]
    cmd.extend(args)
    if sub_cmd == "bootout":
        # fix for https://github.com/saltstack/salt/issues/57436
        kwargs["success_retcodes"] = [
            36,
        ]
    kwargs["python_shell"] = False
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    ret = __salt__["cmd.run_all"](cmd, **kwargs)
    # launchctl may fail without a nonzero exit code; check stderr too.
    stderr_failure = _check_launchctl_stderr(ret)
    if not ret["retcode"] and not stderr_failure:
        return ret["stdout"] if return_stdout else True
    out = "Failed to {} service:\n".format(sub_cmd)
    out += "stdout: {}\n".format(ret["stdout"])
    out += "stderr: {}\n".format(ret["stderr"])
    out += "retcode: {}".format(ret["retcode"])
    raise CommandExecutionError(out)
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def _available_services(refresh=False):
    """
    This is a helper function for getting the available macOS services.

    The strategy is to look through the known system locations for
    launchd plist files, parse them, and use their information for
    populating the list of services. Services can run without a plist
    file present, but normally services which have an automated startup
    will have a plist file, so this is a minor compromise.

    :param bool refresh: When True, bypass the cached result in
        ``__context__`` and re-walk the launchd directories.
    :return: mapping of lower-cased service label -> parsed plist data
    :rtype: dict
    """
    # Serve the cached result unless a refresh was requested.
    if "available_services" in __context__ and not refresh:
        log.debug("Found context for available services.")
        __context__["using_cached_services"] = True
        return __context__["available_services"]
    # System-wide launchd locations.
    launchd_paths = {
        "/Library/LaunchAgents",
        "/Library/LaunchDaemons",
        "/System/Library/LaunchAgents",
        "/System/Library/LaunchDaemons",
    }
    # Add each user's personal LaunchAgents directory, when it exists.
    agent_path = "/Users/{}/Library/LaunchAgents"
    launchd_paths.update(
        {
            agent_path.format(user)
            for user in os.listdir("/Users/")
            if os.path.isdir(agent_path.format(user))
        }
    )
    result = {}
    for launch_dir in launchd_paths:
        for root, dirs, files in salt.utils.path.os_walk(launch_dir):
            for file_name in files:
                # _read_plist_file returns falsy for unparsable plists.
                data = _read_plist_file(root, file_name)
                if data:
                    result[data["plist"]["Label"].lower()] = data
    # put this in __context__ as this is a time consuming function.
    # a fix for this issue. https://github.com/saltstack/salt/issues/48414
    __context__["available_services"] = result
    # this is a fresh gathering of services, set cached to false
    __context__["using_cached_services"] = False
    return result
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def console_user(username=False):
    """
    Gets the UID or Username of the current console user.

    :return: The uid or username of the console user.
    :param bool username: Whether to return the username of the console
        user instead of the UID. Defaults to False
    :rtype: Integer of the UID, or a string of the username.

    Raises:
        CommandExecutionError: If we fail to get the UID.

    CLI Example:
    .. code-block:: bash
        import salt.utils.mac_service
        salt.utils.mac_service.console_user()
    """
    try:
        # /dev/console is owned by the currently logged-in console user, so
        # its owner uid identifies that user. Use the named st_uid attribute
        # rather than the opaque stat tuple index [4].
        uid = os.stat("/dev/console").st_uid
    except OSError:
        # we should never get here but raise an error if so
        raise CommandExecutionError("Failed to get a UID for the console user.")
    if username:
        # pw_name is the login name field of the passwd entry.
        return pwd.getpwuid(uid).pw_name
    return uid
|
saltstack/salt
|
[
13089,
5388,
13089,
3074,
1298233016
] |
def unknown_operation(num_qubits: int) -> 'SymbolInfo':
    """Generates a SymbolInfo object for an unknown operation.

    Each qubit gets the placeholder label '?' rendered in gray.

    Args:
        num_qubits: the number of qubits in the operation
    """
    info = SymbolInfo([], [])
    info.labels.extend('?' for _ in range(num_qubits))
    info.colors.extend('gray' for _ in range(num_qubits))
    return info
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def __call__(self, operation: cirq.Operation) -> Optional[SymbolInfo]:
    """Alias for resolve(); lets a resolver instance be used as a callable."""
    return self.resolve(operation)
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def resolve(self, operation: cirq.Operation) -> Optional[SymbolInfo]:
    """Converts cirq.Operation objects into SymbolInfo objects for serialization."""
    # NOTE(review): docstring-only body — appears to be an abstract hook that
    # subclasses override (implicitly returns None here). Confirm intent.
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def resolve(self, operation: cirq.Operation) -> Optional[SymbolInfo]:
    """Builds symbol information from an operation's circuit diagram info.

    If the operation does not support cirq.circuit_diagram_info, a
    placeholder 'unknown operation' symbol is produced instead.

    Args:
        operation: the cirq.Operation object to resolve
    """
    try:
        diagram_info = cirq.circuit_diagram_info(operation)
    except TypeError:
        return SymbolInfo.unknown_operation(cirq.num_qubits(operation))
    labels = diagram_info._wire_symbols_including_formatted_exponent(
        CircuitDiagramInfoArgs.UNINFORMED_DEFAULT
    )
    colors = [
        DefaultResolver._SYMBOL_COLORS.get(symbol, 'gray')
        for symbol in diagram_info.wire_symbols
    ]
    return SymbolInfo(list(labels), colors)
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def resolve_operation(operation: cirq.Operation, resolvers: Iterable[SymbolResolver]) -> SymbolInfo:
    """Builds a SymbolInfo object based off of a designated operation
    and list of resolvers.

    Every resolver is consulted in order; the last one returning a
    non-None result wins (the latest resolver takes precedence).

    Args:
        operation: the cirq.Operation object to resolve
        resolvers: a list of SymbolResolvers which provides instructions
            on how to build SymbolInfo objects.
    Raises:
        ValueError: if the operation cannot be resolved into a symbol.
    """
    symbol_info = None
    for resolver in resolvers:
        resolved = resolver(operation)
        symbol_info = resolved if resolved is not None else symbol_info
    if symbol_info is None:
        raise ValueError(f'Cannot resolve operation: {operation}')
    return symbol_info
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def __init__(self, wire_symbols, location_info, color_info, moment):
    """Gathers symbol information from an operation and builds an
    object to represent it in 3D.
    Args:
        wire_symbols: a list of symbols taken from circuit_diagram_info()
            that will be used to represent the operation in the 3D circuit.
        location_info: A list of coordinates for each wire_symbol. The
            index of the coordinate tuple in the location_info list must
            correspond with the index of the symbol in the wire_symbols list.
        color_info: a list representing the desired color of the symbol(s).
            These will also correspond to index of the symbol in the
            wire_symbols list.
        moment: the moment where the symbol should be.
    """
    self.wire_symbols = wire_symbols      # display labels, one per qubit
    self.location_info = location_info    # coordinates parallel to wire_symbols
    self.color_info = color_info          # colors parallel to wire_symbols
    self.moment = moment                  # time slice the symbol belongs to
|
quantumlib/Cirq
|
[
3678,
836,
3678,
314,
1513294909
] |
def process(self, fp, artifact):
    """Parse a phase-config JSON artifact and expand it into jobs.

    Unparsable JSON and ArtifactParseError from expand_jobs are both
    reported as a malformed artifact; any other expansion failure marks
    the step as an infrastructure failure and persists that result.
    """
    try:
        phase_config = json.load(fp)
    except ValueError:
        # Artifact is not valid JSON at all.
        uri = build_web_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
        self.logger.warning('Failed to parse json; (step=%s, build=%s)', self.step.id.hex, uri, exc_info=True)
        self.report_malformed()
    else:
        _, implementation = JobPlan.get_build_step_for_job(job_id=self.step.job_id)
        try:
            implementation.expand_jobs(self.step, phase_config)
        except ArtifactParseError:
            # JSON parsed, but its contents don't match the expected schema.
            uri = build_web_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
            self.logger.warning('malformed %s artifact (step=%s, build=%s)', self.FILENAMES[0],
                                self.step.id.hex, uri, exc_info=True)
            self.report_malformed()
        except Exception:
            # Unexpected failure during expansion: blame infrastructure,
            # not the artifact, and persist the step result.
            uri = build_web_uri('/find_build/{0}/'.format(self.step.job.build_id.hex))
            self.logger.warning('expand_jobs failed (step=%s, build=%s)', self.step.id.hex, uri, exc_info=True)
            self.step.result = Result.infra_failed
            db.session.add(self.step)
            db.session.commit()
|
dropbox/changes
|
[
762,
65,
762,
11,
1379727686
] |
def protest (response, message):
    """Send a 200 plain-text reply carrying `message` back to the client."""
    response.send_response(200)
    response.send_header('Content-type','text/plain')
    response.end_headers()
    response.wfile.write(message) # Should probably be JSON
|
Comcast/rulio
|
[
339,
58,
339,
28,
1447456380
] |
def do_GET(self):
    """GET is unsupported; tell the caller to POST JSON instead."""
    protest(self, "You should POST with json.\n")
    return
|
Comcast/rulio
|
[
339,
58,
339,
28,
1447456380
] |
def modifyFiles(self):
    """No-op; presumably a hook for subclass recipes to override."""
    pass
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(self):
if self.withUse:
if Use.readline:
pass
if self.withBinary:
self.Run('''
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def modifyFile(self, path):
    """Return a sed command rewriting leading '5' to '1' in *path* under %(destdir)s."""
    command = 'sed -i s/^5/1/g %(destdir)s' + path
    return command
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(self):
    # Delegate entirely to the base recipe's setup.
    TestRecipe1.setup(self)
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def modifyFile(self, path):
    """Return a sed command rewriting leading '6' to '2' in *path* under %(destdir)s."""
    command = 'sed -i s/^6/2/g %(destdir)s' + path
    return command
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(self):
    # Delegate entirely to the base recipe's setup.
    TestRecipe1.setup(self)
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(self):
    TestRecipe1.setup(self)
    # Exclude everything under /etc from Config handling — presumably so
    # those files are not treated as config files; confirm policy semantics.
    self.Config(exceptions = "/etc/.*")
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(r):
    TestRecipe1.setup(r)
    # Remove the four files the base recipe set up (r.changed etc. are
    # presumably path attributes defined by TestRecipe1 — confirm).
    r.Remove(r.changed)
    r.Remove(r.unchanged)
    r.Remove(r.changedconfig)
    r.Remove(r.unchangedconfig)
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(r):
    # Create /foo with the recipe's fileText and mark it transient.
    r.Create('/foo', contents=r.fileText)
    r.Transient('/foo')
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(r):
    # Create /foo with the recipe's fileText and mark it transient.
    r.Create('/foo', contents=r.fileText)
    r.Transient('/foo')
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
def setup(r):
    #don't create foo
    # Create /foo2 instead, with the recipe's fileText, and mark it transient.
    r.Create('/foo2', contents=r.fileText)
    r.Transient('/foo2')
|
sassoftware/conary
|
[
47,
9,
47,
4,
1396904066
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.