repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---|
pedrotgn/pyactor | pyactor/context.py | Host.load_transport | def load_transport(self, url):
'''
For remote communication. Sets the communication dispatcher of the host
at the address and port specified.
The scheme must be http if using an XMLRPC dispatcher and
amqp for RabbitMQ communications.
This method is internal. It is automatically called when creating the host.
:param str. url: URL where to bind the host. Must be provided in
the typical form: 'scheme://address:port/hierarchical_path'
'''
aurl = urlparse(url)
addrl = aurl.netloc.split(':')
self.addr = addrl[0], addrl[1]
self.transport = aurl.scheme
self.host_url = aurl
if aurl.scheme == 'http':
self.launch_actor('http', rpcactor.RPCDispatcher(url, self, 'rpc'))
elif aurl.scheme == 'amqp':
self.launch_actor('amqp', rpcactor.RPCDispatcher(url, self,
'rabbit')) | python | def load_transport(self, url):
'''
For remote communication. Sets the communication dispatcher of the host
at the address and port specified.
The scheme must be http if using an XMLRPC dispatcher and
amqp for RabbitMQ communications.
This method is internal. It is automatically called when creating the host.
:param str. url: URL where to bind the host. Must be provided in
the typical form: 'scheme://address:port/hierarchical_path'
'''
aurl = urlparse(url)
addrl = aurl.netloc.split(':')
self.addr = addrl[0], addrl[1]
self.transport = aurl.scheme
self.host_url = aurl
if aurl.scheme == 'http':
self.launch_actor('http', rpcactor.RPCDispatcher(url, self, 'rpc'))
elif aurl.scheme == 'amqp':
self.launch_actor('amqp', rpcactor.RPCDispatcher(url, self,
'rabbit')) | [
"def",
"load_transport",
"(",
"self",
",",
"url",
")",
":",
"aurl",
"=",
"urlparse",
"(",
"url",
")",
"addrl",
"=",
"aurl",
".",
"netloc",
".",
"split",
"(",
"':'",
")",
"self",
".",
"addr",
"=",
"addrl",
"[",
"0",
"]",
",",
"addrl",
"[",
"1",
"]",
"self",
".",
"transport",
"=",
"aurl",
".",
"scheme",
"self",
".",
"host_url",
"=",
"aurl",
"if",
"aurl",
".",
"scheme",
"==",
"'http'",
":",
"self",
".",
"launch_actor",
"(",
"'http'",
",",
"rpcactor",
".",
"RPCDispatcher",
"(",
"url",
",",
"self",
",",
"'rpc'",
")",
")",
"elif",
"aurl",
".",
"scheme",
"==",
"'amqp'",
":",
"self",
".",
"launch_actor",
"(",
"'amqp'",
",",
"rpcactor",
".",
"RPCDispatcher",
"(",
"url",
",",
"self",
",",
"'rabbit'",
")",
")"
] | For remote communication. Sets the communication dispatcher of the host
at the address and port specified.
The scheme must be http if using an XMLRPC dispatcher and
amqp for RabbitMQ communications.
This method is internal. It is automatically called when creating the host.
:param str. url: URL where to bind the host. Must be provided in
the typical form: 'scheme://address:port/hierarchical_path' | [
"For",
"remote",
"communication",
".",
"Sets",
"the",
"communication",
"dispatcher",
"of",
"the",
"host",
"at",
"the",
"address",
"and",
"port",
"specified",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L152-L176 | train |
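The transport selection above is driven entirely by the URL scheme. A minimal standalone sketch of the same parsing, assuming nothing beyond the standard library (written with Python 3's `urllib.parse`, since the source targets Python 2; the address is illustrative):

```python
# Illustrative only: mirrors how load_transport() splits the bind URL.
from urllib.parse import urlparse

url = 'http://127.0.0.1:1277/host'      # hypothetical bind address
aurl = urlparse(url)
addr = tuple(aurl.netloc.split(':'))    # ('127.0.0.1', '1277')
print(aurl.scheme, addr)                # 'http' selects the XMLRPC dispatcher,
                                        # 'amqp' would select RabbitMQ
```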
pedrotgn/pyactor | pyactor/context.py | Host.has_actor | def has_actor(self, aid):
'''
Checks if the given id is used in the host by some actor.
:param str. aid: identifier of the actor to check.
:return: True if the id is used within the host.
'''
url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
return url in self.actors.keys() | python | def has_actor(self, aid):
'''
Checks if the given id is used in the host by some actor.
:param str. aid: identifier of the actor to check.
:return: True if the id is used within the host.
'''
url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
return url in self.actors.keys() | [
"def",
"has_actor",
"(",
"self",
",",
"aid",
")",
":",
"url",
"=",
"'%s://%s/%s'",
"%",
"(",
"self",
".",
"transport",
",",
"self",
".",
"host_url",
".",
"netloc",
",",
"aid",
")",
"return",
"url",
"in",
"self",
".",
"actors",
".",
"keys",
"(",
")"
] | Checks if the given id is used in the host by some actor.
:param str. aid: identifier of the actor to check.
:return: True if the id is used within the host. | [
"Checks",
"if",
"the",
"given",
"id",
"is",
"used",
"in",
"the",
"host",
"by",
"some",
"actor",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L235-L243 | train |
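The membership test above relies on the host's actor registry being keyed by full URLs. A standalone sketch of that key scheme (all values are illustrative):

```python
# Illustrative only: the 'scheme://netloc/aid' keys used by has_actor().
transport, netloc = 'http', '127.0.0.1:1277'
actors = {'http://127.0.0.1:1277/worker1': object()}

url = '%s://%s/%s' % (transport, netloc, 'worker1')
print(url in actors)    # True
```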
pedrotgn/pyactor | pyactor/context.py | Host.stop_actor | def stop_actor(self, aid):
'''
This method removes one actor from the Host, stopping it and deleting
all its references.
:param str. aid: identifier of the actor you want to stop.
'''
url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
if url != self.url:
actor = self.actors[url]
Proxy(actor).stop()
actor.thread.join()
del self.actors[url]
del self.threads[actor.thread] | python | def stop_actor(self, aid):
'''
This method removes one actor from the Host, stopping it and deleting
all its references.
:param str. aid: identifier of the actor you want to stop.
'''
url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
if url != self.url:
actor = self.actors[url]
Proxy(actor).stop()
actor.thread.join()
del self.actors[url]
del self.threads[actor.thread] | [
"def",
"stop_actor",
"(",
"self",
",",
"aid",
")",
":",
"url",
"=",
"'%s://%s/%s'",
"%",
"(",
"self",
".",
"transport",
",",
"self",
".",
"host_url",
".",
"netloc",
",",
"aid",
")",
"if",
"url",
"!=",
"self",
".",
"url",
":",
"actor",
"=",
"self",
".",
"actors",
"[",
"url",
"]",
"Proxy",
"(",
"actor",
")",
".",
"stop",
"(",
")",
"actor",
".",
"thread",
".",
"join",
"(",
")",
"del",
"self",
".",
"actors",
"[",
"url",
"]",
"del",
"self",
".",
"threads",
"[",
"actor",
".",
"thread",
"]"
] | This method removes one actor from the Host, stopping it and deleting
all its references.
:param str. aid: identifier of the actor you want to stop. | [
"This",
"method",
"removes",
"one",
"actor",
"from",
"the",
"Host",
"stoping",
"it",
"and",
"deleting",
"all",
"its",
"references",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L304-L317 | train |
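A hedged usage sketch of the two registry methods shown so far; it assumes `host` is a running pyactor `Host` with an actor registered under the id `'worker1'` (both names are illustrative, not taken from the source):

```python
# Assumes an initialized, running Host instance named `host`.
if host.has_actor('worker1'):
    host.stop_actor('worker1')   # stops the actor, joins its thread,
                                 # and drops the host's references to it
```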
pedrotgn/pyactor | pyactor/context.py | Host.lookup_url | def lookup_url(self, url, klass, module=None):
'''
Gets a proxy reference to the actor indicated by the URL in the
parameters. It can be a local reference or a remote address on
another host.
This method can be called remotely synchronously.
:param str. url: address that identifies an actor.
:param class klass: the class of the actor.
:param str. module: if the actor class is not in the calling module,
you need to specify the module where it is here. Also, the *klass*
parameter must then be a string.
:return: :class:`~.Proxy` of the actor requested.
:raises: :class:`NotFoundError`, if the URL specified does not
correspond to any actor in the host.
:raises: :class:`HostDownError` if the host is down.
:raises: :class:`HostError` if there is an error looking for
the actor on another server.
'''
if not self.alive:
raise HostDownError()
aurl = urlparse(url)
if self.is_local(aurl):
if url not in self.actors.keys():
raise NotFoundError(url)
else:
return Proxy(self.actors[url])
else:
try:
dispatcher = self.actors[aurl.scheme]
if module is not None:
try:
module_ = __import__(module, globals(), locals(),
[klass], -1)
klass_ = getattr(module_, klass)
except Exception, e:
raise HostError("At lookup_url: " +
"Import failed for module " + module +
", class " + klass +
". Check this values for the lookup." +
" ERROR: " + str(e))
elif isinstance(klass, (types.TypeType, types.ClassType)):
klass_ = klass
else:
raise HostError("The class specified to look up is" +
" not a class.")
remote_actor = actor.ActorRef(url, klass_, dispatcher.channel)
return Proxy(remote_actor)
except HostError:
raise
except Exception, e:
raise HostError("ERROR looking for the actor on another " +
"server. Hosts must " +
"be in http to work properly. " + str(e)) | python | def lookup_url(self, url, klass, module=None):
'''
Gets a proxy reference to the actor indicated by the URL in the
parameters. It can be a local reference or a remote address on
another host.
This method can be called remotely synchronously.
:param str. url: address that identifies an actor.
:param class klass: the class of the actor.
:param str. module: if the actor class is not in the calling module,
you need to specify the module where it is here. Also, the *klass*
parameter must then be a string.
:return: :class:`~.Proxy` of the actor requested.
:raises: :class:`NotFoundError`, if the URL specified does not
correspond to any actor in the host.
:raises: :class:`HostDownError` if the host is down.
:raises: :class:`HostError` if there is an error looking for
the actor on another server.
'''
if not self.alive:
raise HostDownError()
aurl = urlparse(url)
if self.is_local(aurl):
if url not in self.actors.keys():
raise NotFoundError(url)
else:
return Proxy(self.actors[url])
else:
try:
dispatcher = self.actors[aurl.scheme]
if module is not None:
try:
module_ = __import__(module, globals(), locals(),
[klass], -1)
klass_ = getattr(module_, klass)
except Exception, e:
raise HostError("At lookup_url: " +
"Import failed for module " + module +
", class " + klass +
". Check this values for the lookup." +
" ERROR: " + str(e))
elif isinstance(klass, (types.TypeType, types.ClassType)):
klass_ = klass
else:
raise HostError("The class specified to look up is" +
" not a class.")
remote_actor = actor.ActorRef(url, klass_, dispatcher.channel)
return Proxy(remote_actor)
except HostError:
raise
except Exception, e:
raise HostError("ERROR looking for the actor on another " +
"server. Hosts must " +
"be in http to work properly. " + str(e)) | [
"def",
"lookup_url",
"(",
"self",
",",
"url",
",",
"klass",
",",
"module",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"alive",
":",
"raise",
"HostDownError",
"(",
")",
"aurl",
"=",
"urlparse",
"(",
"url",
")",
"if",
"self",
".",
"is_local",
"(",
"aurl",
")",
":",
"if",
"url",
"not",
"in",
"self",
".",
"actors",
".",
"keys",
"(",
")",
":",
"raise",
"NotFoundError",
"(",
"url",
")",
"else",
":",
"return",
"Proxy",
"(",
"self",
".",
"actors",
"[",
"url",
"]",
")",
"else",
":",
"try",
":",
"dispatcher",
"=",
"self",
".",
"actors",
"[",
"aurl",
".",
"scheme",
"]",
"if",
"module",
"is",
"not",
"None",
":",
"try",
":",
"module_",
"=",
"__import__",
"(",
"module",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"klass",
"]",
",",
"-",
"1",
")",
"klass_",
"=",
"getattr",
"(",
"module_",
",",
"klass",
")",
"except",
"Exception",
",",
"e",
":",
"raise",
"HostError",
"(",
"\"At lookup_url: \"",
"+",
"\"Import failed for module \"",
"+",
"module",
"+",
"\", class \"",
"+",
"klass",
"+",
"\". Check this values for the lookup.\"",
"+",
"\" ERROR: \"",
"+",
"str",
"(",
"e",
")",
")",
"elif",
"isinstance",
"(",
"klass",
",",
"(",
"types",
".",
"TypeType",
",",
"types",
".",
"ClassType",
")",
")",
":",
"klass_",
"=",
"klass",
"else",
":",
"raise",
"HostError",
"(",
"\"The class specified to look up is\"",
"+",
"\" not a class.\"",
")",
"remote_actor",
"=",
"actor",
".",
"ActorRef",
"(",
"url",
",",
"klass_",
",",
"dispatcher",
".",
"channel",
")",
"return",
"Proxy",
"(",
"remote_actor",
")",
"except",
"HostError",
":",
"raise",
"except",
"Exception",
",",
"e",
":",
"raise",
"HostError",
"(",
"\"ERROR looking for the actor on another \"",
"+",
"\"server. Hosts must \"",
"+",
"\"be in http to work properly. \"",
"+",
"str",
"(",
"e",
")",
")"
] | Gets a proxy reference to the actor indicated by the URL in the
parameters. It can be a local reference or a remote address on
another host.
This method can be called remotely synchronously.
:param str. url: address that identifies an actor.
:param class klass: the class of the actor.
:param str. module: if the actor class is not in the calling module,
you need to specify the module where it is here. Also, the *klass*
parameter must then be a string.
:return: :class:`~.Proxy` of the actor requested.
:raises: :class:`NotFoundError`, if the URL specified does not
correspond to any actor in the host.
:raises: :class:`HostDownError` if the host is down.
:raises: :class:`HostError` if there is an error looking for
the actor on another server. | [
"Gets",
"a",
"proxy",
"reference",
"to",
"the",
"actor",
"indicated",
"by",
"the",
"URL",
"in",
"the",
"parameters",
".",
"It",
"can",
"be",
"a",
"local",
"reference",
"or",
"a",
"remote",
"direction",
"to",
"another",
"host",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L319-L373 | train |
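The remote branch above resolves the actor class dynamically from its module and class names. A sketch of the same lookup written with Python 3's `importlib` (the source uses the Python 2 `__import__(..., -1)` form); the helper name is illustrative:

```python
# Illustrative Python 3 equivalent of the dynamic class resolution above.
import importlib

def resolve_class(module_name, class_name):
    module = importlib.import_module(module_name)   # ImportError on failure
    return getattr(module, class_name)              # AttributeError on failure

print(resolve_class('collections', 'Counter'))     # <class 'collections.Counter'>
```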
pedrotgn/pyactor | pyactor/context.py | Host.dumps | def dumps(self, param):
'''
Checks the parameters, generating new proxy instances to avoid
query concurrences from shared proxies and creating proxies for
actors from another host.
'''
if isinstance(param, Proxy):
module_name = param.actor.klass.__module__
filename = sys.modules[module_name].__file__
return ProxyRef(param.actor.url, param.actor.klass.__name__,
module_name)
elif isinstance(param, list):
return [self.dumps(elem) for elem in param]
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.dumps(new_dict[key])
return new_dict
elif isinstance(param, tuple):
return tuple([self.dumps(elem) for elem in param])
else:
return param | python | def dumps(self, param):
'''
Checks the parameters, generating new proxy instances to avoid
query concurrences from shared proxies and creating proxies for
actors from another host.
'''
if isinstance(param, Proxy):
module_name = param.actor.klass.__module__
filename = sys.modules[module_name].__file__
return ProxyRef(param.actor.url, param.actor.klass.__name__,
module_name)
elif isinstance(param, list):
return [self.dumps(elem) for elem in param]
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.dumps(new_dict[key])
return new_dict
elif isinstance(param, tuple):
return tuple([self.dumps(elem) for elem in param])
else:
return param | [
"def",
"dumps",
"(",
"self",
",",
"param",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"Proxy",
")",
":",
"module_name",
"=",
"param",
".",
"actor",
".",
"klass",
".",
"__module__",
"filename",
"=",
"sys",
".",
"modules",
"[",
"module_name",
"]",
".",
"__file__",
"return",
"ProxyRef",
"(",
"param",
".",
"actor",
".",
"url",
",",
"param",
".",
"actor",
".",
"klass",
".",
"__name__",
",",
"module_name",
")",
"elif",
"isinstance",
"(",
"param",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"dumps",
"(",
"elem",
")",
"for",
"elem",
"in",
"param",
"]",
"elif",
"isinstance",
"(",
"param",
",",
"dict",
")",
":",
"new_dict",
"=",
"param",
"for",
"key",
"in",
"new_dict",
".",
"keys",
"(",
")",
":",
"new_dict",
"[",
"key",
"]",
"=",
"self",
".",
"dumps",
"(",
"new_dict",
"[",
"key",
"]",
")",
"return",
"new_dict",
"elif",
"isinstance",
"(",
"param",
",",
"tuple",
")",
":",
"return",
"tuple",
"(",
"[",
"self",
".",
"dumps",
"(",
"elem",
")",
"for",
"elem",
"in",
"param",
"]",
")",
"else",
":",
"return",
"param"
] | Checks the parameters, generating new proxy instances to avoid
query concurrences from shared proxies and creating proxies for
actors from another host. | [
"Checks",
"the",
"parameters",
"generating",
"new",
"proxy",
"instances",
"to",
"avoid",
"query",
"concurrences",
"from",
"shared",
"proxies",
"and",
"creating",
"proxies",
"for",
"actors",
"from",
"another",
"host",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L419-L440 | train |
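`dumps()` (and its counterpart `loads()` below) is essentially a recursive container walk with a transform applied at the leaves. A standalone sketch of that pattern, with no pyactor types involved:

```python
# Illustrative only: the traversal shape shared by dumps() and loads().
def walk(value, transform):
    if isinstance(value, list):
        return [walk(v, transform) for v in value]
    if isinstance(value, tuple):
        return tuple(walk(v, transform) for v in value)
    if isinstance(value, dict):
        return {k: walk(v, transform) for k, v in value.items()}
    return transform(value)

print(walk({'a': [1, (2, 3)]}, lambda v: v * 10))   # {'a': [10, (20, 30)]}
```

One difference worth noting: the sketch builds new dicts, while `dumps()` assigns back into the caller's dict (`new_dict = param` only aliases it), so the original mapping is mutated in place.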
pedrotgn/pyactor | pyactor/context.py | Host.loads | def loads(self, param):
'''
Checks the return parameters, generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host.
'''
if isinstance(param, ProxyRef):
try:
return self.lookup_url(param.url, param.klass, param.module)
except HostError:
print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
raise HostError(param)
elif isinstance(param, list):
return [self.loads(elem) for elem in param]
elif isinstance(param, tuple):
return tuple([self.loads(elem) for elem in param])
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.loads(new_dict[key])
return new_dict
else:
return param | python | def loads(self, param):
'''
Checks the return parameters, generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host.
'''
if isinstance(param, ProxyRef):
try:
return self.lookup_url(param.url, param.klass, param.module)
except HostError:
print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
raise HostError(param)
elif isinstance(param, list):
return [self.loads(elem) for elem in param]
elif isinstance(param, tuple):
return tuple([self.loads(elem) for elem in param])
elif isinstance(param, dict):
new_dict = param
for key in new_dict.keys():
new_dict[key] = self.loads(new_dict[key])
return new_dict
else:
return param | [
"def",
"loads",
"(",
"self",
",",
"param",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"ProxyRef",
")",
":",
"try",
":",
"return",
"self",
".",
"lookup_url",
"(",
"param",
".",
"url",
",",
"param",
".",
"klass",
",",
"param",
".",
"module",
")",
"except",
"HostError",
":",
"print",
"\"Can't lookup for the actor received with the call. \\\n It does not exist or the url is unreachable.\"",
",",
"param",
"raise",
"HostError",
"(",
"param",
")",
"elif",
"isinstance",
"(",
"param",
",",
"list",
")",
":",
"return",
"[",
"self",
".",
"loads",
"(",
"elem",
")",
"for",
"elem",
"in",
"param",
"]",
"elif",
"isinstance",
"(",
"param",
",",
"tuple",
")",
":",
"return",
"tuple",
"(",
"[",
"self",
".",
"loads",
"(",
"elem",
")",
"for",
"elem",
"in",
"param",
"]",
")",
"elif",
"isinstance",
"(",
"param",
",",
"dict",
")",
":",
"new_dict",
"=",
"param",
"for",
"key",
"in",
"new_dict",
".",
"keys",
"(",
")",
":",
"new_dict",
"[",
"key",
"]",
"=",
"self",
".",
"loads",
"(",
"new_dict",
"[",
"key",
"]",
")",
"return",
"new_dict",
"else",
":",
"return",
"param"
] | Checks the return parameters, generating new proxy instances to
avoid query concurrences from shared proxies and creating
proxies for actors from another host. | [
"Checks",
"the",
"return",
"parameters",
"generating",
"new",
"proxy",
"instances",
"to",
"avoid",
"query",
"concurrences",
"from",
"shared",
"proxies",
"and",
"creating",
"proxies",
"for",
"actors",
"from",
"another",
"host",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L442-L465 | train |
pedrotgn/pyactor | pyactor/context.py | Host.new_parallel | def new_parallel(self, function, *params):
'''
Register a new thread executing a parallel method.
'''
# Create a pool if not created (processes or Gevent...)
if self.ppool is None:
if core_type == 'thread':
from multiprocessing.pool import ThreadPool
self.ppool = ThreadPool(500)
else:
from gevent.pool import Pool
self.ppool = Pool(500)
# Add the new task to the pool
self.ppool.apply_async(function, *params) | python | def new_parallel(self, function, *params):
'''
Register a new thread executing a parallel method.
'''
# Create a pool if not created (processes or Gevent...)
if self.ppool is None:
if core_type == 'thread':
from multiprocessing.pool import ThreadPool
self.ppool = ThreadPool(500)
else:
from gevent.pool import Pool
self.ppool = Pool(500)
# Add the new task to the pool
self.ppool.apply_async(function, *params) | [
"def",
"new_parallel",
"(",
"self",
",",
"function",
",",
"*",
"params",
")",
":",
"# Create a pool if not created (processes or Gevent...)",
"if",
"self",
".",
"ppool",
"is",
"None",
":",
"if",
"core_type",
"==",
"'thread'",
":",
"from",
"multiprocessing",
".",
"pool",
"import",
"ThreadPool",
"self",
".",
"ppool",
"=",
"ThreadPool",
"(",
"500",
")",
"else",
":",
"from",
"gevent",
".",
"pool",
"import",
"Pool",
"self",
".",
"ppool",
"=",
"Pool",
"(",
"500",
")",
"# Add the new task to the pool",
"self",
".",
"ppool",
".",
"apply_async",
"(",
"function",
",",
"*",
"params",
")"
] | Register a new thread executing a parallel method. | [
"Register",
"a",
"new",
"thread",
"executing",
"a",
"parallel",
"method",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/context.py#L467-L480 | train |
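A standalone sketch of the lazy pool creation above, thread backend only (the 500-worker pool size matches the source; the module-level `_pool` and the function signature are illustrative):

```python
# Illustrative only: create the pool on first use, then reuse it.
from multiprocessing.pool import ThreadPool

_pool = None

def new_parallel(function, args=()):
    global _pool
    if _pool is None:                    # first call pays the creation cost
        _pool = ThreadPool(500)
    _pool.apply_async(function, args)

new_parallel(print, ('hello',))
_pool.close(); _pool.join()              # let the queued task finish
```

Note that the source re-splats `*params` into `apply_async`, so in practice the caller's first extra argument is interpreted as the whole `args` tuple.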
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpSession.write_to_local | def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a remote file and write it locally."""
self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from,
filepath_to))
with SftpFile(self, filepath_from, 'r') as sf_from:
with open(filepath_to, 'wb') as file_to:
while 1:
part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
file_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
mtime_epoch = mktime(mtime_dt.timetuple())
utime(filepath_to, (mtime_epoch, mtime_epoch)) | python | def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a remote file and write it locally."""
self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from,
filepath_to))
with SftpFile(self, filepath_from, 'r') as sf_from:
with open(filepath_to, 'wb') as file_to:
while 1:
part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
file_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
mtime_epoch = mktime(mtime_dt.timetuple())
utime(filepath_to, (mtime_epoch, mtime_epoch)) | [
"def",
"write_to_local",
"(",
"self",
",",
"filepath_from",
",",
"filepath_to",
",",
"mtime_dt",
"=",
"None",
")",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Writing R[%s] -> L[%s].\"",
"%",
"(",
"filepath_from",
",",
"filepath_to",
")",
")",
"with",
"SftpFile",
"(",
"self",
",",
"filepath_from",
",",
"'r'",
")",
"as",
"sf_from",
":",
"with",
"open",
"(",
"filepath_to",
",",
"'wb'",
")",
"as",
"file_to",
":",
"while",
"1",
":",
"part",
"=",
"sf_from",
".",
"read",
"(",
"MAX_MIRROR_WRITE_CHUNK_SIZE",
")",
"file_to",
".",
"write",
"(",
"part",
")",
"if",
"len",
"(",
"part",
")",
"<",
"MAX_MIRROR_WRITE_CHUNK_SIZE",
":",
"break",
"if",
"mtime_dt",
"is",
"None",
":",
"mtime_dt",
"=",
"datetime",
".",
"now",
"(",
")",
"mtime_epoch",
"=",
"mktime",
"(",
"mtime_dt",
".",
"timetuple",
"(",
")",
")",
"utime",
"(",
"filepath_to",
",",
"(",
"mtime_epoch",
",",
"mtime_epoch",
")",
")"
] | Open a remote file and write it locally. | [
"Open",
"a",
"remote",
"file",
"and",
"write",
"it",
"locally",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L570-L589 | train |
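Both mirroring methods in this file use the same chunked-copy loop, where a short read signals end-of-file. A standalone sketch with in-memory streams (`CHUNK` stands in for `MAX_MIRROR_WRITE_CHUNK_SIZE`, whose value is not shown here):

```python
# Illustrative only: the copy loop shared by write_to_local()/write_to_remote().
import io

CHUNK = 64 * 1024

def copy_in_chunks(file_from, file_to, chunk=CHUNK):
    while True:
        part = file_from.read(chunk)
        file_to.write(part)
        if len(part) < chunk:    # a short read marks end-of-file in this loop
            break

dst = io.BytesIO()
copy_in_chunks(io.BytesIO(b'x' * 100000), dst)
print(dst.tell())                # 100000
```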
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpSession.write_to_remote | def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a local file and write it remotely."""
self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from,
filepath_to))
with open(filepath_from, 'rb') as file_from:
with SftpFile(self, filepath_to, 'w') as sf_to:
while 1:
part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
sf_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
self.utimes_dt(filepath_to, mtime_dt, mtime_dt) | python | def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
"""Open a local file and write it remotely."""
self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from,
filepath_to))
with open(filepath_from, 'rb') as file_from:
with SftpFile(self, filepath_to, 'w') as sf_to:
while 1:
part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
sf_to.write(part)
if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
break
if mtime_dt is None:
mtime_dt = datetime.now()
self.utimes_dt(filepath_to, mtime_dt, mtime_dt) | [
"def",
"write_to_remote",
"(",
"self",
",",
"filepath_from",
",",
"filepath_to",
",",
"mtime_dt",
"=",
"None",
")",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Writing L[%s] -> R[%s].\"",
"%",
"(",
"filepath_from",
",",
"filepath_to",
")",
")",
"with",
"open",
"(",
"filepath_from",
",",
"'rb'",
")",
"as",
"file_from",
":",
"with",
"SftpFile",
"(",
"self",
",",
"filepath_to",
",",
"'w'",
")",
"as",
"sf_to",
":",
"while",
"1",
":",
"part",
"=",
"file_from",
".",
"read",
"(",
"MAX_MIRROR_WRITE_CHUNK_SIZE",
")",
"sf_to",
".",
"write",
"(",
"part",
")",
"if",
"len",
"(",
"part",
")",
"<",
"MAX_MIRROR_WRITE_CHUNK_SIZE",
":",
"break",
"if",
"mtime_dt",
"is",
"None",
":",
"mtime_dt",
"=",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"utimes_dt",
"(",
"filepath_to",
",",
"mtime_dt",
",",
"mtime_dt",
")"
] | Open a local file and write it remotely. | [
"Open",
"a",
"local",
"file",
"and",
"write",
"it",
"remotely",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L591-L609 | train |
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFile.open | def open(self):
"""This is the only way to open a file resource."""
self.__sf = _sftp_open(self.__sftp_session_int,
self.__filepath,
self.access_type_int,
self.__create_mode)
if self.access_type_is_append is True:
self.seek(self.filesize)
return SftpFileObject(self) | python | def open(self):
"""This is the only way to open a file resource."""
self.__sf = _sftp_open(self.__sftp_session_int,
self.__filepath,
self.access_type_int,
self.__create_mode)
if self.access_type_is_append is True:
self.seek(self.filesize)
return SftpFileObject(self) | [
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"__sf",
"=",
"_sftp_open",
"(",
"self",
".",
"__sftp_session_int",
",",
"self",
".",
"__filepath",
",",
"self",
".",
"access_type_int",
",",
"self",
".",
"__create_mode",
")",
"if",
"self",
".",
"access_type_is_append",
"is",
"True",
":",
"self",
".",
"seek",
"(",
"self",
".",
"filesize",
")",
"return",
"SftpFileObject",
"(",
"self",
")"
] | This is the only way to open a file resource. | [
"This",
"is",
"the",
"only",
"way",
"to",
"open",
"a",
"file",
"resource",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L698-L709 | train |
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.read | def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b | python | def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"not",
"None",
":",
"return",
"self",
".",
"__sf",
".",
"read",
"(",
"size",
")",
"block_size",
"=",
"self",
".",
"__class__",
".",
"__block_size",
"b",
"=",
"bytearray",
"(",
")",
"received_bytes",
"=",
"0",
"while",
"1",
":",
"partial",
"=",
"self",
".",
"__sf",
".",
"read",
"(",
"block_size",
")",
"# self.__log.debug(\"Reading (%d) bytes. (%d) bytes returned.\" % ",
"# (block_size, len(partial)))",
"b",
".",
"extend",
"(",
"partial",
")",
"received_bytes",
"+=",
"len",
"(",
"partial",
")",
"if",
"len",
"(",
"partial",
")",
"<",
"block_size",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"End of file.\"",
")",
"break",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Read (%d) bytes for total-file.\"",
"%",
"(",
"received_bytes",
")",
")",
"return",
"b"
] | Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file. | [
"Read",
"a",
"length",
"of",
"bytes",
".",
"Return",
"empty",
"on",
"EOF",
".",
"If",
"size",
"is",
"omitted",
"return",
"whole",
"file",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L886-L912 | train |
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.seek | def seek(self, offset, whence=SEEK_SET):
"""Reposition the file pointer."""
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset) | python | def seek(self, offset, whence=SEEK_SET):
"""Reposition the file pointer."""
if whence == SEEK_SET:
self.__sf.seek(offset)
elif whence == SEEK_CUR:
self.__sf.seek(self.tell() + offset)
elif whence == SEEK_END:
self.__sf.seek(self.__sf.filesize - offset) | [
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"whence",
"=",
"SEEK_SET",
")",
":",
"if",
"whence",
"==",
"SEEK_SET",
":",
"self",
".",
"__sf",
".",
"seek",
"(",
"offset",
")",
"elif",
"whence",
"==",
"SEEK_CUR",
":",
"self",
".",
"__sf",
".",
"seek",
"(",
"self",
".",
"tell",
"(",
")",
"+",
"offset",
")",
"elif",
"whence",
"==",
"SEEK_END",
":",
"self",
".",
"__sf",
".",
"seek",
"(",
"self",
".",
"__sf",
".",
"filesize",
"-",
"offset",
")"
] | Reposition the file pointer. | [
"Reposition",
"the",
"file",
"pointer",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L919-L927 | train |
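The three `whence` modes above map onto a single absolute seek. A standalone sketch of that mapping; note that the source computes `SEEK_END` as `filesize - offset`, so callers pass a positive offset to move back from the end (unlike `os.lseek`, which expects a negative one):

```python
# Illustrative only: how seek() resolves whence to an absolute position.
from os import SEEK_SET, SEEK_CUR, SEEK_END

def absolute_offset(offset, whence, current_pos, filesize):
    if whence == SEEK_SET:
        return offset
    if whence == SEEK_CUR:
        return current_pos + offset
    if whence == SEEK_END:
        return filesize - offset      # mirrors the source's convention
    raise ValueError("unknown whence: %r" % whence)

print(absolute_offset(10, SEEK_END, 0, 100))   # 90
```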
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.readline | def readline(self, size=None):
"""Read a single line of text with EOF."""
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line | python | def readline(self, size=None):
"""Read a single line of text with EOF."""
# TODO: Add support for Unicode.
(line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
if self.__sf.access_type_has_universal_nl and nl is not None:
self.__newlines[nl] = True
return line | [
"def",
"readline",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"# TODO: Add support for Unicode.",
"(",
"line",
",",
"nl",
")",
"=",
"self",
".",
"__buffer",
".",
"read_until_nl",
"(",
"self",
".",
"__retrieve_data",
")",
"if",
"self",
".",
"__sf",
".",
"access_type_has_universal_nl",
"and",
"nl",
"is",
"not",
"None",
":",
"self",
".",
"__newlines",
"[",
"nl",
"]",
"=",
"True",
"return",
"line"
] | Read a single line of text with EOF. | [
"Read",
"a",
"single",
"line",
"of",
"text",
"with",
"EOF",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L960-L969 | train |
dsoprea/PySecure | pysecure/adapters/sftpa.py | SftpFileObject.__retrieve_data | def __retrieve_data(self):
"""Read more data from the file."""
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block | python | def __retrieve_data(self):
"""Read more data from the file."""
if self.__eof is True:
return b''
logging.debug("Reading another block.")
block = self.read(self.__block_size)
if block == b'':
self.__log.debug("We've encountered the EOF.")
self.__eof = True
return block | [
"def",
"__retrieve_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"__eof",
"is",
"True",
":",
"return",
"b''",
"logging",
".",
"debug",
"(",
"\"Reading another block.\"",
")",
"block",
"=",
"self",
".",
"read",
"(",
"self",
".",
"__block_size",
")",
"if",
"block",
"==",
"b''",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"We've encountered the EOF.\"",
")",
"self",
".",
"__eof",
"=",
"True",
"return",
"block"
] | Read more data from the file. | [
"Read",
"more",
"data",
"from",
"the",
"file",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/sftpa.py#L971-L983 | train |
CI-WATER/gsshapy | gsshapy/modeling/model.py | GSSHAModel.set_mask_from_shapefile | def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db) | python | def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db) | [
"def",
"set_mask_from_shapefile",
"(",
"self",
",",
"shapefile_path",
",",
"cell_size",
")",
":",
"# make sure paths are absolute as the working directory changes",
"shapefile_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"shapefile_path",
")",
"# ADD MASK",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_directory",
")",
":",
"mask_name",
"=",
"'{0}.msk'",
".",
"format",
"(",
"self",
".",
"project_manager",
".",
"name",
")",
"msk_file",
"=",
"WatershedMaskFile",
"(",
"project_file",
"=",
"self",
".",
"project_manager",
",",
"session",
"=",
"self",
".",
"db_session",
")",
"msk_file",
".",
"generateFromWatershedShapefile",
"(",
"shapefile_path",
",",
"cell_size",
"=",
"cell_size",
",",
"out_raster_path",
"=",
"mask_name",
",",
"load_raster_to_db",
"=",
"self",
".",
"load_rasters_to_db",
")"
] | Adds a mask from a shapefile | [
"Adds",
"a",
"mask",
"from",
"a",
"shapefile"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L196-L211 | train |
CI-WATER/gsshapy | gsshapy/modeling/model.py | GSSHAModel.set_elevation | def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db) | python | def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db) | [
"def",
"set_elevation",
"(",
"self",
",",
"elevation_grid_path",
",",
"mask_shapefile",
")",
":",
"# ADD ELEVATION FILE",
"ele_file",
"=",
"ElevationGridFile",
"(",
"project_file",
"=",
"self",
".",
"project_manager",
",",
"session",
"=",
"self",
".",
"db_session",
")",
"ele_file",
".",
"generateFromRaster",
"(",
"elevation_grid_path",
",",
"mask_shapefile",
",",
"load_raster_to_db",
"=",
"self",
".",
"load_rasters_to_db",
")"
] | Adds elevation file to project | [
"Adds",
"elevation",
"file",
"to",
"project"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L213-L222 | train |
CI-WATER/gsshapy | gsshapy/modeling/model.py | GSSHAModel.set_outlet | def set_outlet(self, latitude, longitude, outslope):
"""
Adds outlet point to project
"""
self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
outslope=outslope) | python | def set_outlet(self, latitude, longitude, outslope):
"""
Adds outlet point to project
"""
self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
outslope=outslope) | [
"def",
"set_outlet",
"(",
"self",
",",
"latitude",
",",
"longitude",
",",
"outslope",
")",
":",
"self",
".",
"project_manager",
".",
"setOutlet",
"(",
"latitude",
"=",
"latitude",
",",
"longitude",
"=",
"longitude",
",",
"outslope",
"=",
"outslope",
")"
] | Adds outlet point to project | [
"Adds",
"outlet",
"point",
"to",
"project"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L224-L229 | train |
CI-WATER/gsshapy | gsshapy/modeling/model.py | GSSHAModel.set_event | def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
# ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration) | python | def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
# ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration) | [
"def",
"set_event",
"(",
"self",
",",
"simulation_start",
"=",
"None",
",",
"simulation_duration",
"=",
"None",
",",
"simulation_end",
"=",
"None",
",",
"rain_intensity",
"=",
"2",
",",
"rain_duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"30",
"*",
"60",
")",
",",
"event_type",
"=",
"'EVENT'",
",",
")",
":",
"# ADD TEMPORTAL EVENT INFORMAITON",
"if",
"event_type",
"==",
"'LONG_TERM'",
":",
"self",
".",
"event",
"=",
"LongTermMode",
"(",
"self",
".",
"project_manager",
",",
"self",
".",
"db_session",
",",
"self",
".",
"project_directory",
",",
"simulation_start",
"=",
"simulation_start",
",",
"simulation_end",
"=",
"simulation_end",
",",
"simulation_duration",
"=",
"simulation_duration",
",",
")",
"else",
":",
"# 'EVENT'",
"self",
".",
"event",
"=",
"EventMode",
"(",
"self",
".",
"project_manager",
",",
"self",
".",
"db_session",
",",
"self",
".",
"project_directory",
",",
"simulation_start",
"=",
"simulation_start",
",",
"simulation_duration",
"=",
"simulation_duration",
",",
")",
"self",
".",
"event",
".",
"add_uniform_precip_event",
"(",
"intensity",
"=",
"rain_intensity",
",",
"duration",
"=",
"rain_duration",
")"
] | Initializes event for GSSHA model | [
"Initializes",
"event",
"for",
"GSSHA",
"model"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L259-L287 | train |
CI-WATER/gsshapy | gsshapy/modeling/model.py | GSSHAModel.write | def write(self):
"""
Write project to directory
"""
# write data
self.project_manager.writeInput(session=self.db_session,
directory=self.project_directory,
name=self.project_manager.name) | python | def write(self):
"""
Write project to directory
"""
# write data
self.project_manager.writeInput(session=self.db_session,
directory=self.project_directory,
name=self.project_manager.name) | [
"def",
"write",
"(",
"self",
")",
":",
"# write data",
"self",
".",
"project_manager",
".",
"writeInput",
"(",
"session",
"=",
"self",
".",
"db_session",
",",
"directory",
"=",
"self",
".",
"project_directory",
",",
"name",
"=",
"self",
".",
"project_manager",
".",
"name",
")"
] | Write project to directory | [
"Write",
"project",
"to",
"directory"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/model.py#L289-L296 | train |
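Taken together, the `GSSHAModel` methods in this file support a small build-and-write workflow. A hedged usage sketch: it assumes `model` is an already-initialized `GSSHAModel` (the constructor is not part of this excerpt), and every path, cell size, and coordinate below is illustrative:

```python
# Assumes an initialized GSSHAModel instance named `model`.
from datetime import timedelta

model.set_mask_from_shapefile('watershed_boundary.shp', cell_size=90)
model.set_elevation('elevation.tif', mask_shapefile='watershed_boundary.shp')
model.set_outlet(latitude=34.5, longitude=-86.6, outslope=0.002)
model.set_event(simulation_duration=timedelta(hours=6),
                rain_intensity=2,
                rain_duration=timedelta(minutes=30),
                event_type='EVENT')
model.write()
```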
dsoprea/PySecure | pysecure/sftp_mirror.py | SftpMirror.mirror | def mirror(self, handler, path_from, path_to, log_files=False):
"""Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
"""
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir) | python | def mirror(self, handler, path_from, path_to, log_files=False):
"""Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving.
"""
q = deque([''])
while q:
path = q.popleft()
full_from = ('%s/%s' % (path_from, path)) if path else path_from
full_to = ('%s/%s' % (path_to, path)) if path else path_to
subdirs = handler(full_from, full_to, log_files)
for subdir in subdirs:
q.append(('%s/%s' % (path, subdir)) if path else subdir) | [
"def",
"mirror",
"(",
"self",
",",
"handler",
",",
"path_from",
",",
"path_to",
",",
"log_files",
"=",
"False",
")",
":",
"q",
"=",
"deque",
"(",
"[",
"''",
"]",
")",
"while",
"q",
":",
"path",
"=",
"q",
".",
"popleft",
"(",
")",
"full_from",
"=",
"(",
"'%s/%s'",
"%",
"(",
"path_from",
",",
"path",
")",
")",
"if",
"path",
"else",
"path_from",
"full_to",
"=",
"(",
"'%s/%s'",
"%",
"(",
"path_to",
",",
"path",
")",
")",
"if",
"path",
"else",
"path_to",
"subdirs",
"=",
"handler",
"(",
"full_from",
",",
"full_to",
",",
"log_files",
")",
"for",
"subdir",
"in",
"subdirs",
":",
"q",
".",
"append",
"(",
"(",
"'%s/%s'",
"%",
"(",
"path",
",",
"subdir",
")",
")",
"if",
"path",
"else",
"subdir",
")"
] | Recursively mirror the contents of "path_from" into "path_to".
"handler" should be self.mirror_to_local_no_recursion or
self.mirror_to_remote_no_recursion to represent which way the files are
moving. | [
"Recursively",
"mirror",
"the",
"contents",
"of",
"path_from",
"into",
"path_to",
".",
"handler",
"should",
"be",
"self",
".",
"mirror_to_local_no_recursion",
"or",
"self",
".",
"mirror_to_remote_no_recursion",
"to",
"represent",
"which",
"way",
"the",
"files",
"are",
"moving",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/sftp_mirror.py#L26-L42 | train |
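A hedged usage sketch of the breadth-first mirror above: `mirror` is assumed to be an initialized `SftpMirror` instance (its constructor is not shown), and `mirror_to_local_no_recursion` is the local-direction handler the docstring names; the paths are illustrative:

```python
# Assumes an initialized SftpMirror instance named `mirror`.
mirror.mirror(mirror.mirror_to_local_no_recursion,
              '/remote/projects',       # path_from (remote side here)
              '/home/user/projects',    # path_to (local side here)
              log_files=True)
```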
CI-WATER/gsshapy | gsshapy/orm/lnd.py | LinkNodeDatasetFile.linkToChannelInputFile | def linkToChannelInputFile(self, session, channelInputFile, force=False):
"""
Create database relationships between the link node dataset and the channel input file.
The link node dataset only stores references to the links and nodes--not the geometry. The link and node
geometries are stored in the channel input file. The two files must be linked with database relationships to
allow the creation of link node dataset visualizations.
This process is not performed automatically during reading, because it can be very costly in terms of read time.
This operation can only be performed after both files have been read into the database.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with
this link node dataset file.
force (bool, optional): Force channel input file reassignment. When false (default), channel input file
assignment is skipped if it has already been performed.
"""
# Only perform operation if the channel input file has not been assigned or the force parameter is true
if self.channelInputFile is not None and not force:
return
# Set the channel input file relationship
self.channelInputFile = channelInputFile
# Retrieve the fluvial stream links
orderedLinks = channelInputFile.getOrderedLinks(session)
# Retrieve the LinkNodeTimeStep objects
timeSteps = self.timeSteps
# Link each link dataset in each time step
for timeStep in timeSteps:
# Retrieve link datasets
linkDatasets = timeStep.linkDatasets
# Link each node dataset
for l, linkDataset in enumerate(linkDatasets):
# Get the fluvial link and nodes
streamLink = orderedLinks[l]
streamNodes = streamLink.nodes
# Link link datasets to fluvial links
linkDataset.link = streamLink
# Retrieve node datasets
nodeDatasets = linkDataset.nodeDatasets
# Link the node dataset with the channel input file nodes
if len(nodeDatasets) > 0 and len(streamNodes) > 0:
for n, nodeDataset in enumerate(nodeDatasets):
nodeDataset.node = streamNodes[n]
session.add(self)
session.commit() | python | def linkToChannelInputFile(self, session, channelInputFile, force=False):
"""
Create database relationships between the link node dataset and the channel input file.
The link node dataset only stores references to the links and nodes--not the geometry. The link and node
geometries are stored in the channel input file. The two files must be linked with database relationships to
allow the creation of link node dataset visualizations.
This process is not performed automatically during reading, because it can be very costly in terms of read time.
This operation can only be performed after both files have been read into the database.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with
this link node dataset file.
force (bool, optional): Force channel input file reassignment. When false (default), channel input file
assignment is skipped if it has already been performed.
"""
# Only perform operation if the channel input file has not been assigned or the force parameter is true
if self.channelInputFile is not None and not force:
return
# Set the channel input file relationship
self.channelInputFile = channelInputFile
# Retrieve the fluvial stream links
orderedLinks = channelInputFile.getOrderedLinks(session)
# Retrieve the LinkNodeTimeStep objects
timeSteps = self.timeSteps
# Link each link dataset in each time step
for timeStep in timeSteps:
# Retrieve link datasets
linkDatasets = timeStep.linkDatasets
# Link each node dataset
for l, linkDataset in enumerate(linkDatasets):
# Get the fluvial link and nodes
streamLink = orderedLinks[l]
streamNodes = streamLink.nodes
# Link link datasets to fluvial links
linkDataset.link = streamLink
# Retrieve node datasets
nodeDatasets = linkDataset.nodeDatasets
# Link the node dataset with the channel input file nodes
if len(nodeDatasets) > 0 and len(streamNodes) > 0:
for n, nodeDataset in enumerate(nodeDatasets):
nodeDataset.node = streamNodes[n]
session.add(self)
session.commit() | [
"def",
"linkToChannelInputFile",
"(",
"self",
",",
"session",
",",
"channelInputFile",
",",
"force",
"=",
"False",
")",
":",
"# Only perform operation if the channel input file has not been assigned or the force parameter is true",
"if",
"self",
".",
"channelInputFile",
"is",
"not",
"None",
"and",
"not",
"force",
":",
"return",
"# Set the channel input file relationship",
"self",
".",
"channelInputFile",
"=",
"channelInputFile",
"# Retrieve the fluvial stream links",
"orderedLinks",
"=",
"channelInputFile",
".",
"getOrderedLinks",
"(",
"session",
")",
"# Retrieve the LinkNodeTimeStep objects",
"timeSteps",
"=",
"self",
".",
"timeSteps",
"# Link each link dataset in each time step",
"for",
"timeStep",
"in",
"timeSteps",
":",
"# Retrieve link datasets",
"linkDatasets",
"=",
"timeStep",
".",
"linkDatasets",
"# Link each node dataset",
"for",
"l",
",",
"linkDataset",
"in",
"enumerate",
"(",
"linkDatasets",
")",
":",
"# Get the fluvial link and nodes",
"streamLink",
"=",
"orderedLinks",
"[",
"l",
"]",
"streamNodes",
"=",
"streamLink",
".",
"nodes",
"# Link link datasets to fluvial links",
"linkDataset",
".",
"link",
"=",
"streamLink",
"# Retrieve node datasets",
"nodeDatasets",
"=",
"linkDataset",
".",
"nodeDatasets",
"# Link the node dataset with the channel input file nodes",
"if",
"len",
"(",
"nodeDatasets",
")",
">",
"0",
"and",
"len",
"(",
"streamNodes",
")",
">",
"0",
":",
"for",
"n",
",",
"nodeDataset",
"in",
"enumerate",
"(",
"nodeDatasets",
")",
":",
"nodeDataset",
".",
"node",
"=",
"streamNodes",
"[",
"n",
"]",
"session",
".",
"add",
"(",
"self",
")",
"session",
".",
"commit",
"(",
")"
] | Create database relationships between the link node dataset and the channel input file.
The link node dataset only stores references to the links and nodes--not the geometry. The link and node
geometries are stored in the channel input file. The two files must be linked with database relationships to
allow the creation of link node dataset visualizations.
This process is not performed automatically during reading, because it can be very costly in terms of read time.
This operation can only be performed after both files have been read into the database.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with
this link node dataset file.
force (bool, optional): Force channel input file reassignment. When false (default), channel input file
assignment is skipped if it has already been performed. | [
"Create",
"database",
"relationships",
"between",
"the",
"link",
"node",
"dataset",
"and",
"the",
"channel",
"input",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L76-L131 | train |
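A hedged usage sketch of the linking step above; `session`, `channel_input_file`, and `lnd_file` are assumed to already exist, with both files read into the same database beforehand:

```python
# Assumes both files were read into the database bound to `session`.
lnd_file.linkToChannelInputFile(session, channel_input_file)

# Re-running is a no-op unless the assignment is forced:
lnd_file.linkToChannelInputFile(session, channel_input_file, force=True)
```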
CI-WATER/gsshapy | gsshapy/orm/lnd.py | LinkNodeDatasetFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Link Node Dataset File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Keywords/cards that divide the file into chunks
KEYWORDS = ('NUM_LINKS',
'TIME_STEP',
'NUM_TS',
'START_TIME',
'TS')
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
self.name = f.readline().strip()
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'NUM_LINKS':
# NUM_LINKS handler
self.numLinks = schunk[1]
elif card == 'TIME_STEP':
# TIME_STEP handler
self.timeStepInterval = schunk[1]
elif card == 'NUM_TS':
# NUM_TS handler
self.numTimeSteps = schunk[1]
elif card == 'START_TIME':
# START_TIME handler
self.startTime = '%s %s %s %s %s %s' % (
schunk[1],
schunk[2],
schunk[3],
schunk[4],
schunk[5],
schunk[6])
elif card == 'TS':
# TS handler
for line in chunk:
sline = line.strip().split()
token = sline[0]
# Cases
if token == 'TS':
# Time Step line handler
timeStep = LinkNodeTimeStep(timeStep=sline[1])
timeStep.linkNodeDataset = self
else:
# Split the line
spLinkLine = line.strip().split()
# Create LinkDataset GSSHAPY object
linkDataset = LinkDataset()
linkDataset.numNodeDatasets = int(spLinkLine[0])
linkDataset.timeStep = timeStep
linkDataset.linkNodeDatasetFile = self
# Parse line into NodeDatasets
NODE_VALUE_INCREMENT = 2
statusIndex = 1
valueIndex = statusIndex + 1
# Parse line into node datasets
if linkDataset.numNodeDatasets > 0:
for i in range(0, linkDataset.numNodeDatasets):
# Create NodeDataset GSSHAPY object
nodeDataset = NodeDataset()
nodeDataset.status = int(spLinkLine[statusIndex])
nodeDataset.value = float(spLinkLine[valueIndex])
nodeDataset.linkDataset = linkDataset
nodeDataset.linkNodeDatasetFile = self
# Increment to next status/value pair
statusIndex += NODE_VALUE_INCREMENT
valueIndex += NODE_VALUE_INCREMENT
else:
nodeDataset = NodeDataset()
nodeDataset.value = float(spLinkLine[1])
nodeDataset.linkDataset = linkDataset
nodeDataset.linkNodeDatasetFile = self | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Link Node Dataset File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Keywords/cards that divide the file into chunks
KEYWORDS = ('NUM_LINKS',
'TIME_STEP',
'NUM_TS',
'START_TIME',
'TS')
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
self.name = f.readline().strip()
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'NUM_LINKS':
# NUM_LINKS handler
self.numLinks = schunk[1]
elif card == 'TIME_STEP':
# TIME_STEP handler
self.timeStepInterval = schunk[1]
elif card == 'NUM_TS':
# NUM_TS handler
self.numTimeSteps = schunk[1]
elif card == 'START_TIME':
# START_TIME handler
self.startTime = '%s %s %s %s %s %s' % (
schunk[1],
schunk[2],
schunk[3],
schunk[4],
schunk[5],
schunk[6])
elif card == 'TS':
# TS handler
for line in chunk:
sline = line.strip().split()
token = sline[0]
# Cases
if token == 'TS':
# Time Step line handler
timeStep = LinkNodeTimeStep(timeStep=sline[1])
timeStep.linkNodeDataset = self
else:
# Split the line
spLinkLine = line.strip().split()
# Create LinkDataset GSSHAPY object
linkDataset = LinkDataset()
linkDataset.numNodeDatasets = int(spLinkLine[0])
linkDataset.timeStep = timeStep
linkDataset.linkNodeDatasetFile = self
# Parse line into NodeDatasets
NODE_VALUE_INCREMENT = 2
statusIndex = 1
valueIndex = statusIndex + 1
# Parse line into node datasets
if linkDataset.numNodeDatasets > 0:
for i in range(0, linkDataset.numNodeDatasets):
# Create NodeDataset GSSHAPY object
nodeDataset = NodeDataset()
nodeDataset.status = int(spLinkLine[statusIndex])
nodeDataset.value = float(spLinkLine[valueIndex])
nodeDataset.linkDataset = linkDataset
nodeDataset.linkNodeDatasetFile = self
# Increment to next status/value pair
statusIndex += NODE_VALUE_INCREMENT
valueIndex += NODE_VALUE_INCREMENT
else:
nodeDataset = NodeDataset()
nodeDataset.value = float(spLinkLine[1])
nodeDataset.linkDataset = linkDataset
nodeDataset.linkNodeDatasetFile = self | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Dictionary of keywords/cards and parse function names",
"KEYWORDS",
"=",
"(",
"'NUM_LINKS'",
",",
"'TIME_STEP'",
",",
"'NUM_TS'",
",",
"'START_TIME'",
",",
"'TS'",
")",
"# Parse file into chunks associated with keywords/cards",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"name",
"=",
"f",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse chunks associated with each key",
"for",
"card",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"card",
"==",
"'NUM_LINKS'",
":",
"# NUM_LINKS handler",
"self",
".",
"numLinks",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"==",
"'TIME_STEP'",
":",
"# TIME_STEP handler",
"self",
".",
"timeStepInterval",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"==",
"'NUM_TS'",
":",
"# NUM_TS handler",
"self",
".",
"numTimeSteps",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"==",
"'START_TIME'",
":",
"# START_TIME handler",
"self",
".",
"startTime",
"=",
"'%s %s %s %s %s %s'",
"%",
"(",
"schunk",
"[",
"1",
"]",
",",
"schunk",
"[",
"2",
"]",
",",
"schunk",
"[",
"3",
"]",
",",
"schunk",
"[",
"4",
"]",
",",
"schunk",
"[",
"5",
"]",
",",
"schunk",
"[",
"6",
"]",
")",
"elif",
"card",
"==",
"'TS'",
":",
"# TS handler",
"for",
"line",
"in",
"chunk",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"token",
"=",
"sline",
"[",
"0",
"]",
"# Cases",
"if",
"token",
"==",
"'TS'",
":",
"# Time Step line handler",
"timeStep",
"=",
"LinkNodeTimeStep",
"(",
"timeStep",
"=",
"sline",
"[",
"1",
"]",
")",
"timeStep",
".",
"linkNodeDataset",
"=",
"self",
"else",
":",
"# Split the line",
"spLinkLine",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Create LinkDataset GSSHAPY object",
"linkDataset",
"=",
"LinkDataset",
"(",
")",
"linkDataset",
".",
"numNodeDatasets",
"=",
"int",
"(",
"spLinkLine",
"[",
"0",
"]",
")",
"linkDataset",
".",
"timeStep",
"=",
"timeStep",
"linkDataset",
".",
"linkNodeDatasetFile",
"=",
"self",
"# Parse line into NodeDatasets",
"NODE_VALUE_INCREMENT",
"=",
"2",
"statusIndex",
"=",
"1",
"valueIndex",
"=",
"statusIndex",
"+",
"1",
"# Parse line into node datasets",
"if",
"linkDataset",
".",
"numNodeDatasets",
">",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"linkDataset",
".",
"numNodeDatasets",
")",
":",
"# Create NodeDataset GSSHAPY object",
"nodeDataset",
"=",
"NodeDataset",
"(",
")",
"nodeDataset",
".",
"status",
"=",
"int",
"(",
"spLinkLine",
"[",
"statusIndex",
"]",
")",
"nodeDataset",
".",
"value",
"=",
"float",
"(",
"spLinkLine",
"[",
"valueIndex",
"]",
")",
"nodeDataset",
".",
"linkDataset",
"=",
"linkDataset",
"nodeDataset",
".",
"linkNodeDatasetFile",
"=",
"self",
"# Increment to next status/value pair",
"statusIndex",
"+=",
"NODE_VALUE_INCREMENT",
"valueIndex",
"+=",
"NODE_VALUE_INCREMENT",
"else",
":",
"nodeDataset",
"=",
"NodeDataset",
"(",
")",
"nodeDataset",
".",
"value",
"=",
"float",
"(",
"spLinkLine",
"[",
"1",
"]",
")",
"nodeDataset",
".",
"linkDataset",
"=",
"linkDataset",
"nodeDataset",
".",
"linkNodeDatasetFile",
"=",
"self"
] | Link Node Dataset File Read from File Method | [
"Link",
"Node",
"Dataset",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L356-L448 | train |
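A minimal sketch of the status/value walk performed in the TS handler above, run on an invented link line (the stride of two and the 1-based start index mirror the loop in `_read`):

# Invented sample line: <numNodeDatasets> followed by (status, value) pairs.
spLinkLine = '3 1 0.25 1 0.50 0 0.75'.split()

NODE_VALUE_INCREMENT = 2
numNodeDatasets = int(spLinkLine[0])
statusIndex = 1
valueIndex = statusIndex + 1

pairs = []
for i in range(0, numNodeDatasets):
    # Each node dataset is one (status, value) pair on the same line.
    pairs.append((int(spLinkLine[statusIndex]), float(spLinkLine[valueIndex])))
    statusIndex += NODE_VALUE_INCREMENT
    valueIndex += NODE_VALUE_INCREMENT

print(pairs)  # [(1, 0.25), (1, 0.5), (0, 0.75)]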
CI-WATER/gsshapy | gsshapy/orm/lnd.py | LinkNodeDatasetFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Link Node Dataset File Write to File Method
"""
# Retrieve TimeStep objects
timeSteps = self.timeSteps
# Write Lines
openFile.write('%s\n' % self.name)
openFile.write('NUM_LINKS %s\n' % self.numLinks)
openFile.write('TIME_STEP %s\n' % self.timeStepInterval)
openFile.write('NUM_TS %s\n' % self.numTimeSteps)
openFile.write('START_TIME %s\n' % self.startTime)
for timeStep in timeSteps:
openFile.write('TS %s\n' % timeStep.timeStep)
# Retrieve LinkDataset objects
linkDatasets = timeStep.linkDatasets
for linkDataset in linkDatasets:
# Write number of node datasets values
openFile.write('{0} '.format(linkDataset.numNodeDatasets))
# Retrieve NodeDatasets
nodeDatasets = linkDataset.nodeDatasets
if linkDataset.numNodeDatasets > 0:
for nodeDataset in nodeDatasets:
# Write status and value
openFile.write('{0} {1:.5f} '.format(nodeDataset.status, nodeDataset.value))
else:
for nodeDataset in nodeDatasets:
# Write status and value
if linkDataset.numNodeDatasets < 0:
openFile.write('{0:.5f}'.format(nodeDataset.value))
else:
openFile.write('{0:.3f}'.format(nodeDataset.value))
# Write new line character after each link dataset
openFile.write('\n')
# Insert empty line between time steps
openFile.write('\n') | python | def _write(self, session, openFile, replaceParamFile):
"""
Link Node Dataset File Write to File Method
"""
# Retrieve TimeStep objects
timeSteps = self.timeSteps
# Write Lines
openFile.write('%s\n' % self.name)
openFile.write('NUM_LINKS %s\n' % self.numLinks)
openFile.write('TIME_STEP %s\n' % self.timeStepInterval)
openFile.write('NUM_TS %s\n' % self.numTimeSteps)
openFile.write('START_TIME %s\n' % self.startTime)
for timeStep in timeSteps:
openFile.write('TS %s\n' % timeStep.timeStep)
# Retrieve LinkDataset objects
linkDatasets = timeStep.linkDatasets
for linkDataset in linkDatasets:
# Write number of node datasets values
openFile.write('{0} '.format(linkDataset.numNodeDatasets))
# Retrieve NodeDatasets
nodeDatasets = linkDataset.nodeDatasets
if linkDataset.numNodeDatasets > 0:
for nodeDataset in nodeDatasets:
# Write status and value
openFile.write('{0} {1:.5f} '.format(nodeDataset.status, nodeDataset.value))
else:
for nodeDataset in nodeDatasets:
# Write status and value
if linkDataset.numNodeDatasets < 0:
openFile.write('{0:.5f}'.format(nodeDataset.value))
else:
openFile.write('{0:.3f}'.format(nodeDataset.value))
# Write new line character after each link dataset
openFile.write('\n')
# Insert empty line between time steps
openFile.write('\n') | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve TimeStep objects",
"timeSteps",
"=",
"self",
".",
"timeSteps",
"# Write Lines",
"openFile",
".",
"write",
"(",
"'%s\\n'",
"%",
"self",
".",
"name",
")",
"openFile",
".",
"write",
"(",
"'NUM_LINKS %s\\n'",
"%",
"self",
".",
"numLinks",
")",
"openFile",
".",
"write",
"(",
"'TIME_STEP %s\\n'",
"%",
"self",
".",
"timeStepInterval",
")",
"openFile",
".",
"write",
"(",
"'NUM_TS %s\\n'",
"%",
"self",
".",
"numTimeSteps",
")",
"openFile",
".",
"write",
"(",
"'START_TIME %s\\n'",
"%",
"self",
".",
"startTime",
")",
"for",
"timeStep",
"in",
"timeSteps",
":",
"openFile",
".",
"write",
"(",
"'TS %s\\n'",
"%",
"timeStep",
".",
"timeStep",
")",
"# Retrieve LinkDataset objects",
"linkDatasets",
"=",
"timeStep",
".",
"linkDatasets",
"for",
"linkDataset",
"in",
"linkDatasets",
":",
"# Write number of node datasets values",
"openFile",
".",
"write",
"(",
"'{0} '",
".",
"format",
"(",
"linkDataset",
".",
"numNodeDatasets",
")",
")",
"# Retrieve NodeDatasets",
"nodeDatasets",
"=",
"linkDataset",
".",
"nodeDatasets",
"if",
"linkDataset",
".",
"numNodeDatasets",
">",
"0",
":",
"for",
"nodeDataset",
"in",
"nodeDatasets",
":",
"# Write status and value",
"openFile",
".",
"write",
"(",
"'{0} {1:.5f} '",
".",
"format",
"(",
"nodeDataset",
".",
"status",
",",
"nodeDataset",
".",
"value",
")",
")",
"else",
":",
"for",
"nodeDataset",
"in",
"nodeDatasets",
":",
"# Write status and value",
"if",
"linkDataset",
".",
"numNodeDatasets",
"<",
"0",
":",
"openFile",
".",
"write",
"(",
"'{0:.5f}'",
".",
"format",
"(",
"nodeDataset",
".",
"value",
")",
")",
"else",
":",
"openFile",
".",
"write",
"(",
"'{0:.3f}'",
".",
"format",
"(",
"nodeDataset",
".",
"value",
")",
")",
"# Write new line character after each link dataset",
"openFile",
".",
"write",
"(",
"'\\n'",
")",
"# Insert empty line between time steps",
"openFile",
".",
"write",
"(",
"'\\n'",
")"
] | Link Node Dataset File Write to File Method | [
"Link",
"Node",
"Dataset",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/lnd.py#L452-L496 | train |
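For reference, the three format strings used by `_write` produce output like this (numbers invented):

print('{0} {1:.5f} '.format(1, 0.25))  # '1 0.25000 '  status/value pair
print('{0:.5f}'.format(0.25))          # '0.25000'     value only, 5 places
print('{0:.3f}'.format(0.25))          # '0.250'       value only, 3 places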
cs50/cli50 | cli50/__main__.py | login | def login(container):
"""Log into container."""
columns, lines = shutil.get_terminal_size() # Temporary
try:
subprocess.check_call([
"docker", "exec",
"--env", f"COLUMNS={str(columns)},LINES={str(lines)}", # Temporary
"--env", f"LINES={str(lines)}", # Temporary
"--interactive",
"--tty",
container,
"bash",
"--login"
])
except subprocess.CalledProcessError:
raise RuntimeError() from None | python | def login(container):
"""Log into container."""
columns, lines = shutil.get_terminal_size() # Temporary
try:
subprocess.check_call([
"docker", "exec",
"--env", f"COLUMNS={str(columns)},LINES={str(lines)}", # Temporary
"--env", f"LINES={str(lines)}", # Temporary
"--interactive",
"--tty",
container,
"bash",
"--login"
])
except subprocess.CalledProcessError:
raise RuntimeError() from None | [
"def",
"login",
"(",
"container",
")",
":",
"columns",
",",
"lines",
"=",
"shutil",
".",
"get_terminal_size",
"(",
")",
"# Temporary",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"\"docker\"",
",",
"\"exec\"",
",",
"\"--env\"",
",",
"f\"COLUMNS={str(columns)},LINES={str(lines)}\"",
",",
"# Temporary",
"\"--env\"",
",",
"f\"LINES={str(lines)}\"",
",",
"# Temporary",
"\"--interactive\"",
",",
"\"--tty\"",
",",
"container",
",",
"\"bash\"",
",",
"\"--login\"",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"raise",
"RuntimeError",
"(",
")",
"from",
"None"
] | Log into container. | [
"Log",
"into",
"container",
"."
] | f712328200dd40c3e19e73893644cb61125ea66e | https://github.com/cs50/cli50/blob/f712328200dd40c3e19e73893644cb61125ea66e/cli50/__main__.py#L195-L210 | train |
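A usage sketch, assuming Docker is available and a container with the invented name `ide50` is running:

try:
    login('ide50')  # attaches an interactive login bash shell to the container
except RuntimeError:
    print('could not log into container')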
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event._update_simulation_start | def _update_simulation_start(self, simulation_start):
"""
Update GSSHA simulation start time
"""
self.simulation_start = simulation_start
if self.simulation_duration is not None and self.simulation_start is not None:
self.simulation_end = self.simulation_start + self.simulation_duration
self._update_simulation_start_cards() | python | def _update_simulation_start(self, simulation_start):
"""
Update GSSHA simulation start time
"""
self.simulation_start = simulation_start
if self.simulation_duration is not None and self.simulation_start is not None:
self.simulation_end = self.simulation_start + self.simulation_duration
self._update_simulation_start_cards() | [
"def",
"_update_simulation_start",
"(",
"self",
",",
"simulation_start",
")",
":",
"self",
".",
"simulation_start",
"=",
"simulation_start",
"if",
"self",
".",
"simulation_duration",
"is",
"not",
"None",
"and",
"self",
".",
"simulation_start",
"is",
"not",
"None",
":",
"self",
".",
"simulation_end",
"=",
"self",
".",
"simulation_start",
"+",
"self",
".",
"simulation_duration",
"self",
".",
"_update_simulation_start_cards",
"(",
")"
] | Update GSSHA simulation start time | [
"Update",
"GSSHA",
"simulation",
"start",
"time"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L148-L155 | train |
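The bookkeeping above is plain `datetime` arithmetic; a self-contained sketch with invented values:

from datetime import datetime, timedelta

simulation_start = datetime(2016, 6, 1, 12, 0)  # invented
simulation_duration = timedelta(hours=6)        # invented

# Mirrors _update_simulation_start: derive the end when both are known.
simulation_end = simulation_start + simulation_duration
print(simulation_end)  # 2016-06-01 18:00:00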
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event._update_simulation_start_cards | def _update_simulation_start_cards(self):
"""
Update GSSHA cards for simulation start
"""
if self.simulation_start is not None:
self._update_card("START_DATE", self.simulation_start.strftime("%Y %m %d"))
self._update_card("START_TIME", self.simulation_start.strftime("%H %M")) | python | def _update_simulation_start_cards(self):
"""
Update GSSHA cards for simulation start
"""
if self.simulation_start is not None:
self._update_card("START_DATE", self.simulation_start.strftime("%Y %m %d"))
self._update_card("START_TIME", self.simulation_start.strftime("%H %M")) | [
"def",
"_update_simulation_start_cards",
"(",
"self",
")",
":",
"if",
"self",
".",
"simulation_start",
"is",
"not",
"None",
":",
"self",
".",
"_update_card",
"(",
"\"START_DATE\"",
",",
"self",
".",
"simulation_start",
".",
"strftime",
"(",
"\"%Y %m %d\"",
")",
")",
"self",
".",
"_update_card",
"(",
"\"START_TIME\"",
",",
"self",
".",
"simulation_start",
".",
"strftime",
"(",
"\"%H %M\"",
")",
")"
] | Update GSSHA cards for simulation start | [
"Update",
"GSSHA",
"cards",
"for",
"simulation",
"start"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L157-L163 | train |
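The two card values are plain `strftime` renderings of the start time (date invented):

from datetime import datetime

start = datetime(2016, 6, 1, 12, 30)
print(start.strftime('%Y %m %d'))  # '2016 06 01' -> START_DATE
print(start.strftime('%H %M'))     # '12 30'      -> START_TIME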
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event._update_simulation_end_from_lsm | def _update_simulation_end_from_lsm(self):
"""
Update simulation end time from LSM
"""
te = self.l2g.xd.lsm.datetime[-1]
simulation_end = te.replace(tzinfo=utc) \
.astimezone(tz=self.tz) \
.replace(tzinfo=None)
if self.simulation_end is None:
self.simulation_end = simulation_end
elif self.simulation_end > simulation_end:
self.simulation_end = simulation_end
self._update_card("END_TIME",
self.simulation_end
.strftime("%Y %m %d %H %M")) | python | def _update_simulation_end_from_lsm(self):
"""
Update simulation end time from LSM
"""
te = self.l2g.xd.lsm.datetime[-1]
simulation_end = te.replace(tzinfo=utc) \
.astimezone(tz=self.tz) \
.replace(tzinfo=None)
if self.simulation_end is None:
self.simulation_end = simulation_end
elif self.simulation_end > simulation_end:
self.simulation_end = simulation_end
self._update_card("END_TIME",
self.simulation_end
.strftime("%Y %m %d %H %M")) | [
"def",
"_update_simulation_end_from_lsm",
"(",
"self",
")",
":",
"te",
"=",
"self",
".",
"l2g",
".",
"xd",
".",
"lsm",
".",
"datetime",
"[",
"-",
"1",
"]",
"simulation_end",
"=",
"te",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
".",
"astimezone",
"(",
"tz",
"=",
"self",
".",
"tz",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"if",
"self",
".",
"simulation_end",
"is",
"None",
":",
"self",
".",
"simulation_end",
"=",
"simulation_end",
"elif",
"self",
".",
"simulation_end",
">",
"simulation_end",
":",
"self",
".",
"simulation_end",
"=",
"simulation_end",
"self",
".",
"_update_card",
"(",
"\"END_TIME\"",
",",
"self",
".",
"simulation_end",
".",
"strftime",
"(",
"\"%Y %m %d %H %M\"",
")",
")"
] | Update simulation end time from LSM | [
"Update",
"simulation",
"end",
"time",
"from",
"LSM"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L165-L180 | train |
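The UTC-to-local round trip in `_update_simulation_end_from_lsm` can be reproduced with the standard library; here a fixed offset stands in for `self.tz` (offset and times invented):

from datetime import datetime, timedelta, timezone

utc = timezone.utc
tz = timezone(timedelta(hours=-6))  # invented stand-in for self.tz

te = datetime(2016, 6, 1, 18, 0)    # naive LSM end time, assumed UTC
simulation_end = te.replace(tzinfo=utc).astimezone(tz=tz).replace(tzinfo=None)
print(simulation_end)  # 2016-06-01 12:00:00 (naive local time)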
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event.add_precip_file | def add_precip_file(self, precip_file_path, interpolation_type=None):
"""
Adds a precip file to project with interpolation_type
"""
# precip file read in
self._update_card('PRECIP_FILE', precip_file_path, True)
if interpolation_type is None:
# check if precip type exists already in card
if not self.project_manager.getCard('RAIN_INV_DISTANCE') \
and not self.project_manager.getCard('RAIN_THIESSEN'):
# if no type exists, then make it theissen
self._update_card('RAIN_THIESSEN', '')
else:
if interpolation_type.upper() not in self.PRECIP_INTERP_TYPES:
raise IndexError("Invalid interpolation_type {0}".format(interpolation_type))
interpolation_type = interpolation_type.upper()
if interpolation_type == "INV_DISTANCE":
self._update_card('RAIN_INV_DISTANCE', '')
self.project_manager.deleteCard('RAIN_THIESSEN', self.db_session)
else:
self._update_card('RAIN_THIESSEN', '')
self.project_manager.deleteCard('RAIN_INV_DISTANCE', self.db_session) | python | def add_precip_file(self, precip_file_path, interpolation_type=None):
"""
Adds a precip file to project with interpolation_type
"""
# precip file read in
self._update_card('PRECIP_FILE', precip_file_path, True)
if interpolation_type is None:
# check if precip type exists already in card
if not self.project_manager.getCard('RAIN_INV_DISTANCE') \
and not self.project_manager.getCard('RAIN_THIESSEN'):
# if no type exists, then make it thiessen
self._update_card('RAIN_THIESSEN', '')
else:
if interpolation_type.upper() not in self.PRECIP_INTERP_TYPES:
raise IndexError("Invalid interpolation_type {0}".format(interpolation_type))
interpolation_type = interpolation_type.upper()
if interpolation_type == "INV_DISTANCE":
self._update_card('RAIN_INV_DISTANCE', '')
self.project_manager.deleteCard('RAIN_THIESSEN', self.db_session)
else:
self._update_card('RAIN_THIESSEN', '')
self.project_manager.deleteCard('RAIN_INV_DISTANCE', self.db_session) | [
"def",
"add_precip_file",
"(",
"self",
",",
"precip_file_path",
",",
"interpolation_type",
"=",
"None",
")",
":",
"# precip file read in",
"self",
".",
"_update_card",
"(",
"'PRECIP_FILE'",
",",
"precip_file_path",
",",
"True",
")",
"if",
"interpolation_type",
"is",
"None",
":",
"# check if precip type exists already in card",
"if",
"not",
"self",
".",
"project_manager",
".",
"getCard",
"(",
"'RAIN_INV_DISTANCE'",
")",
"and",
"not",
"self",
".",
"project_manager",
".",
"getCard",
"(",
"'RAIN_THIESSEN'",
")",
":",
"# if no type exists, then make it theissen",
"self",
".",
"_update_card",
"(",
"'RAIN_THIESSEN'",
",",
"''",
")",
"else",
":",
"if",
"interpolation_type",
".",
"upper",
"(",
")",
"not",
"in",
"self",
".",
"PRECIP_INTERP_TYPES",
":",
"raise",
"IndexError",
"(",
"\"Invalid interpolation_type {0}\"",
".",
"format",
"(",
"interpolation_type",
")",
")",
"interpolation_type",
"=",
"interpolation_type",
".",
"upper",
"(",
")",
"if",
"interpolation_type",
"==",
"\"INV_DISTANCE\"",
":",
"self",
".",
"_update_card",
"(",
"'RAIN_INV_DISTANCE'",
",",
"''",
")",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'RAIN_THIESSEN'",
",",
"self",
".",
"db_session",
")",
"else",
":",
"self",
".",
"_update_card",
"(",
"'RAIN_THIESSEN'",
",",
"''",
")",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'RAIN_INV_DISTANCE'",
",",
"self",
".",
"db_session",
")"
] | Adds a precip file to project with interpolation_type | [
"Adds",
"a",
"precip",
"file",
"to",
"project",
"with",
"interpolation_type"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L197-L220 | train |
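The interpolation-type guard in `add_precip_file` is a normalize-then-validate pattern; in isolation (the tuple contents are assumed from the values the method checks for):

PRECIP_INTERP_TYPES = ('THIESSEN', 'INV_DISTANCE')  # assumed contents

def check_interp(interpolation_type):
    # Normalize case first, then validate against the allowed set.
    if interpolation_type.upper() not in PRECIP_INTERP_TYPES:
        raise IndexError('Invalid interpolation_type {0}'.format(interpolation_type))
    return interpolation_type.upper()

print(check_interp('inv_distance'))  # 'INV_DISTANCE'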
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event.prepare_gag_lsm | def prepare_gag_lsm(self, lsm_precip_data_var, lsm_precip_type, interpolation_type=None):
"""
Prepares Gage output for GSSHA simulation
Parameters:
lsm_precip_data_var(list or str): Name of the precipitation variable, or a list of precipitation variable names. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
lsm_precip_type(str): Type of precipitation. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
interpolation_type(str): Type of interpolation for LSM precipitation. Can be "INV_DISTANCE" or "THIESSEN". Default is "THIESSEN".
"""
if self.l2g is None:
raise ValueError("LSM converter not loaded ...")
# remove uniform precip cards
for unif_precip_card in self.UNIFORM_PRECIP_CARDS:
self.project_manager.deleteCard(unif_precip_card, self.db_session)
with tmp_chdir(self.project_manager.project_directory):
# PRECIPITATION CARD
out_gage_file = '{0}.gag'.format(self.project_manager.name)
self.l2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
lsm_data_var=lsm_precip_data_var,
precip_type=lsm_precip_type)
# SIMULATION TIME CARDS
self._update_simulation_end_from_lsm()
self.set_simulation_duration(self.simulation_end-self.simulation_start)
# precip file read in
self.add_precip_file(out_gage_file, interpolation_type)
# make sure xarray dataset closed
self.l2g.xd.close() | python | def prepare_gag_lsm(self, lsm_precip_data_var, lsm_precip_type, interpolation_type=None):
"""
Prepares Gage output for GSSHA simulation
Parameters:
lsm_precip_data_var(list or str): Name of the precipitation variable, or a list of precipitation variable names. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
lsm_precip_type(str): Type of precipitation. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
interpolation_type(str): Type of interpolation for LSM precipitation. Can be "INV_DISTANCE" or "THIESSEN". Default is "THIESSEN".
"""
if self.l2g is None:
raise ValueError("LSM converter not loaded ...")
# remove uniform precip cards
for unif_precip_card in self.UNIFORM_PRECIP_CARDS:
self.project_manager.deleteCard(unif_precip_card, self.db_session)
with tmp_chdir(self.project_manager.project_directory):
# PRECIPITATION CARD
out_gage_file = '{0}.gag'.format(self.project_manager.name)
self.l2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
lsm_data_var=lsm_precip_data_var,
precip_type=lsm_precip_type)
# SIMULATION TIME CARDS
self._update_simulation_end_from_lsm()
self.set_simulation_duration(self.simulation_end-self.simulation_start)
# precip file read in
self.add_precip_file(out_gage_file, interpolation_type)
# make sure xarray dataset closed
self.l2g.xd.close() | [
"def",
"prepare_gag_lsm",
"(",
"self",
",",
"lsm_precip_data_var",
",",
"lsm_precip_type",
",",
"interpolation_type",
"=",
"None",
")",
":",
"if",
"self",
".",
"l2g",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"LSM converter not loaded ...\"",
")",
"# remove uniform precip cards",
"for",
"unif_precip_card",
"in",
"self",
".",
"UNIFORM_PRECIP_CARDS",
":",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"unif_precip_card",
",",
"self",
".",
"db_session",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_manager",
".",
"project_directory",
")",
":",
"# PRECIPITATION CARD",
"out_gage_file",
"=",
"'{0}.gag'",
".",
"format",
"(",
"self",
".",
"project_manager",
".",
"name",
")",
"self",
".",
"l2g",
".",
"lsm_precip_to_gssha_precip_gage",
"(",
"out_gage_file",
",",
"lsm_data_var",
"=",
"lsm_precip_data_var",
",",
"precip_type",
"=",
"lsm_precip_type",
")",
"# SIMULATION TIME CARDS",
"self",
".",
"_update_simulation_end_from_lsm",
"(",
")",
"self",
".",
"set_simulation_duration",
"(",
"self",
".",
"simulation_end",
"-",
"self",
".",
"simulation_start",
")",
"# precip file read in",
"self",
".",
"add_precip_file",
"(",
"out_gage_file",
",",
"interpolation_type",
")",
"# make sure xarray dataset closed",
"self",
".",
"l2g",
".",
"xd",
".",
"close",
"(",
")"
] | Prepares Gage output for GSSHA simulation
Parameters:
lsm_precip_data_var(list or str): Name of the precipitation variable, or a list of precipitation variable names. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
lsm_precip_type(str): Type of precipitation. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`.
interpolation_type(str): Type of interpolation for LSM precipitation. Can be "INV_DISTANCE" or "THIESSEN". Default is "THIESSEN". | [
"Prepares",
"Gage",
"output",
"for",
"GSSHA",
"simulation"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L222-L253 | train |
CI-WATER/gsshapy | gsshapy/modeling/event.py | Event.prepare_rapid_streamflow | def prepare_rapid_streamflow(self, path_to_rapid_qout, connection_list_file):
"""
Prepares RAPID streamflow for GSSHA simulation
"""
ihg_filename = '{0}.ihg'.format(self.project_manager.name)
with tmp_chdir(self.project_manager.project_directory):
# write out IHG file
time_index_range = []
with RAPIDDataset(path_to_rapid_qout, out_tzinfo=self.tz) as qout_nc:
time_index_range = qout_nc.get_time_index_range(date_search_start=self.simulation_start,
date_search_end=self.simulation_end)
if len(time_index_range) > 0:
time_array = qout_nc.get_time_array(return_datetime=True,
time_index_array=time_index_range)
# GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP
if self.simulation_start is not None:
if self.simulation_start == time_array[0]:
log.warning("First timestep of streamflow skipped "
"in order for GSSHA to capture the streamflow.")
time_index_range = time_index_range[1:]
time_array = time_array[1:]
if len(time_index_range) > 0:
start_datetime = time_array[0]
if self.simulation_start is None:
self._update_simulation_start(start_datetime)
if self.simulation_end is None:
self.simulation_end = time_array[-1]
qout_nc.write_flows_to_gssha_time_series_ihg(ihg_filename,
connection_list_file,
date_search_start=start_datetime,
date_search_end=self.simulation_end,
)
else:
log.warning("No streamflow values found in time range ...")
if len(time_index_range) > 0:
# update cards
self._update_simulation_start_cards()
self._update_card("END_TIME", self.simulation_end.strftime("%Y %m %d %H %M"))
self._update_card("CHAN_POINT_INPUT", ihg_filename, True)
# update duration
self.set_simulation_duration(self.simulation_end-self.simulation_start)
# UPDATE GMT CARD
self._update_gmt()
else:
# cleanup
os.remove(ihg_filename)
self.project_manager.deleteCard('CHAN_POINT_INPUT', self.db_session) | python | def prepare_rapid_streamflow(self, path_to_rapid_qout, connection_list_file):
"""
Prepares RAPID streamflow for GSSHA simulation
"""
ihg_filename = '{0}.ihg'.format(self.project_manager.name)
with tmp_chdir(self.project_manager.project_directory):
# write out IHG file
time_index_range = []
with RAPIDDataset(path_to_rapid_qout, out_tzinfo=self.tz) as qout_nc:
time_index_range = qout_nc.get_time_index_range(date_search_start=self.simulation_start,
date_search_end=self.simulation_end)
if len(time_index_range) > 0:
time_array = qout_nc.get_time_array(return_datetime=True,
time_index_array=time_index_range)
# GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP
if self.simulation_start is not None:
if self.simulation_start == time_array[0]:
log.warning("First timestep of streamflow skipped "
"in order for GSSHA to capture the streamflow.")
time_index_range = time_index_range[1:]
time_array = time_array[1:]
if len(time_index_range) > 0:
start_datetime = time_array[0]
if self.simulation_start is None:
self._update_simulation_start(start_datetime)
if self.simulation_end is None:
self.simulation_end = time_array[-1]
qout_nc.write_flows_to_gssha_time_series_ihg(ihg_filename,
connection_list_file,
date_search_start=start_datetime,
date_search_end=self.simulation_end,
)
else:
log.warning("No streamflow values found in time range ...")
if len(time_index_range) > 0:
# update cards
self._update_simulation_start_cards()
self._update_card("END_TIME", self.simulation_end.strftime("%Y %m %d %H %M"))
self._update_card("CHAN_POINT_INPUT", ihg_filename, True)
# update duration
self.set_simulation_duration(self.simulation_end-self.simulation_start)
# UPDATE GMT CARD
self._update_gmt()
else:
# cleanup
os.remove(ihg_filename)
self.project_manager.deleteCard('CHAN_POINT_INPUT', self.db_session) | [
"def",
"prepare_rapid_streamflow",
"(",
"self",
",",
"path_to_rapid_qout",
",",
"connection_list_file",
")",
":",
"ihg_filename",
"=",
"'{0}.ihg'",
".",
"format",
"(",
"self",
".",
"project_manager",
".",
"name",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_manager",
".",
"project_directory",
")",
":",
"# write out IHG file",
"time_index_range",
"=",
"[",
"]",
"with",
"RAPIDDataset",
"(",
"path_to_rapid_qout",
",",
"out_tzinfo",
"=",
"self",
".",
"tz",
")",
"as",
"qout_nc",
":",
"time_index_range",
"=",
"qout_nc",
".",
"get_time_index_range",
"(",
"date_search_start",
"=",
"self",
".",
"simulation_start",
",",
"date_search_end",
"=",
"self",
".",
"simulation_end",
")",
"if",
"len",
"(",
"time_index_range",
")",
">",
"0",
":",
"time_array",
"=",
"qout_nc",
".",
"get_time_array",
"(",
"return_datetime",
"=",
"True",
",",
"time_index_array",
"=",
"time_index_range",
")",
"# GSSHA STARTS INGESTING STREAMFLOW AT SECOND TIME STEP",
"if",
"self",
".",
"simulation_start",
"is",
"not",
"None",
":",
"if",
"self",
".",
"simulation_start",
"==",
"time_array",
"[",
"0",
"]",
":",
"log",
".",
"warning",
"(",
"\"First timestep of streamflow skipped \"",
"\"in order for GSSHA to capture the streamflow.\"",
")",
"time_index_range",
"=",
"time_index_range",
"[",
"1",
":",
"]",
"time_array",
"=",
"time_array",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"time_index_range",
")",
">",
"0",
":",
"start_datetime",
"=",
"time_array",
"[",
"0",
"]",
"if",
"self",
".",
"simulation_start",
"is",
"None",
":",
"self",
".",
"_update_simulation_start",
"(",
"start_datetime",
")",
"if",
"self",
".",
"simulation_end",
"is",
"None",
":",
"self",
".",
"simulation_end",
"=",
"time_array",
"[",
"-",
"1",
"]",
"qout_nc",
".",
"write_flows_to_gssha_time_series_ihg",
"(",
"ihg_filename",
",",
"connection_list_file",
",",
"date_search_start",
"=",
"start_datetime",
",",
"date_search_end",
"=",
"self",
".",
"simulation_end",
",",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"No streamflow values found in time range ...\"",
")",
"if",
"len",
"(",
"time_index_range",
")",
">",
"0",
":",
"# update cards",
"self",
".",
"_update_simulation_start_cards",
"(",
")",
"self",
".",
"_update_card",
"(",
"\"END_TIME\"",
",",
"self",
".",
"simulation_end",
".",
"strftime",
"(",
"\"%Y %m %d %H %M\"",
")",
")",
"self",
".",
"_update_card",
"(",
"\"CHAN_POINT_INPUT\"",
",",
"ihg_filename",
",",
"True",
")",
"# update duration",
"self",
".",
"set_simulation_duration",
"(",
"self",
".",
"simulation_end",
"-",
"self",
".",
"simulation_start",
")",
"# UPDATE GMT CARD",
"self",
".",
"_update_gmt",
"(",
")",
"else",
":",
"# cleanup",
"os",
".",
"remove",
"(",
"ihg_filename",
")",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'CHAN_POINT_INPUT'",
",",
"self",
".",
"db_session",
")"
] | Prepares RAPID streamflow for GSSHA simulation | [
"Prepares",
"RAPID",
"streamflow",
"for",
"GSSHA",
"simulation"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L255-L312 | train |
CI-WATER/gsshapy | gsshapy/modeling/event.py | EventMode.add_uniform_precip_event | def add_uniform_precip_event(self, intensity, duration):
"""
Add a uniform precip event
"""
self.project_manager.setCard('PRECIP_UNIF', '')
self.project_manager.setCard('RAIN_INTENSITY', str(intensity))
self.project_manager.setCard('RAIN_DURATION', str(duration.total_seconds()/60.0)) | python | def add_uniform_precip_event(self, intensity, duration):
"""
Add a uniform precip event
"""
self.project_manager.setCard('PRECIP_UNIF', '')
self.project_manager.setCard('RAIN_INTENSITY', str(intensity))
self.project_manager.setCard('RAIN_DURATION', str(duration.total_seconds()/60.0)) | [
"def",
"add_uniform_precip_event",
"(",
"self",
",",
"intensity",
",",
"duration",
")",
":",
"self",
".",
"project_manager",
".",
"setCard",
"(",
"'PRECIP_UNIF'",
",",
"''",
")",
"self",
".",
"project_manager",
".",
"setCard",
"(",
"'RAIN_INTENSITY'",
",",
"str",
"(",
"intensity",
")",
")",
"self",
".",
"project_manager",
".",
"setCard",
"(",
"'RAIN_DURATION'",
",",
"str",
"(",
"duration",
".",
"total_seconds",
"(",
")",
"/",
"60.0",
")",
")"
] | Add a uniform precip event | [
"Add",
"a",
"uniform",
"precip",
"event"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L383-L389 | train |
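RAIN_DURATION is written as minutes; the conversion used above, in isolation (duration invented):

from datetime import timedelta

duration = timedelta(hours=2, minutes=30)    # invented
print(str(duration.total_seconds() / 60.0))  # '150.0' -> RAIN_DURATION card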
CI-WATER/gsshapy | gsshapy/modeling/event.py | LongTermMode._update_gmt | def _update_gmt(self):
"""
Based on timezone and start date, the GMT card is updated
"""
if self.simulation_start is not None:
# NOTE: Because of daylight savings time,
# offset result depends on time of the year
offset_string = str(self.simulation_start.replace(tzinfo=self.tz)
.utcoffset().total_seconds()/3600.)
self._update_card('GMT', offset_string) | python | def _update_gmt(self):
"""
Based on timezone and start date, the GMT card is updated
"""
if self.simulation_start is not None:
# NOTE: Because of daylight savings time,
# offset result depends on time of the year
offset_string = str(self.simulation_start.replace(tzinfo=self.tz)
.utcoffset().total_seconds()/3600.)
self._update_card('GMT', offset_string) | [
"def",
"_update_gmt",
"(",
"self",
")",
":",
"if",
"self",
".",
"simulation_start",
"is",
"not",
"None",
":",
"# NOTE: Because of daylight savings time,",
"# offset result depends on time of the year",
"offset_string",
"=",
"str",
"(",
"self",
".",
"simulation_start",
".",
"replace",
"(",
"tzinfo",
"=",
"self",
".",
"tz",
")",
".",
"utcoffset",
"(",
")",
".",
"total_seconds",
"(",
")",
"/",
"3600.",
")",
"self",
".",
"_update_card",
"(",
"'GMT'",
",",
"offset_string",
")"
] | Based on timezone and start date, the GMT card is updated | [
"Based",
"on",
"timezone",
"and",
"start",
"date",
"the",
"GMT",
"card",
"is",
"updated"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L497-L506 | train |
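The GMT card is the UTC offset, in hours, at the simulation start. With a fixed-offset zone standing in for `self.tz` (a real pytz zone would shift with daylight saving, as the comment warns):

from datetime import datetime, timedelta, timezone

tz = timezone(timedelta(hours=-5))  # invented stand-in for self.tz
simulation_start = datetime(2016, 6, 1)

offset_string = str(simulation_start.replace(tzinfo=tz)
                    .utcoffset().total_seconds() / 3600.)
print(offset_string)  # '-5.0' -> GMT card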
CI-WATER/gsshapy | gsshapy/modeling/event.py | LongTermMode.prepare_hmet_lsm | def prepare_hmet_lsm(self, lsm_data_var_map_array,
hmet_ascii_output_folder=None,
netcdf_file_path=None):
"""
Prepares HMET data for GSSHA simulation from land surface model data.
Parameters:
lsm_data_var_map_array(str): Array with connections for LSM output and GSSHA input. See: :func:`~gsshapy.grid.GRIDtoGSSHA.`
hmet_ascii_output_folder(Optional[str]): Path to directory to output HMET ASCII files. Mutually exclusive with netcdf_file_path. Default is None.
netcdf_file_path(Optional[str]): If you want the HMET data output as a NetCDF4 file for input to GSSHA. Mutually exclusive with hmet_ascii_output_folder. Default is None.
"""
if self.l2g is None:
raise ValueError("LSM converter not loaded ...")
with tmp_chdir(self.project_manager.project_directory):
# GSSHA simulation does not work past the end of the HMET data
self._update_simulation_end_from_lsm()
# HMET CARDS
if netcdf_file_path is not None:
self.l2g.lsm_data_to_subset_netcdf(netcdf_file_path, lsm_data_var_map_array)
self._update_card("HMET_NETCDF", netcdf_file_path, True)
self.project_manager.deleteCard('HMET_ASCII', self.db_session)
else:
if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder:
hmet_ascii_output_folder = hmet_ascii_output_folder.format(self.simulation_start.strftime("%Y%m%d%H%M"),
self.simulation_end.strftime("%Y%m%d%H%M"))
self.l2g.lsm_data_to_arc_ascii(lsm_data_var_map_array,
main_output_folder=os.path.join(self.gssha_directory,
hmet_ascii_output_folder))
self._update_card("HMET_ASCII", os.path.join(hmet_ascii_output_folder, 'hmet_file_list.txt'), True)
self.project_manager.deleteCard('HMET_NETCDF', self.db_session)
# UPDATE GMT CARD
self._update_gmt() | python | def prepare_hmet_lsm(self, lsm_data_var_map_array,
hmet_ascii_output_folder=None,
netcdf_file_path=None):
"""
Prepares HMET data for GSSHA simulation from land surface model data.
Parameters:
lsm_data_var_map_array(str): Array with connections for LSM output and GSSHA input. See: :func:`~gsshapy.grid.GRIDtoGSSHA.`
hmet_ascii_output_folder(Optional[str]): Path to directory to output HMET ASCII files. Mutually exclusive with netcdf_file_path. Default is None.
netcdf_file_path(Optional[str]): If you want the HMET data output as a NetCDF4 file for input to GSSHA. Mutually exclusive with hmet_ascii_output_folder. Default is None.
"""
if self.l2g is None:
raise ValueError("LSM converter not loaded ...")
with tmp_chdir(self.project_manager.project_directory):
# GSSHA simulation does not work past the end of the HMET data
self._update_simulation_end_from_lsm()
# HMET CARDS
if netcdf_file_path is not None:
self.l2g.lsm_data_to_subset_netcdf(netcdf_file_path, lsm_data_var_map_array)
self._update_card("HMET_NETCDF", netcdf_file_path, True)
self.project_manager.deleteCard('HMET_ASCII', self.db_session)
else:
if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder:
hmet_ascii_output_folder = hmet_ascii_output_folder.format(self.simulation_start.strftime("%Y%m%d%H%M"),
self.simulation_end.strftime("%Y%m%d%H%M"))
self.l2g.lsm_data_to_arc_ascii(lsm_data_var_map_array,
main_output_folder=os.path.join(self.gssha_directory,
hmet_ascii_output_folder))
self._update_card("HMET_ASCII", os.path.join(hmet_ascii_output_folder, 'hmet_file_list.txt'), True)
self.project_manager.deleteCard('HMET_NETCDF', self.db_session)
# UPDATE GMT CARD
self._update_gmt() | [
"def",
"prepare_hmet_lsm",
"(",
"self",
",",
"lsm_data_var_map_array",
",",
"hmet_ascii_output_folder",
"=",
"None",
",",
"netcdf_file_path",
"=",
"None",
")",
":",
"if",
"self",
".",
"l2g",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"LSM converter not loaded ...\"",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_manager",
".",
"project_directory",
")",
":",
"# GSSHA simulation does not work after HMET data is finished",
"self",
".",
"_update_simulation_end_from_lsm",
"(",
")",
"# HMET CARDS",
"if",
"netcdf_file_path",
"is",
"not",
"None",
":",
"self",
".",
"l2g",
".",
"lsm_data_to_subset_netcdf",
"(",
"netcdf_file_path",
",",
"lsm_data_var_map_array",
")",
"self",
".",
"_update_card",
"(",
"\"HMET_NETCDF\"",
",",
"netcdf_file_path",
",",
"True",
")",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'HMET_ASCII'",
",",
"self",
".",
"db_session",
")",
"else",
":",
"if",
"\"{0}\"",
"in",
"hmet_ascii_output_folder",
"and",
"\"{1}\"",
"in",
"hmet_ascii_output_folder",
":",
"hmet_ascii_output_folder",
"=",
"hmet_ascii_output_folder",
".",
"format",
"(",
"self",
".",
"simulation_start",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
",",
"self",
".",
"simulation_end",
".",
"strftime",
"(",
"\"%Y%m%d%H%M\"",
")",
")",
"self",
".",
"l2g",
".",
"lsm_data_to_arc_ascii",
"(",
"lsm_data_var_map_array",
",",
"main_output_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gssha_directory",
",",
"hmet_ascii_output_folder",
")",
")",
"self",
".",
"_update_card",
"(",
"\"HMET_ASCII\"",
",",
"os",
".",
"path",
".",
"join",
"(",
"hmet_ascii_output_folder",
",",
"'hmet_file_list.txt'",
")",
",",
"True",
")",
"self",
".",
"project_manager",
".",
"deleteCard",
"(",
"'HMET_NETCDF'",
",",
"self",
".",
"db_session",
")",
"# UPDATE GMT CARD",
"self",
".",
"_update_gmt",
"(",
")"
] | Prepares HMET data for GSSHA simulation from land surface model data.
Parameters:
lsm_data_var_map_array(str): Array with connections for LSM output and GSSHA input. See: :func:`~gsshapy.grid.GRIDtoGSSHA.`
hmet_ascii_output_folder(Optional[str]): Path to directory to output HMET ASCII files. Mutually exclusive with netcdf_file_path. Default is None.
netcdf_file_path(Optional[str]): If you want the HMET data output as a NetCDF4 file for input to GSSHA. Mutually exclusive with hmet_ascii_output_folder. Default is None. | [
"Prepares",
"HMET",
"data",
"for",
"GSSHA",
"simulation",
"from",
"land",
"surface",
"model",
"data",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/modeling/event.py#L508-L542 | train |
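The `{0}`/`{1}` check lets callers template the HMET output folder on the simulation window; in isolation (template and dates invented):

from datetime import datetime

hmet_ascii_output_folder = 'hmet_{0}_to_{1}'  # invented template
start = datetime(2016, 6, 1)
end = datetime(2016, 6, 2)

if '{0}' in hmet_ascii_output_folder and '{1}' in hmet_ascii_output_folder:
    hmet_ascii_output_folder = hmet_ascii_output_folder.format(
        start.strftime('%Y%m%d%H%M'), end.strftime('%Y%m%d%H%M'))
print(hmet_ascii_output_folder)  # hmet_201606010000_to_201606020000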
Robpol86/etaprogress | etaprogress/components/misc.py | get_remaining_width | def get_remaining_width(sample_string, max_terminal_width=None):
"""Returns the number of characters available if sample string were to be printed in the terminal.
Positional arguments:
sample_string -- gets the length of this string.
Keyword arguments:
max_terminal_width -- limit the overall width of everything to these many characters.
Returns:
Integer.
"""
if max_terminal_width is not None:
available_width = min(terminal_width(), max_terminal_width)
else:
available_width = terminal_width()
return available_width - len(sample_string) | python | def get_remaining_width(sample_string, max_terminal_width=None):
"""Returns the number of characters available if sample string were to be printed in the terminal.
Positional arguments:
sample_string -- gets the length of this string.
Keyword arguments:
max_terminal_width -- limit the overall width of everything to these many characters.
Returns:
Integer.
"""
if max_terminal_width is not None:
available_width = min(terminal_width(), max_terminal_width)
else:
available_width = terminal_width()
return available_width - len(sample_string) | [
"def",
"get_remaining_width",
"(",
"sample_string",
",",
"max_terminal_width",
"=",
"None",
")",
":",
"if",
"max_terminal_width",
"is",
"not",
"None",
":",
"available_width",
"=",
"min",
"(",
"terminal_width",
"(",
")",
",",
"max_terminal_width",
")",
"else",
":",
"available_width",
"=",
"terminal_width",
"(",
")",
"return",
"available_width",
"-",
"len",
"(",
"sample_string",
")"
] | Returns the number of characters available if sample string were to be printed in the terminal.
Positional arguments:
sample_string -- gets the length of this string.
Keyword arguments:
max_terminal_width -- limit the overall width of everything to these many characters.
Returns:
Integer. | [
"Returns",
"the",
"number",
"of",
"characters",
"available",
"if",
"sample",
"string",
"were",
"to",
"be",
"printed",
"in",
"the",
"terminal",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L134-L150 | train |
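A self-contained re-expression of the same arithmetic; `terminal_width()` lives elsewhere in this module, so `shutil` stands in for it here (assumption):

import shutil

def remaining_width(sample_string, max_terminal_width=None):
    # shutil stands in for the module's own terminal_width() helper.
    width = shutil.get_terminal_size().columns
    if max_terminal_width is not None:
        width = min(width, max_terminal_width)
    return width - len(sample_string)

print(remaining_width('ETA: 5s [', max_terminal_width=80))
# 71 when the terminal is at least 80 columns wide: 80 - len('ETA: 5s [')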
Robpol86/etaprogress | etaprogress/components/misc.py | _WindowsCSBI._define_csbi | def _define_csbi():
"""Defines structs and populates _WindowsCSBI.CSBI."""
if _WindowsCSBI.CSBI is not None:
return
class COORD(ctypes.Structure):
"""Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119"""
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRECT(ctypes.Structure):
"""Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311"""
_fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short), ('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class ConsoleScreenBufferInfo(ctypes.Structure):
"""Windows CONSOLE_SCREEN_BUFFER_INFO structure.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093
"""
_fields_ = [
('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', ctypes.wintypes.WORD),
('srWindow', SmallRECT),
('dwMaximumWindowSize', COORD)
]
_WindowsCSBI.CSBI = ConsoleScreenBufferInfo | python | def _define_csbi():
"""Defines structs and populates _WindowsCSBI.CSBI."""
if _WindowsCSBI.CSBI is not None:
return
class COORD(ctypes.Structure):
"""Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119"""
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRECT(ctypes.Structure):
"""Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311"""
_fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short), ('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class ConsoleScreenBufferInfo(ctypes.Structure):
"""Windows CONSOLE_SCREEN_BUFFER_INFO structure.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093
"""
_fields_ = [
('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', ctypes.wintypes.WORD),
('srWindow', SmallRECT),
('dwMaximumWindowSize', COORD)
]
_WindowsCSBI.CSBI = ConsoleScreenBufferInfo | [
"def",
"_define_csbi",
"(",
")",
":",
"if",
"_WindowsCSBI",
".",
"CSBI",
"is",
"not",
"None",
":",
"return",
"class",
"COORD",
"(",
"ctypes",
".",
"Structure",
")",
":",
"\"\"\"Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119\"\"\"",
"_fields_",
"=",
"[",
"(",
"'X'",
",",
"ctypes",
".",
"c_short",
")",
",",
"(",
"'Y'",
",",
"ctypes",
".",
"c_short",
")",
"]",
"class",
"SmallRECT",
"(",
"ctypes",
".",
"Structure",
")",
":",
"\"\"\"Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311\"\"\"",
"_fields_",
"=",
"[",
"(",
"'Left'",
",",
"ctypes",
".",
"c_short",
")",
",",
"(",
"'Top'",
",",
"ctypes",
".",
"c_short",
")",
",",
"(",
"'Right'",
",",
"ctypes",
".",
"c_short",
")",
",",
"(",
"'Bottom'",
",",
"ctypes",
".",
"c_short",
")",
"]",
"class",
"ConsoleScreenBufferInfo",
"(",
"ctypes",
".",
"Structure",
")",
":",
"\"\"\"Windows CONSOLE_SCREEN_BUFFER_INFO structure.\n http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093\n \"\"\"",
"_fields_",
"=",
"[",
"(",
"'dwSize'",
",",
"COORD",
")",
",",
"(",
"'dwCursorPosition'",
",",
"COORD",
")",
",",
"(",
"'wAttributes'",
",",
"ctypes",
".",
"wintypes",
".",
"WORD",
")",
",",
"(",
"'srWindow'",
",",
"SmallRECT",
")",
",",
"(",
"'dwMaximumWindowSize'",
",",
"COORD",
")",
"]",
"_WindowsCSBI",
".",
"CSBI",
"=",
"ConsoleScreenBufferInfo"
] | Defines structs and populates _WindowsCSBI.CSBI. | [
"Defines",
"structs",
"and",
"populates",
"_WindowsCSBI",
".",
"CSBI",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L39-L65 | train |
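Declaring and instantiating one of these ctypes structures is portable (only the kernel32 calls are Windows-specific); a small sketch:

import ctypes

class COORD(ctypes.Structure):
    # Same layout as the Windows COORD struct defined above.
    _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]

pos = COORD(X=80, Y=24)
print(pos.X, pos.Y)          # 80 24
print(ctypes.sizeof(COORD))  # 4 (two 16-bit shorts)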
Robpol86/etaprogress | etaprogress/components/misc.py | _WindowsCSBI.initialize | def initialize():
"""Initializes the WINDLL resource and populated the CSBI class variable."""
_WindowsCSBI._define_csbi()
_WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
_WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
return
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [
ctypes.wintypes.HANDLE, ctypes.POINTER(_WindowsCSBI.CSBI)
] | python | def initialize():
"""Initializes the WINDLL resource and populated the CSBI class variable."""
_WindowsCSBI._define_csbi()
_WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
_WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
return
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [
ctypes.wintypes.HANDLE, ctypes.POINTER(_WindowsCSBI.CSBI)
] | [
"def",
"initialize",
"(",
")",
":",
"_WindowsCSBI",
".",
"_define_csbi",
"(",
")",
"_WindowsCSBI",
".",
"HANDLE_STDERR",
"=",
"_WindowsCSBI",
".",
"HANDLE_STDERR",
"or",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
"(",
"-",
"12",
")",
"_WindowsCSBI",
".",
"HANDLE_STDOUT",
"=",
"_WindowsCSBI",
".",
"HANDLE_STDOUT",
"or",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
"(",
"-",
"11",
")",
"if",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"argtypes",
":",
"return",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"wintypes",
".",
"DWORD",
"]",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
".",
"restype",
"=",
"ctypes",
".",
"wintypes",
".",
"HANDLE",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"restype",
"=",
"ctypes",
".",
"wintypes",
".",
"BOOL",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"wintypes",
".",
"HANDLE",
",",
"ctypes",
".",
"POINTER",
"(",
"_WindowsCSBI",
".",
"CSBI",
")",
"]"
] | Initializes the WINDLL resource and populates the CSBI class variable. | [
"Initializes",
"the",
"WINDLL",
"resource",
"and",
"populated",
"the",
"CSBI",
"class",
"variable",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/misc.py#L68-L81 | train |
churchill-lab/gbrs | gbrs/emase_utils.py | stencil | def stencil(**kwargs):
"""
Applying genotype calls to multi-way alignment incidence matrix
:param alnfile: alignment incidence file (h5),
:param gtypefile: genotype calls by GBRS (tsv),
:param grpfile: gene ID to isoform ID mapping info (tsv)
:return: genotyped version of alignment incidence file (h5)
"""
alnfile = kwargs.get('alnfile')
gtypefile = kwargs.get('gtypefile')
grpfile = kwargs.get('grpfile')
if grpfile is None:
grpfile2chk = os.path.join(DATA_DIR, 'ref.gene2transcripts.tsv')
if os.path.exists(grpfile2chk):
grpfile = grpfile2chk
else:
print >> sys.stderr, '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.'
# Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome)
alnmat = emase.AlignmentPropertyMatrix(h5file=alnfile, grpfile=grpfile)
# Load genotype calls
hid = dict(zip(alnmat.hname, np.arange(alnmat.num_haplotypes)))
gid = dict(zip(alnmat.gname, np.arange(len(alnmat.gname))))
gtmask = np.zeros((alnmat.num_haplotypes, alnmat.num_loci))
gtcall_g = dict.fromkeys(alnmat.gname)
with open(gtypefile) as fh:
if grpfile is not None:
gtcall_t = dict.fromkeys(alnmat.lname)
for curline in dropwhile(is_comment, fh):
item = curline.rstrip().split("\t")
g, gt = item[:2]
gtcall_g[g] = gt
hid2set = np.array([hid[c] for c in gt])
tid2set = np.array(alnmat.groups[gid[g]])
gtmask[np.meshgrid(hid2set, tid2set)] = 1.0
for t in tid2set:
gtcall_t[alnmat.lname[t]] = gt
else:
for curline in dropwhile(is_comment, fh):
item = curline.rstrip().split("\t")
g, gt = item[:2]
gtcall_g[g] = gt
hid2set = np.array([hid[c] for c in gt])
gtmask[np.meshgrid(hid2set, gid[g])] = 1.0
alnmat.multiply(gtmask, axis=2)
for h in xrange(alnmat.num_haplotypes):
alnmat.data[h].eliminate_zeros()
outfile = kwargs.get('outfile')
if outfile is None:
outfile = 'gbrs.stenciled.' + os.path.basename(alnfile)
alnmat.save(h5file=outfile) | python | def stencil(**kwargs):
"""
Applying genotype calls to multi-way alignment incidence matrix
:param alnfile: alignment incidence file (h5),
:param gtypefile: genotype calls by GBRS (tsv),
:param grpfile: gene ID to isoform ID mapping info (tsv)
:return: genotyped version of alignment incidence file (h5)
"""
alnfile = kwargs.get('alnfile')
gtypefile = kwargs.get('gtypefile')
grpfile = kwargs.get('grpfile')
if grpfile is None:
grpfile2chk = os.path.join(DATA_DIR, 'ref.gene2transcripts.tsv')
if os.path.exists(grpfile2chk):
grpfile = grpfile2chk
else:
print >> sys.stderr, '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.'
# Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome)
alnmat = emase.AlignmentPropertyMatrix(h5file=alnfile, grpfile=grpfile)
# Load genotype calls
hid = dict(zip(alnmat.hname, np.arange(alnmat.num_haplotypes)))
gid = dict(zip(alnmat.gname, np.arange(len(alnmat.gname))))
gtmask = np.zeros((alnmat.num_haplotypes, alnmat.num_loci))
gtcall_g = dict.fromkeys(alnmat.gname)
with open(gtypefile) as fh:
if grpfile is not None:
gtcall_t = dict.fromkeys(alnmat.lname)
for curline in dropwhile(is_comment, fh):
item = curline.rstrip().split("\t")
g, gt = item[:2]
gtcall_g[g] = gt
hid2set = np.array([hid[c] for c in gt])
tid2set = np.array(alnmat.groups[gid[g]])
gtmask[np.meshgrid(hid2set, tid2set)] = 1.0
for t in tid2set:
gtcall_t[alnmat.lname[t]] = gt
else:
for curline in dropwhile(is_comment, fh):
item = curline.rstrip().split("\t")
g, gt = item[:2]
gtcall_g[g] = gt
hid2set = np.array([hid[c] for c in gt])
gtmask[np.meshgrid(hid2set, gid[g])] = 1.0
alnmat.multiply(gtmask, axis=2)
for h in xrange(alnmat.num_haplotypes):
alnmat.data[h].eliminate_zeros()
outfile = kwargs.get('outfile')
if outfile is None:
outfile = 'gbrs.stenciled.' + os.path.basename(alnfile)
alnmat.save(h5file=outfile) | [
"def",
"stencil",
"(",
"*",
"*",
"kwargs",
")",
":",
"alnfile",
"=",
"kwargs",
".",
"get",
"(",
"'alnfile'",
")",
"gtypefile",
"=",
"kwargs",
".",
"get",
"(",
"'gtypefile'",
")",
"grpfile",
"=",
"kwargs",
".",
"get",
"(",
"'grpfile'",
")",
"if",
"grpfile",
"is",
"None",
":",
"grpfile2chk",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DATA_DIR",
",",
"'ref.gene2transcripts.tsv'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"grpfile2chk",
")",
":",
"grpfile",
"=",
"grpfile2chk",
"else",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"'[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.'",
"# Load alignment incidence matrix ('alnfile' is assumed to be in multiway transcriptome)",
"alnmat",
"=",
"emase",
".",
"AlignmentPropertyMatrix",
"(",
"h5file",
"=",
"alnfile",
",",
"grpfile",
"=",
"grpfile",
")",
"# Load genotype calls",
"hid",
"=",
"dict",
"(",
"zip",
"(",
"alnmat",
".",
"hname",
",",
"np",
".",
"arange",
"(",
"alnmat",
".",
"num_haplotypes",
")",
")",
")",
"gid",
"=",
"dict",
"(",
"zip",
"(",
"alnmat",
".",
"gname",
",",
"np",
".",
"arange",
"(",
"len",
"(",
"alnmat",
".",
"gname",
")",
")",
")",
")",
"gtmask",
"=",
"np",
".",
"zeros",
"(",
"(",
"alnmat",
".",
"num_haplotypes",
",",
"alnmat",
".",
"num_loci",
")",
")",
"gtcall_g",
"=",
"dict",
".",
"fromkeys",
"(",
"alnmat",
".",
"gname",
")",
"with",
"open",
"(",
"gtypefile",
")",
"as",
"fh",
":",
"if",
"grpfile",
"is",
"not",
"None",
":",
"gtcall_t",
"=",
"dict",
".",
"fromkeys",
"(",
"alnmat",
".",
"lname",
")",
"for",
"curline",
"in",
"dropwhile",
"(",
"is_comment",
",",
"fh",
")",
":",
"item",
"=",
"curline",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"g",
",",
"gt",
"=",
"item",
"[",
":",
"2",
"]",
"gtcall_g",
"[",
"g",
"]",
"=",
"gt",
"hid2set",
"=",
"np",
".",
"array",
"(",
"[",
"hid",
"[",
"c",
"]",
"for",
"c",
"in",
"gt",
"]",
")",
"tid2set",
"=",
"np",
".",
"array",
"(",
"alnmat",
".",
"groups",
"[",
"gid",
"[",
"g",
"]",
"]",
")",
"gtmask",
"[",
"np",
".",
"meshgrid",
"(",
"hid2set",
",",
"tid2set",
")",
"]",
"=",
"1.0",
"for",
"t",
"in",
"tid2set",
":",
"gtcall_t",
"[",
"alnmat",
".",
"lname",
"[",
"t",
"]",
"]",
"=",
"gt",
"else",
":",
"for",
"curline",
"in",
"dropwhile",
"(",
"is_comment",
",",
"fh",
")",
":",
"item",
"=",
"curline",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"g",
",",
"gt",
"=",
"item",
"[",
":",
"2",
"]",
"gtcall_g",
"[",
"g",
"]",
"=",
"gt",
"hid2set",
"=",
"np",
".",
"array",
"(",
"[",
"hid",
"[",
"c",
"]",
"for",
"c",
"in",
"gt",
"]",
")",
"gtmask",
"[",
"np",
".",
"meshgrid",
"(",
"hid2set",
",",
"gid",
"[",
"g",
"]",
")",
"]",
"=",
"1.0",
"alnmat",
".",
"multiply",
"(",
"gtmask",
",",
"axis",
"=",
"2",
")",
"for",
"h",
"in",
"xrange",
"(",
"alnmat",
".",
"num_haplotypes",
")",
":",
"alnmat",
".",
"data",
"[",
"h",
"]",
".",
"eliminate_zeros",
"(",
")",
"outfile",
"=",
"kwargs",
".",
"get",
"(",
"'outfile'",
")",
"if",
"outfile",
"is",
"None",
":",
"outfile",
"=",
"'gbrs.stenciled.'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"alnfile",
")",
"alnmat",
".",
"save",
"(",
"h5file",
"=",
"outfile",
")"
] | Applying genotype calls to multi-way alignment incidence matrix
:param alnfile: alignment incidence file (h5),
:param gtypefile: genotype calls by GBRS (tsv),
:param grpfile: gene ID to isoform ID mapping info (tsv)
:return: genotyped version of alignment incidence file (h5) | [
"Applying",
"genotype",
"calls",
"to",
"multi",
"-",
"way",
"alignment",
"incidence",
"matrix"
] | 0f32d2620e82cb1459e56083af7c6e5c72d6ea88 | https://github.com/churchill-lab/gbrs/blob/0f32d2620e82cb1459e56083af7c6e5c72d6ea88/gbrs/emase_utils.py#L160-L213 | train |
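The haplotype-by-transcript masking sets a full cross product of rows and columns; `np.ix_` is the conventional way to write what the `np.meshgrid` indexing above achieves (toy sizes invented):

import numpy as np

gtmask = np.zeros((3, 5))   # (num_haplotypes, num_loci), invented
hid2set = np.array([0, 2])  # haplotypes called for one gene (invented)
tid2set = np.array([1, 3])  # that gene's transcript columns (invented)

# Set every (haplotype, transcript) combination to 1.0.
gtmask[np.ix_(hid2set, tid2set)] = 1.0
print(gtmask)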
hellupline/flask-manager | flask_manager/tree.py | Tree.register_items | def register_items(self, items):
"""Bulk ``register_item``.
Args:
items (iterable[Tree]):
Sequence of nodes to be registered as children.
"""
for item in items:
item.set_parent(self)
self.items.extend(items) | python | def register_items(self, items):
"""Bulk ``register_item``.
Args:
items (iterable[Tree]):
Sequence of nodes to be registered as children.
"""
for item in items:
item.set_parent(self)
self.items.extend(items) | [
"def",
"register_items",
"(",
"self",
",",
"items",
")",
":",
"for",
"item",
"in",
"items",
":",
"item",
".",
"set_parent",
"(",
"self",
")",
"self",
".",
"items",
".",
"extend",
"(",
"items",
")"
] | Bulk ``register_item``.
Args:
items (iterable[Tree]):
Sequence of nodes to be registered as children. | [
"Bulk",
"register_item",
"."
] | 70e48309f73aacf55f5c37b43165791ae1cf6861 | https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L38-L47 | train |
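A usage sketch; the `Tree` constructor is not shown in this excerpt, so the `name`/`url` keyword arguments are assumed from the attributes the class uses:

root = Tree(name='Main', url='/')  # assumed constructor signature
root.register_items([Tree(name='Users', url='users'),
                     Tree(name='Groups', url='groups')])
print([item.name for item in root.items])  # ['Users', 'Groups']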
hellupline/flask-manager | flask_manager/tree.py | Tree.endpoints | def endpoints(self):
"""
Get all the endpoints under this node in a tree like structure.
Returns:
(tuple):
name (str): This node's name.
endpoint (str): Endpoint name relative to root.
children (list): ``child.endpoints()`` for each child
"""
children = [item.endpoints() for item in self.items]
return self.name, self.endpoint, children | python | def endpoints(self):
"""
Get all the endpoints under this node in a tree like structure.
Returns:
(tuple):
name (str): This node's name.
endpoint (str): Endpoint name relative to root.
children (list): ``child.endpoints`` for each child
"""
children = [item.endpoints() for item in self.items]
return self.name, self.endpoint, children | [
"def",
"endpoints",
"(",
"self",
")",
":",
"children",
"=",
"[",
"item",
".",
"endpoints",
"(",
")",
"for",
"item",
"in",
"self",
".",
"items",
"]",
"return",
"self",
".",
"name",
",",
"self",
".",
"endpoint",
",",
"children"
] | Get all the endpoints under this node in a tree like structure.
Returns:
(tuple):
name (str): This node's name.
endpoint (str): Endpoint name relative to root.
children (list): ``child.endpoints`` for each child
"Get",
"all",
"the",
"endpoints",
"under",
"this",
"node",
"in",
"a",
"tree",
"like",
"structure",
"."
] | 70e48309f73aacf55f5c37b43165791ae1cf6861 | https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L74-L86 | train |
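A short sketch of the nested (name, endpoint, children) tuples endpoints() returns. The Node stand-in is hypothetical; name and endpoint are plain attributes here, whereas the real Tree computes them.

    class Node:
        def __init__(self, name, endpoint, items=()):
            self.name, self.endpoint, self.items = name, endpoint, list(items)
        def endpoints(self):  # same recursion as above
            return self.name, self.endpoint, [i.endpoints() for i in self.items]

    leaf = Node('users', 'root:users')
    root = Node('root', 'root', [leaf])
    assert root.endpoints() == ('root', 'root', [('users', 'root:users', [])])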
hellupline/flask-manager | flask_manager/tree.py | Tree.absolute_name | def absolute_name(self):
"""Get the absolute name of ``self``.
Returns:
str: the absolute name.
"""
if self.is_root() or self.parent.is_root():
return utils.slugify(self.name)
return ':'.join([self.parent.absolute_name, utils.slugify(self.name)]) | python | def absolute_name(self):
"""Get the absolute name of ``self``.
Returns:
str: the absolute name.
"""
if self.is_root() or self.parent.is_root():
return utils.slugify(self.name)
return ':'.join([self.parent.absolute_name, utils.slugify(self.name)]) | [
"def",
"absolute_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_root",
"(",
")",
"or",
"self",
".",
"parent",
".",
"is_root",
"(",
")",
":",
"return",
"utils",
".",
"slugify",
"(",
"self",
".",
"name",
")",
"return",
"':'",
".",
"join",
"(",
"[",
"self",
".",
"parent",
".",
"absolute_name",
",",
"utils",
".",
"slugify",
"(",
"self",
".",
"name",
")",
"]",
")"
] | Get the absolute name of ``self``.
Returns:
str: the absolute name. | [
"Get",
"the",
"absolute",
"name",
"of",
"self",
"."
] | 70e48309f73aacf55f5c37b43165791ae1cf6861 | https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L103-L111 | train |
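A worked sketch of the naming scheme: nodes directly under the root contribute only their own slug, and deeper nodes join the parent's absolute name with ':'. The slugify below is an assumed stand-in for utils.slugify (lowercase, spaces to dashes).

    import re

    def slugify(s):  # assumed behavior of utils.slugify
        return re.sub(r'\s+', '-', s.strip().lower())

    class Node:
        def __init__(self, name, parent=None):
            self.name, self.parent = name, parent
        def is_root(self):
            return self.parent is None
        @property
        def absolute_name(self):  # same logic as above
            if self.is_root() or self.parent.is_root():
                return slugify(self.name)
            return ':'.join([self.parent.absolute_name, slugify(self.name)])

    root = Node('Root')
    crud = Node('User Admin', root)
    view = Node('List View', crud)
    assert view.absolute_name == 'user-admin:list-view'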
hellupline/flask-manager | flask_manager/tree.py | Tree.absolute_url | def absolute_url(self):
"""Get the absolute url of ``self``.
Returns:
str: the absolute url.
"""
if self.is_root():
return utils.concat_urls(self.url)
return utils.concat_urls(self.parent.absolute_url, self.url) | python | def absolute_url(self):
"""Get the absolute url of ``self``.
Returns:
str: the absolute url.
"""
if self.is_root():
return utils.concat_urls(self.url)
return utils.concat_urls(self.parent.absolute_url, self.url) | [
"def",
"absolute_url",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_root",
"(",
")",
":",
"return",
"utils",
".",
"concat_urls",
"(",
"self",
".",
"url",
")",
"return",
"utils",
".",
"concat_urls",
"(",
"self",
".",
"parent",
".",
"absolute_url",
",",
"self",
".",
"url",
")"
] | Get the absolute url of ``self``.
Returns:
str: the absolute url. | [
"Get",
"the",
"absolute",
"url",
"of",
"self",
"."
] | 70e48309f73aacf55f5c37b43165791ae1cf6861 | https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/tree.py#L114-L122 | train |
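The same pattern for URLs: each node's url fragment is appended to the parent's absolute_url. concat_urls is assumed here to join fragments with single slashes.

    def concat_urls(*parts):  # assumed behavior of utils.concat_urls
        return '/' + '/'.join(p.strip('/') for p in parts if p.strip('/'))

    class Node:
        def __init__(self, url, parent=None):
            self.url, self.parent = url, parent
        def is_root(self):
            return self.parent is None
        @property
        def absolute_url(self):  # same logic as above
            if self.is_root():
                return concat_urls(self.url)
            return concat_urls(self.parent.absolute_url, self.url)

    assert Node('users', Node('/admin')).absolute_url == '/admin/users'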
theodoregoetz/wernher | wernher/map_view.py | MapView.split_tracks | def split_tracks(lat,lon,*args):
'''assumes eastward motion'''
tracks = []
lt,ln = [lat[0]],[lon[0]]
zz = [[z[0]] for z in args]
for i in range(1,len(lon)):
lt.append(lat[i])
for z,a in zip(zz,args):
z.append(a[i])
d1 = abs(lon[i] - lon[i-1])
d2 = abs((lon[i-1] + 360) - lon[i])
d3 = abs(lon[i-1] - (lon[i] + 360))
if d2 < d1:
ln.append(lon[i]-360)
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
lt = [lat[i-1],lat[i]]
ln = [lon[i-1]+360,lon[i]]
zz = [[z[i-1]] for z in args]
elif d3 < d1:
ln.append(lon[i]+360)
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
lt = [lat[i-1],lat[i]]
ln = [lon[i-1]-360,lon[i]]
zz = [[z[i-1],z[i]] for z in args]
else:
ln.append(lon[i])
if len(lt):
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
return tracks | python | def split_tracks(lat,lon,*args):
'''assumes eastward motion'''
tracks = []
lt,ln = [lat[0]],[lon[0]]
zz = [[z[0]] for z in args]
for i in range(1,len(lon)):
lt.append(lat[i])
for z,a in zip(zz,args):
z.append(a[i])
d1 = abs(lon[i] - lon[i-1])
d2 = abs((lon[i-1] + 360) - lon[i])
d3 = abs(lon[i-1] - (lon[i] + 360))
if d2 < d1:
ln.append(lon[i]-360)
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
lt = [lat[i-1],lat[i]]
ln = [lon[i-1]+360,lon[i]]
zz = [[z[i-1]] for z in args]
elif d3 < d1:
ln.append(lon[i]+360)
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
lt = [lat[i-1],lat[i]]
ln = [lon[i-1]-360,lon[i]]
zz = [[z[i-1],z[i]] for z in args]
else:
ln.append(lon[i])
if len(lt):
tracks.append([np.array(lt),np.array(ln)] \
+ [np.array(z) for z in zz])
return tracks | [
"def",
"split_tracks",
"(",
"lat",
",",
"lon",
",",
"*",
"args",
")",
":",
"tracks",
"=",
"[",
"]",
"lt",
",",
"ln",
"=",
"[",
"lat",
"[",
"0",
"]",
"]",
",",
"[",
"lon",
"[",
"0",
"]",
"]",
"zz",
"=",
"[",
"[",
"z",
"[",
"0",
"]",
"]",
"for",
"z",
"in",
"args",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"lon",
")",
")",
":",
"lt",
".",
"append",
"(",
"lat",
"[",
"i",
"]",
")",
"for",
"z",
",",
"a",
"in",
"zip",
"(",
"zz",
",",
"args",
")",
":",
"z",
".",
"append",
"(",
"a",
"[",
"i",
"]",
")",
"d1",
"=",
"abs",
"(",
"lon",
"[",
"i",
"]",
"-",
"lon",
"[",
"i",
"-",
"1",
"]",
")",
"d2",
"=",
"abs",
"(",
"(",
"lon",
"[",
"i",
"-",
"1",
"]",
"+",
"360",
")",
"-",
"lon",
"[",
"i",
"]",
")",
"d3",
"=",
"abs",
"(",
"lon",
"[",
"i",
"-",
"1",
"]",
"-",
"(",
"lon",
"[",
"i",
"]",
"+",
"360",
")",
")",
"if",
"d2",
"<",
"d1",
":",
"ln",
".",
"append",
"(",
"lon",
"[",
"i",
"]",
"-",
"360",
")",
"tracks",
".",
"append",
"(",
"[",
"np",
".",
"array",
"(",
"lt",
")",
",",
"np",
".",
"array",
"(",
"ln",
")",
"]",
"+",
"[",
"np",
".",
"array",
"(",
"z",
")",
"for",
"z",
"in",
"zz",
"]",
")",
"lt",
"=",
"[",
"lat",
"[",
"i",
"-",
"1",
"]",
",",
"lat",
"[",
"i",
"]",
"]",
"ln",
"=",
"[",
"lon",
"[",
"i",
"-",
"1",
"]",
"+",
"360",
",",
"lon",
"[",
"i",
"]",
"]",
"zz",
"=",
"[",
"[",
"z",
"[",
"i",
"-",
"1",
"]",
"]",
"for",
"z",
"in",
"args",
"]",
"elif",
"d3",
"<",
"d1",
":",
"ln",
".",
"append",
"(",
"lon",
"[",
"i",
"]",
"+",
"360",
")",
"tracks",
".",
"append",
"(",
"[",
"np",
".",
"array",
"(",
"lt",
")",
",",
"np",
".",
"array",
"(",
"ln",
")",
"]",
"+",
"[",
"np",
".",
"array",
"(",
"z",
")",
"for",
"z",
"in",
"zz",
"]",
")",
"lt",
"=",
"[",
"lat",
"[",
"i",
"-",
"1",
"]",
",",
"lat",
"[",
"i",
"]",
"]",
"ln",
"=",
"[",
"lon",
"[",
"i",
"-",
"1",
"]",
"-",
"360",
",",
"lon",
"[",
"i",
"]",
"]",
"zz",
"=",
"[",
"[",
"z",
"[",
"i",
"-",
"1",
"]",
",",
"z",
"[",
"i",
"]",
"]",
"for",
"z",
"in",
"args",
"]",
"else",
":",
"ln",
".",
"append",
"(",
"lon",
"[",
"i",
"]",
")",
"if",
"len",
"(",
"lt",
")",
":",
"tracks",
".",
"append",
"(",
"[",
"np",
".",
"array",
"(",
"lt",
")",
",",
"np",
".",
"array",
"(",
"ln",
")",
"]",
"+",
"[",
"np",
".",
"array",
"(",
"z",
")",
"for",
"z",
"in",
"zz",
"]",
")",
"return",
"tracks"
] | assumes eastward motion | [
"assumes",
"eastward",
"motion"
] | ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e | https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/wernher/map_view.py#L56-L87 | train |
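A worked example of the date-line handling: an eastward ground track crossing +180 degrees is split into two segments, each extended one point past the wrap so both plot as continuous lines. The import path is inferred from this record; split_tracks takes no self, so it is called like a static helper.

    import numpy as np
    from wernher.map_view import MapView  # path assumed from this record

    lat = np.array([0.0, 5.0, 10.0, 15.0])
    lon = np.array([170.0, 179.0, -172.0, -163.0])  # eastward across +180
    tracks = MapView.split_tracks(lat, lon)
    # tracks[0]: lat [0, 5, 10],  lon [170, 179, 188]   (unwrapped past 180)
    # tracks[1]: lat [5, 10, 15], lon [-181, -172, -163]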
Robpol86/etaprogress | etaprogress/progress.py | ProgressBarWget.str_rate | def str_rate(self):
"""Returns the rate with formatting. If done, returns the overall rate instead."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--.-KiB/s'
unit_rate, unit = UnitByte(self._eta.rate_overall if self.done else self.rate).auto
if unit_rate >= 100:
formatter = '%d'
elif unit_rate >= 10:
formatter = '%.1f'
else:
formatter = '%.2f'
return '{0}{1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit) | python | def str_rate(self):
"""Returns the rate with formatting. If done, returns the overall rate instead."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--.-KiB/s'
unit_rate, unit = UnitByte(self._eta.rate_overall if self.done else self.rate).auto
if unit_rate >= 100:
formatter = '%d'
elif unit_rate >= 10:
formatter = '%.1f'
else:
formatter = '%.2f'
return '{0}{1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit) | [
"def",
"str_rate",
"(",
"self",
")",
":",
"# Handle special cases.",
"if",
"not",
"self",
".",
"_eta",
".",
"started",
"or",
"self",
".",
"_eta",
".",
"stalled",
"or",
"not",
"self",
".",
"rate",
":",
"return",
"'--.-KiB/s'",
"unit_rate",
",",
"unit",
"=",
"UnitByte",
"(",
"self",
".",
"_eta",
".",
"rate_overall",
"if",
"self",
".",
"done",
"else",
"self",
".",
"rate",
")",
".",
"auto",
"if",
"unit_rate",
">=",
"100",
":",
"formatter",
"=",
"'%d'",
"elif",
"unit_rate",
">=",
"10",
":",
"formatter",
"=",
"'%.1f'",
"else",
":",
"formatter",
"=",
"'%.2f'",
"return",
"'{0}{1}/s'",
".",
"format",
"(",
"locale",
".",
"format",
"(",
"formatter",
",",
"unit_rate",
",",
"grouping",
"=",
"False",
")",
",",
"unit",
")"
] | Returns the rate with formatting. If done, returns the overall rate instead. | [
"Returns",
"the",
"rate",
"with",
"formatting",
".",
"If",
"done",
"returns",
"the",
"overall",
"rate",
"instead",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/progress.py#L239-L252 | train |
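The precision ladder in isolation: the formatter keeps roughly three significant digits by switching between %d, %.1f and %.2f with magnitude. UnitByte is etaprogress's own scaler; the loop below is a simplified stand-in for its auto behavior.

    def fmt(rate):  # rate in bytes/second; simplified stand-in for UnitByte
        for unit, factor in (('GiB', 1024**3), ('MiB', 1024**2), ('KiB', 1024)):
            if rate >= factor:
                value = rate / factor
                break
        else:
            value, unit = float(rate), 'B'
        formatter = '%d' if value >= 100 else '%.1f' if value >= 10 else '%.2f'
        return (formatter % value) + unit + '/s'

    assert fmt(3500000) == '3.34MiB/s'
    assert fmt(150 * 1024) == '150KiB/s'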
Robpol86/etaprogress | etaprogress/progress.py | ProgressBarYum.str_rate | def str_rate(self):
"""Returns the rate with formatting."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--- KiB/s'
unit_rate, unit = UnitByte(self.rate).auto_no_thousands
if unit_rate >= 10:
formatter = '%d'
else:
formatter = '%0.1f'
return '{0} {1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit) | python | def str_rate(self):
"""Returns the rate with formatting."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--- KiB/s'
unit_rate, unit = UnitByte(self.rate).auto_no_thousands
if unit_rate >= 10:
formatter = '%d'
else:
formatter = '%0.1f'
return '{0} {1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit) | [
"def",
"str_rate",
"(",
"self",
")",
":",
"# Handle special cases.",
"if",
"not",
"self",
".",
"_eta",
".",
"started",
"or",
"self",
".",
"_eta",
".",
"stalled",
"or",
"not",
"self",
".",
"rate",
":",
"return",
"'--- KiB/s'",
"unit_rate",
",",
"unit",
"=",
"UnitByte",
"(",
"self",
".",
"rate",
")",
".",
"auto_no_thousands",
"if",
"unit_rate",
">=",
"10",
":",
"formatter",
"=",
"'%d'",
"else",
":",
"formatter",
"=",
"'%0.1f'",
"return",
"'{0} {1}/s'",
".",
"format",
"(",
"locale",
".",
"format",
"(",
"formatter",
",",
"unit_rate",
",",
"grouping",
"=",
"False",
")",
",",
"unit",
")"
] | Returns the rate with formatting. | [
"Returns",
"the",
"rate",
"with",
"formatting",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/progress.py#L339-L350 | train |
CI-WATER/gsshapy | gsshapy/lib/db_tools.py | init_db | def init_db(sqlalchemy_url):
"""
Initialize database with gsshapy tables
"""
engine = create_engine(sqlalchemy_url)
start = time.time()
metadata.create_all(engine)
return time.time() - start | python | def init_db(sqlalchemy_url):
"""
Initialize database with gsshapy tables
"""
engine = create_engine(sqlalchemy_url)
start = time.time()
metadata.create_all(engine)
return time.time() - start | [
"def",
"init_db",
"(",
"sqlalchemy_url",
")",
":",
"engine",
"=",
"create_engine",
"(",
"sqlalchemy_url",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"metadata",
".",
"create_all",
"(",
"engine",
")",
"return",
"time",
".",
"time",
"(",
")",
"-",
"start"
] | Initialize database with gsshapy tables | [
"Initialize",
"database",
"with",
"gsshapy",
"tables"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L35-L42 | train |
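A hedged usage sketch: point init_db at any SQLAlchemy URL (a local SQLite file here, as a placeholder) and it creates all gsshapy tables, returning the elapsed wall time in seconds.

    from gsshapy.lib.db_tools import init_db  # path from this record

    elapsed = init_db('sqlite:///gssha.db')
    print('schema created in %.2f s' % elapsed)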
CI-WATER/gsshapy | gsshapy/lib/db_tools.py | get_sessionmaker | def get_sessionmaker(sqlalchemy_url, engine=None):
"""
Create session with database to work in
"""
if engine is None:
engine = create_engine(sqlalchemy_url)
return sessionmaker(bind=engine) | python | def get_sessionmaker(sqlalchemy_url, engine=None):
"""
Create session with database to work in
"""
if engine is None:
engine = create_engine(sqlalchemy_url)
return sessionmaker(bind=engine) | [
"def",
"get_sessionmaker",
"(",
"sqlalchemy_url",
",",
"engine",
"=",
"None",
")",
":",
"if",
"engine",
"is",
"None",
":",
"engine",
"=",
"create_engine",
"(",
"sqlalchemy_url",
")",
"return",
"sessionmaker",
"(",
"bind",
"=",
"engine",
")"
] | Create session with database to work in | [
"Create",
"session",
"with",
"database",
"to",
"work",
"in"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L234-L240 | train |
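Typical pattern around get_sessionmaker: build the factory once, then open short-lived sessions from it. The URL is a placeholder.

    from gsshapy.lib.db_tools import get_sessionmaker  # path from this record

    Session = get_sessionmaker('sqlite:///gssha.db')
    session = Session()
    try:
        pass  # query or add gsshapy ORM objects here
    finally:
        session.close()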
CI-WATER/gsshapy | gsshapy/lib/db_tools.py | get_project_session | def get_project_session(project_name, project_directory, map_type=None):
"""
Load project manager and in memory sqlite db sessionmaker for GSSHA project
"""
sqlalchemy_url, sql_engine = init_sqlite_memory()
gdb_sessionmaker = get_sessionmaker(sqlalchemy_url, sql_engine)
project_manager = ProjectFile(name=project_name,
project_directory=project_directory,
map_type=map_type)
return project_manager, gdb_sessionmaker | python | def get_project_session(project_name, project_directory, map_type=None):
"""
Load project manager and in memory sqlite db sessionmaker for GSSHA project
"""
sqlalchemy_url, sql_engine = init_sqlite_memory()
gdb_sessionmaker = get_sessionmaker(sqlalchemy_url, sql_engine)
project_manager = ProjectFile(name=project_name,
project_directory=project_directory,
map_type=map_type)
return project_manager, gdb_sessionmaker | [
"def",
"get_project_session",
"(",
"project_name",
",",
"project_directory",
",",
"map_type",
"=",
"None",
")",
":",
"sqlalchemy_url",
",",
"sql_engine",
"=",
"init_sqlite_memory",
"(",
")",
"gdb_sessionmaker",
"=",
"get_sessionmaker",
"(",
"sqlalchemy_url",
",",
"sql_engine",
")",
"project_manager",
"=",
"ProjectFile",
"(",
"name",
"=",
"project_name",
",",
"project_directory",
"=",
"project_directory",
",",
"map_type",
"=",
"map_type",
")",
"return",
"project_manager",
",",
"gdb_sessionmaker"
] | Load project manager and in memory sqlite db sessionmaker for GSSHA project | [
"Load",
"project",
"manager",
"and",
"in",
"memory",
"sqlite",
"db",
"sessionmaker",
"for",
"GSSHA",
"project"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L243-L252 | train |
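A hedged sketch of get_project_session: the project name and directory below are placeholders. The call returns an unsaved ProjectFile plus a sessionmaker bound to an in-memory SQLite database.

    from gsshapy.lib.db_tools import get_project_session  # path from this record

    project_manager, Session = get_project_session('grid_standard',
                                                   '/path/to/project')
    session = Session()
    # read or write the project through project_manager within this session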
Pylons/plaster | src/plaster/loaders.py | get_settings | def get_settings(config_uri, section=None, defaults=None):
"""
Load the settings from a named section.
.. code-block:: python
settings = plaster.get_settings(...)
print(settings['foo'])
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param section: The name of the section in the config file. If this is
``None`` then it is up to the loader to determine a sensible default
usually derived from the fragment in the ``path#name`` syntax of the
``config_uri``.
:param defaults: A ``dict`` of default values used to populate the
settings and support variable interpolation. Any values in ``defaults``
may be overridden by the loader prior to returning the final
configuration dictionary.
:returns: A ``dict`` of settings. This should return a dictionary object
even if no data is available.
"""
loader = get_loader(config_uri)
return loader.get_settings(section, defaults) | python | def get_settings(config_uri, section=None, defaults=None):
"""
Load the settings from a named section.
.. code-block:: python
settings = plaster.get_settings(...)
print(settings['foo'])
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param section: The name of the section in the config file. If this is
``None`` then it is up to the loader to determine a sensible default
usually derived from the fragment in the ``path#name`` syntax of the
``config_uri``.
:param defaults: A ``dict`` of default values used to populate the
settings and support variable interpolation. Any values in ``defaults``
may be overridden by the loader prior to returning the final
configuration dictionary.
:returns: A ``dict`` of settings. This should return a dictionary object
even if no data is available.
"""
loader = get_loader(config_uri)
return loader.get_settings(section, defaults) | [
"def",
"get_settings",
"(",
"config_uri",
",",
"section",
"=",
"None",
",",
"defaults",
"=",
"None",
")",
":",
"loader",
"=",
"get_loader",
"(",
"config_uri",
")",
"return",
"loader",
".",
"get_settings",
"(",
"section",
",",
"defaults",
")"
] | Load the settings from a named section.
.. code-block:: python
settings = plaster.get_settings(...)
print(settings['foo'])
:param config_uri: Anything that can be parsed by
:func:`plaster.parse_uri`.
:param section: The name of the section in the config file. If this is
``None`` then it is up to the loader to determine a sensible default
usually derived from the fragment in the ``path#name`` syntax of the
``config_uri``.
:param defaults: A ``dict`` of default values used to populate the
settings and support variable interpolation. Any values in ``defaults``
may be overridden by the loader prior to returning the final
configuration dictionary.
:returns: A ``dict`` of settings. This should return a dictionary object
even if no data is available. | [
"Load",
"the",
"settings",
"from",
"a",
"named",
"section",
"."
] | e70e55c182a8300d7ccf67e54d47740c72e72cd8 | https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L33-L60 | train |
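A hedged usage sketch following the docstring: the file name, section, and setting key below are placeholders.

    import plaster

    settings = plaster.get_settings('development.ini', section='app:main')
    print(settings.get('sqlalchemy.url'))  # key is illustrative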
Pylons/plaster | src/plaster/loaders.py | find_loaders | def find_loaders(scheme, protocols=None):
"""
Find all loaders that match the requested scheme and protocols.
:param scheme: Any valid scheme. Examples would be something like ``ini``
or ``ini+pastedeploy``.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement. If ``None`` then only generic loaders will
be returned.
:returns: A list containing zero or more :class:`plaster.ILoaderInfo`
objects.
"""
# build a list of all required entry points
matching_groups = ['plaster.loader_factory']
if protocols:
matching_groups += [
'plaster.{0}_loader_factory'.format(proto)
for proto in protocols
]
scheme = scheme.lower()
# if a distribution is specified then it overrides the default search
parts = scheme.split('+', 1)
if len(parts) == 2:
try:
distro = pkg_resources.get_distribution(parts[0])
except pkg_resources.DistributionNotFound:
pass
else:
ep = _find_ep_in_dist(distro, parts[1], matching_groups)
# if we got one or more loaders from a specific distribution
# then they override everything else so we'll just return them
if ep:
return [EntryPointLoaderInfo(ep, protocols)]
# find any distributions supporting the default loader protocol
possible_entry_points = [
ep
for ep in pkg_resources.iter_entry_points('plaster.loader_factory')
if scheme is None or scheme == ep.name.lower()
]
distros = {ep.dist for ep in possible_entry_points}
matched_entry_points = list(filter(None, [
_find_ep_in_dist(distro, scheme, matching_groups)
for distro in distros
]))
return [
EntryPointLoaderInfo(ep, protocols=protocols)
for ep in matched_entry_points
] | python | def find_loaders(scheme, protocols=None):
"""
Find all loaders that match the requested scheme and protocols.
:param scheme: Any valid scheme. Examples would be something like ``ini``
or ``ini+pastedeploy``.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement. If ``None`` then only generic loaders will
be returned.
:returns: A list containing zero or more :class:`plaster.ILoaderInfo`
objects.
"""
# build a list of all required entry points
matching_groups = ['plaster.loader_factory']
if protocols:
matching_groups += [
'plaster.{0}_loader_factory'.format(proto)
for proto in protocols
]
scheme = scheme.lower()
# if a distribution is specified then it overrides the default search
parts = scheme.split('+', 1)
if len(parts) == 2:
try:
distro = pkg_resources.get_distribution(parts[0])
except pkg_resources.DistributionNotFound:
pass
else:
ep = _find_ep_in_dist(distro, parts[1], matching_groups)
# if we got one or more loaders from a specific distribution
# then they override everything else so we'll just return them
if ep:
return [EntryPointLoaderInfo(ep, protocols)]
# find any distributions supporting the default loader protocol
possible_entry_points = [
ep
for ep in pkg_resources.iter_entry_points('plaster.loader_factory')
if scheme is None or scheme == ep.name.lower()
]
distros = {ep.dist for ep in possible_entry_points}
matched_entry_points = list(filter(None, [
_find_ep_in_dist(distro, scheme, matching_groups)
for distro in distros
]))
return [
EntryPointLoaderInfo(ep, protocols=protocols)
for ep in matched_entry_points
] | [
"def",
"find_loaders",
"(",
"scheme",
",",
"protocols",
"=",
"None",
")",
":",
"# build a list of all required entry points",
"matching_groups",
"=",
"[",
"'plaster.loader_factory'",
"]",
"if",
"protocols",
":",
"matching_groups",
"+=",
"[",
"'plaster.{0}_loader_factory'",
".",
"format",
"(",
"proto",
")",
"for",
"proto",
"in",
"protocols",
"]",
"scheme",
"=",
"scheme",
".",
"lower",
"(",
")",
"# if a distribution is specified then it overrides the default search",
"parts",
"=",
"scheme",
".",
"split",
"(",
"'+'",
",",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"try",
":",
"distro",
"=",
"pkg_resources",
".",
"get_distribution",
"(",
"parts",
"[",
"0",
"]",
")",
"except",
"pkg_resources",
".",
"DistributionNotFound",
":",
"pass",
"else",
":",
"ep",
"=",
"_find_ep_in_dist",
"(",
"distro",
",",
"parts",
"[",
"1",
"]",
",",
"matching_groups",
")",
"# if we got one or more loaders from a specific distribution",
"# then they override everything else so we'll just return them",
"if",
"ep",
":",
"return",
"[",
"EntryPointLoaderInfo",
"(",
"ep",
",",
"protocols",
")",
"]",
"# find any distributions supporting the default loader protocol",
"possible_entry_points",
"=",
"[",
"ep",
"for",
"ep",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"'plaster.loader_factory'",
")",
"if",
"scheme",
"is",
"None",
"or",
"scheme",
"==",
"ep",
".",
"name",
".",
"lower",
"(",
")",
"]",
"distros",
"=",
"{",
"ep",
".",
"dist",
"for",
"ep",
"in",
"possible_entry_points",
"}",
"matched_entry_points",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"[",
"_find_ep_in_dist",
"(",
"distro",
",",
"scheme",
",",
"matching_groups",
")",
"for",
"distro",
"in",
"distros",
"]",
")",
")",
"return",
"[",
"EntryPointLoaderInfo",
"(",
"ep",
",",
"protocols",
"=",
"protocols",
")",
"for",
"ep",
"in",
"matched_entry_points",
"]"
] | Find all loaders that match the requested scheme and protocols.
:param scheme: Any valid scheme. Examples would be something like ``ini``
or ``ini+pastedeploy``.
:param protocols: Zero or more :term:`loader protocol` identifiers that
the loader must implement. If ``None`` then only generic loaders will
be returned.
:returns: A list containing zero or more :class:`plaster.ILoaderInfo`
objects. | [
"Find",
"all",
"loaders",
"that",
"match",
"the",
"requested",
"scheme",
"and",
"protocols",
"."
] | e70e55c182a8300d7ccf67e54d47740c72e72cd8 | https://github.com/Pylons/plaster/blob/e70e55c182a8300d7ccf67e54d47740c72e72cd8/src/plaster/loaders.py#L120-L173 | train |
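A hedged sketch of find_loaders, assuming it is re-exported at package level like get_settings above; the 'wsgi' protocol name is illustrative.

    import plaster

    for info in plaster.find_loaders('ini', protocols=['wsgi']):
        print(info)  # each item is an ILoaderInfo describing one matching loader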
vinci1it2000/schedula | schedula/utils/dsp.py | combine_dicts | def combine_dicts(*dicts, copy=False, base=None):
"""
Combines multiple dicts into one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
if len(dicts) == 1 and base is None: # Only one input dict.
cd = dicts[0].copy()
else:
cd = {} if base is None else base # Initialize empty dict.
for d in dicts: # Combine dicts.
if d:
# noinspection PyTypeChecker
cd.update(d)
# Return combined dict.
return {k: _copy.deepcopy(v) for k, v in cd.items()} if copy else cd | python | def combine_dicts(*dicts, copy=False, base=None):
"""
Combines multiple dicts into one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)]
"""
if len(dicts) == 1 and base is None: # Only one input dict.
cd = dicts[0].copy()
else:
cd = {} if base is None else base # Initialize empty dict.
for d in dicts: # Combine dicts.
if d:
# noinspection PyTypeChecker
cd.update(d)
# Return combined dict.
return {k: _copy.deepcopy(v) for k, v in cd.items()} if copy else cd | [
"def",
"combine_dicts",
"(",
"*",
"dicts",
",",
"copy",
"=",
"False",
",",
"base",
"=",
"None",
")",
":",
"if",
"len",
"(",
"dicts",
")",
"==",
"1",
"and",
"base",
"is",
"None",
":",
"# Only one input dict.",
"cd",
"=",
"dicts",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"else",
":",
"cd",
"=",
"{",
"}",
"if",
"base",
"is",
"None",
"else",
"base",
"# Initialize empty dict.",
"for",
"d",
"in",
"dicts",
":",
"# Combine dicts.",
"if",
"d",
":",
"# noinspection PyTypeChecker",
"cd",
".",
"update",
"(",
"d",
")",
"# Return combined dict.",
"return",
"{",
"k",
":",
"_copy",
".",
"deepcopy",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"cd",
".",
"items",
"(",
")",
"}",
"if",
"copy",
"else",
"cd"
] | Combines multiple dicts into one.
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict.
:rtype: dict
Example::
>>> sorted(combine_dicts({'a': 3, 'c': 3}, {'a': 1, 'b': 2}).items())
[('a', 1), ('b', 2), ('c', 3)] | [
"Combines",
"multiple",
"dicts",
"in",
"one",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L34-L71 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | kk_dict | def kk_dict(*kk, **adict):
"""
Merges and defines dictionaries with values identical to keys.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated
>>> sorted(kk_dict('a', {'b': 'd'}, **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated
"""
for k in kk:
if isinstance(k, dict):
if not set(k).isdisjoint(adict):
raise ValueError('keyword argument repeated')
adict.update(k)
elif k in adict:
raise ValueError('keyword argument repeated')
else:
adict[k] = k
return adict | python | def kk_dict(*kk, **adict):
"""
Merges and defines dictionaries with values identical to keys.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated
>>> sorted(kk_dict('a', {'b': 'd'}, **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated
"""
for k in kk:
if isinstance(k, dict):
if not set(k).isdisjoint(adict):
raise ValueError('keyword argument repeated')
adict.update(k)
elif k in adict:
raise ValueError('keyword argument repeated')
else:
adict[k] = k
return adict | [
"def",
"kk_dict",
"(",
"*",
"kk",
",",
"*",
"*",
"adict",
")",
":",
"for",
"k",
"in",
"kk",
":",
"if",
"isinstance",
"(",
"k",
",",
"dict",
")",
":",
"if",
"not",
"set",
"(",
"k",
")",
".",
"isdisjoint",
"(",
"adict",
")",
":",
"raise",
"ValueError",
"(",
"'keyword argument repeated'",
")",
"adict",
".",
"update",
"(",
"k",
")",
"elif",
"k",
"in",
"adict",
":",
"raise",
"ValueError",
"(",
"'keyword argument repeated'",
")",
"else",
":",
"adict",
"[",
"k",
"]",
"=",
"k",
"return",
"adict"
] | Merges and defines dictionaries with values identical to keys.
:param kk:
A sequence of keys and/or dictionaries.
:type kk: object | dict, optional
:param adict:
A dictionary.
:type adict: dict, optional
:return:
Merged dictionary.
:rtype: dict
Example::
>>> sorted(kk_dict('a', 'b', 'c').items())
[('a', 'a'), ('b', 'b'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'a-c': 'c'}).items())
[('a', 'a'), ('a-c', 'c'), ('b', 'b')]
>>> sorted(kk_dict('a', {'b': 'c'}, 'c').items())
[('a', 'a'), ('b', 'c'), ('c', 'c')]
>>> sorted(kk_dict('a', 'b', **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated
>>> sorted(kk_dict('a', {'b': 'd'}, **{'b': 'c'}).items())
Traceback (most recent call last):
...
ValueError: keyword argument repeated | [
"Merges",
"and",
"defines",
"dictionaries",
"with",
"values",
"identical",
"to",
"keys",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L74-L121 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | bypass | def bypass(*inputs, copy=False):
"""
Returns the same arguments.
:param inputs:
Inputs values.
:type inputs: T
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:return:
Same input values.
:rtype: (T, ...), T
Example::
>>> bypass('a', 'b', 'c')
('a', 'b', 'c')
>>> bypass('a')
'a'
"""
if len(inputs) == 1:
inputs = inputs[0] # Same inputs.
return _copy.deepcopy(inputs) if copy else inputs | python | def bypass(*inputs, copy=False):
"""
Returns the same arguments.
:param inputs:
Inputs values.
:type inputs: T
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:return:
Same input values.
:rtype: (T, ...), T
Example::
>>> bypass('a', 'b', 'c')
('a', 'b', 'c')
>>> bypass('a')
'a'
"""
if len(inputs) == 1:
inputs = inputs[0] # Same inputs.
return _copy.deepcopy(inputs) if copy else inputs | [
"def",
"bypass",
"(",
"*",
"inputs",
",",
"copy",
"=",
"False",
")",
":",
"if",
"len",
"(",
"inputs",
")",
"==",
"1",
":",
"inputs",
"=",
"inputs",
"[",
"0",
"]",
"# Same inputs.",
"return",
"_copy",
".",
"deepcopy",
"(",
"inputs",
")",
"if",
"copy",
"else",
"inputs"
] | Returns the same arguments.
:param inputs:
Inputs values.
:type inputs: T
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:return:
Same input values.
:rtype: (T, ...), T
Example::
>>> bypass('a', 'b', 'c')
('a', 'b', 'c')
>>> bypass('a')
'a' | [
"Returns",
"the",
"same",
"arguments",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L124-L151 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | map_dict | def map_dict(key_map, *dicts, copy=False, base=None):
"""
Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key})
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)]
"""
it = combine_dicts(*dicts).items() # Combine dicts.
get = key_map.get # Namespace shortcut.
# Return mapped dict.
return combine_dicts({get(k, k): v for k, v in it}, copy=copy, base=base) | python | def map_dict(key_map, *dicts, copy=False, base=None):
"""
Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key})
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)]
"""
it = combine_dicts(*dicts).items() # Combine dicts.
get = key_map.get # Namespace shortcut.
# Return mapped dict.
return combine_dicts({get(k, k): v for k, v in it}, copy=copy, base=base) | [
"def",
"map_dict",
"(",
"key_map",
",",
"*",
"dicts",
",",
"copy",
"=",
"False",
",",
"base",
"=",
"None",
")",
":",
"it",
"=",
"combine_dicts",
"(",
"*",
"dicts",
")",
".",
"items",
"(",
")",
"# Combine dicts.",
"get",
"=",
"key_map",
".",
"get",
"# Namespace shortcut.",
"# Return mapped dict.",
"return",
"combine_dicts",
"(",
"{",
"get",
"(",
"k",
",",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"it",
"}",
",",
"copy",
"=",
"copy",
",",
"base",
"=",
"base",
")"
] | Returns a dict with new key values.
:param key_map:
A dictionary that maps the dict keys ({old key: new key})
:type key_map: dict
:param dicts:
A sequence of dicts.
:type dicts: dict
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new key values.
:rtype: dict
Example::
>>> d = map_dict({'a': 'c', 'b': 'd'}, {'a': 1, 'b': 1}, {'b': 2})
>>> sorted(d.items())
[('c', 1), ('d', 2)] | [
"Returns",
"a",
"dict",
"with",
"new",
"key",
"values",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L176-L212 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | map_list | def map_list(key_map, *inputs, copy=False, base=None):
"""
Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key})
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
"""
d = {} if base is None else base # Initialize empty dict.
for m, v in zip(key_map, inputs):
if isinstance(m, dict):
map_dict(m, v, base=d) # Apply a map dict.
elif isinstance(m, list):
map_list(m, *v, base=d) # Apply a map list.
else:
d[m] = v # Apply map.
return combine_dicts(copy=copy, base=d) | python | def map_list(key_map, *inputs, copy=False, base=None):
"""
Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key})
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
"""
d = {} if base is None else base # Initialize empty dict.
for m, v in zip(key_map, inputs):
if isinstance(m, dict):
map_dict(m, v, base=d) # Apply a map dict.
elif isinstance(m, list):
map_list(m, *v, base=d) # Apply a map list.
else:
d[m] = v # Apply map.
return combine_dicts(copy=copy, base=d) | [
"def",
"map_list",
"(",
"key_map",
",",
"*",
"inputs",
",",
"copy",
"=",
"False",
",",
"base",
"=",
"None",
")",
":",
"d",
"=",
"{",
"}",
"if",
"base",
"is",
"None",
"else",
"base",
"# Initialize empty dict.",
"for",
"m",
",",
"v",
"in",
"zip",
"(",
"key_map",
",",
"inputs",
")",
":",
"if",
"isinstance",
"(",
"m",
",",
"dict",
")",
":",
"map_dict",
"(",
"m",
",",
"v",
",",
"base",
"=",
"d",
")",
"# Apply a map dict.",
"elif",
"isinstance",
"(",
"m",
",",
"list",
")",
":",
"map_list",
"(",
"m",
",",
"*",
"v",
",",
"base",
"=",
"d",
")",
"# Apply a map list.",
"else",
":",
"d",
"[",
"m",
"]",
"=",
"v",
"# Apply map.",
"return",
"combine_dicts",
"(",
"copy",
"=",
"copy",
",",
"base",
"=",
"d",
")"
] | Returns a new dict.
:param key_map:
A list that maps the dict keys ({old key: new key})
:type key_map: list[str | dict | list]
:param inputs:
A sequence of data.
:type inputs: iterable | dict | int | float | list | tuple
:param copy:
If True, it returns a deepcopy of input values.
:type copy: bool, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
A unique dict with new values.
:rtype: dict
Example::
>>> key_map = [
... 'a',
... {'a': 'c'},
... [
... 'a',
... {'a': 'd'}
... ]
... ]
>>> inputs = (
... 2,
... {'a': 3, 'b': 2},
... [
... 1,
... {'a': 4}
... ]
... )
>>> d = map_list(key_map, *inputs)
>>> sorted(d.items())
[('a', 1), ('b', 2), ('c', 3), ('d', 4)] | [
"Returns",
"a",
"new",
"dict",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L215-L272 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | selector | def selector(keys, dictionary, copy=False, output_type='dict',
allow_miss=False):
"""
Selects the chosen dictionary keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
A dictionary with chosen dictionary keys if present in the sequence of
dictionaries. These are combined with :func:`combine_dicts`.
:rtype: dict
Example::
>>> from functools import partial
>>> fun = partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)]
"""
if not allow_miss:
# noinspection PyUnusedLocal
def check(key):
return True
else:
def check(key):
return key in dictionary
if output_type == 'list': # Select as list.
res = [dictionary[k] for k in keys if check(k)]
return _copy.deepcopy(res) if copy else res
elif output_type == 'values':
return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)
# Select as dict.
return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy) | python | def selector(keys, dictionary, copy=False, output_type='dict',
allow_miss=False):
"""
Selects the chosen dictionary keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
A dictionary with chosen dictionary keys if present in the sequence of
dictionaries. These are combined with :func:`combine_dicts`.
:rtype: dict
Example::
>>> from functools import partial
>>> fun = partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)]
"""
if not allow_miss:
# noinspection PyUnusedLocal
def check(key):
return True
else:
def check(key):
return key in dictionary
if output_type == 'list': # Select as list.
res = [dictionary[k] for k in keys if check(k)]
return _copy.deepcopy(res) if copy else res
elif output_type == 'values':
return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)
# Select as dict.
return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy) | [
"def",
"selector",
"(",
"keys",
",",
"dictionary",
",",
"copy",
"=",
"False",
",",
"output_type",
"=",
"'dict'",
",",
"allow_miss",
"=",
"False",
")",
":",
"if",
"not",
"allow_miss",
":",
"# noinspection PyUnusedLocal",
"def",
"check",
"(",
"key",
")",
":",
"return",
"True",
"else",
":",
"def",
"check",
"(",
"key",
")",
":",
"return",
"key",
"in",
"dictionary",
"if",
"output_type",
"==",
"'list'",
":",
"# Select as list.",
"res",
"=",
"[",
"dictionary",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"if",
"check",
"(",
"k",
")",
"]",
"return",
"_copy",
".",
"deepcopy",
"(",
"res",
")",
"if",
"copy",
"else",
"res",
"elif",
"output_type",
"==",
"'values'",
":",
"return",
"bypass",
"(",
"*",
"[",
"dictionary",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"if",
"check",
"(",
"k",
")",
"]",
",",
"copy",
"=",
"copy",
")",
"# Select as dict.",
"return",
"bypass",
"(",
"{",
"k",
":",
"dictionary",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"if",
"check",
"(",
"k",
")",
"}",
",",
"copy",
"=",
"copy",
")"
] | Selects the chosen dictionary keys from the given dictionary.
:param keys:
Keys to select.
:type keys: list, tuple, set
:param dictionary:
A dictionary.
:type dictionary: dict
:param copy:
If True the output contains deep-copies of the values.
:type copy: bool
:param output_type:
Type of function output:
+ 'list': a list with all values listed in `keys`.
+ 'dict': a dictionary with any outputs listed in `keys`.
+ 'values': if output length == 1 return a single value otherwise a
tuple with all values listed in `keys`.
:type output_type: str, optional
:param allow_miss:
If True it does not raise when some key is missing in the dictionary.
:type allow_miss: bool
:return:
A dictionary with chosen dictionary keys if present in the sequence of
dictionaries. These are combined with :func:`combine_dicts`.
:rtype: dict
Example::
>>> from functools import partial
>>> fun = partial(selector, ['a', 'b'])
>>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
[('a', 1), ('b', 2)] | [
"Selects",
"the",
"chosen",
"dictionary",
"keys",
"from",
"the",
"given",
"dictionary",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L275-L334 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | replicate_value | def replicate_value(value, n=2, copy=True):
"""
Replicates `n` times the input value.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
A list with the value replicated `n` times.
:rtype: list
Example::
>>> from functools import partial
>>> fun = partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
"""
return bypass(*[value] * n, copy=copy) | python | def replicate_value(value, n=2, copy=True):
"""
Replicates `n` times the input value.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
A list with the value replicated `n` times.
:rtype: list
Example::
>>> from functools import partial
>>> fun = partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
"""
return bypass(*[value] * n, copy=copy) | [
"def",
"replicate_value",
"(",
"value",
",",
"n",
"=",
"2",
",",
"copy",
"=",
"True",
")",
":",
"return",
"bypass",
"(",
"*",
"[",
"value",
"]",
"*",
"n",
",",
"copy",
"=",
"copy",
")"
] | Replicates `n` times the input value.
:param n:
Number of replications.
:type n: int
:param value:
Value to be replicated.
:type value: T
:param copy:
If True the list contains deep-copies of the value.
:type copy: bool
:return:
A list with the value replicated `n` times.
:rtype: list
Example::
>>> from functools import partial
>>> fun = partial(replicate_value, n=5)
>>> fun({'a': 3})
({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}) | [
"Replicates",
"n",
"times",
"the",
"input",
"value",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L337-L365 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | stack_nested_keys | def stack_nested_keys(nested_dict, key=(), depth=-1):
"""
Stacks the keys of nested-dictionaries into tuples and yields a list of
k-v pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
List of k-v pairs.
:rtype: generator
"""
if depth != 0 and hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)
else:
yield key, nested_dict | python | def stack_nested_keys(nested_dict, key=(), depth=-1):
"""
Stacks the keys of nested-dictionaries into tuples and yields a list of
k-v pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
List of k-v pairs.
:rtype: generator
"""
if depth != 0 and hasattr(nested_dict, 'items'):
for k, v in nested_dict.items():
yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)
else:
yield key, nested_dict | [
"def",
"stack_nested_keys",
"(",
"nested_dict",
",",
"key",
"=",
"(",
")",
",",
"depth",
"=",
"-",
"1",
")",
":",
"if",
"depth",
"!=",
"0",
"and",
"hasattr",
"(",
"nested_dict",
",",
"'items'",
")",
":",
"for",
"k",
",",
"v",
"in",
"nested_dict",
".",
"items",
"(",
")",
":",
"yield",
"from",
"stack_nested_keys",
"(",
"v",
",",
"key",
"=",
"key",
"+",
"(",
"k",
",",
")",
",",
"depth",
"=",
"depth",
"-",
"1",
")",
"else",
":",
"yield",
"key",
",",
"nested_dict"
] | Stacks the keys of nested-dictionaries into tuples and yields a list of
k-v pairs.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param key:
Initial keys.
:type key: tuple, optional
:param depth:
Maximum keys depth.
:type depth: int, optional
:return:
List of k-v pairs.
:rtype: generator | [
"Stacks",
"the",
"keys",
"of",
"nested",
"-",
"dictionaries",
"into",
"tuples",
"and",
"yields",
"a",
"list",
"of",
"k",
"-",
"v",
"pairs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L475-L501 | train |
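The docstring above has no Example block, so a small doctest-style illustration may help; depth limits how far the keys are stacked. Inputs are illustrative.

    >>> from schedula.utils.dsp import stack_nested_keys
    >>> d = {'a': {'x': 1, 'y': {'z': 2}}, 'b': 3}
    >>> sorted(stack_nested_keys(d))
    [(('a', 'x'), 1), (('a', 'y', 'z'), 2), (('b',), 3)]
    >>> sorted(stack_nested_keys(d, depth=1))
    [(('a',), {'x': 1, 'y': {'z': 2}}), (('b',), 3)]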
vinci1it2000/schedula | schedula/utils/dsp.py | are_in_nested_dicts | def are_in_nested_dicts(nested_dict, *keys):
"""
Checks whether nested keys are inside nested-dictionaries.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param keys:
Nested keys.
:type keys: object
:return:
True if nested keys are inside of nested-dictionaries, otherwise False.
:rtype: bool
"""
if keys:
# noinspection PyBroadException
try:
return are_in_nested_dicts(nested_dict[keys[0]], *keys[1:])
except Exception: # Key error or not a dict.
return False
return True | python | def are_in_nested_dicts(nested_dict, *keys):
"""
Checks whether nested keys are inside nested-dictionaries.
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param keys:
Nested keys.
:type keys: object
:return:
True if nested keys are inside of nested-dictionaries, otherwise False.
:rtype: bool
"""
if keys:
# noinspection PyBroadException
try:
return are_in_nested_dicts(nested_dict[keys[0]], *keys[1:])
except Exception: # Key error or not a dict.
return False
return True | [
"def",
"are_in_nested_dicts",
"(",
"nested_dict",
",",
"*",
"keys",
")",
":",
"if",
"keys",
":",
"# noinspection PyBroadException",
"try",
":",
"return",
"are_in_nested_dicts",
"(",
"nested_dict",
"[",
"keys",
"[",
"0",
"]",
"]",
",",
"*",
"keys",
"[",
"1",
":",
"]",
")",
"except",
"Exception",
":",
"# Key error or not a dict.",
"return",
"False",
"return",
"True"
] | Checks whether nested keys are inside nested-dictionaries. | [
:param nested_dict:
Nested dictionary.
:type nested_dict: dict
:param keys:
Nested keys.
:type keys: object
:return:
True if nested keys are inside of nested-dictionaries, otherwise False.
:rtype: bool | [
"Nested",
"keys",
"are",
"inside",
"of",
"nested",
"-",
"dictionaries",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L541-L564 | train |
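A doctest-style illustration (inputs are illustrative): the broad except also makes the check safe when a leaf value is not a dict.

    >>> from schedula.utils.dsp import are_in_nested_dicts
    >>> d = {'a': {'b': {'c': 1}}}
    >>> are_in_nested_dicts(d, 'a', 'b')
    True
    >>> are_in_nested_dicts(d, 'a', 'x')
    False
    >>> are_in_nested_dicts(d, 'a', 'b', 'c', 'd')  # 1 is not a dict
    False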
vinci1it2000/schedula | schedula/utils/dsp.py | combine_nested_dicts | def combine_nested_dicts(*nested_dicts, depth=-1, base=None):
"""
Merge nested-dictionaries.
:param nested_dicts:
Nested dictionaries.
:type nested_dicts: dict
:param depth:
Maximum keys depth.
:type depth: int, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
Combined nested-dictionary.
:rtype: dict
"""
if base is None:
base = {}
for nested_dict in nested_dicts:
for k, v in stack_nested_keys(nested_dict, depth=depth):
while k:
# noinspection PyBroadException
try:
get_nested_dicts(base, *k[:-1])[k[-1]] = v
break
except Exception:
# A branch of the nested_dict is longer than the base.
k = k[:-1]
v = get_nested_dicts(nested_dict, *k)
return base | python | def combine_nested_dicts(*nested_dicts, depth=-1, base=None):
"""
Merge nested-dictionaries.
:param nested_dicts:
Nested dictionaries.
:type nested_dicts: dict
:param depth:
Maximum keys depth.
:type depth: int, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
Combined nested-dictionary.
:rtype: dict
"""
if base is None:
base = {}
for nested_dict in nested_dicts:
for k, v in stack_nested_keys(nested_dict, depth=depth):
while k:
# noinspection PyBroadException
try:
get_nested_dicts(base, *k[:-1])[k[-1]] = v
break
except Exception:
# A branch of the nested_dict is longer than the base.
k = k[:-1]
v = get_nested_dicts(nested_dict, *k)
return base | [
"def",
"combine_nested_dicts",
"(",
"*",
"nested_dicts",
",",
"depth",
"=",
"-",
"1",
",",
"base",
"=",
"None",
")",
":",
"if",
"base",
"is",
"None",
":",
"base",
"=",
"{",
"}",
"for",
"nested_dict",
"in",
"nested_dicts",
":",
"for",
"k",
",",
"v",
"in",
"stack_nested_keys",
"(",
"nested_dict",
",",
"depth",
"=",
"depth",
")",
":",
"while",
"k",
":",
"# noinspection PyBroadException",
"try",
":",
"get_nested_dicts",
"(",
"base",
",",
"*",
"k",
"[",
":",
"-",
"1",
"]",
")",
"[",
"k",
"[",
"-",
"1",
"]",
"]",
"=",
"v",
"break",
"except",
"Exception",
":",
"# A branch of the nested_dict is longer than the base.",
"k",
"=",
"k",
"[",
":",
"-",
"1",
"]",
"v",
"=",
"get_nested_dicts",
"(",
"nested_dict",
",",
"*",
"k",
")",
"return",
"base"
] | Merge nested-dictionaries.
:param nested_dicts:
Nested dictionaries.
:type nested_dicts: dict
:param depth:
Maximum keys depth.
:type depth: int, optional
:param base:
Base dict into which the dicts are combined.
:type base: dict, optional
:return:
Combined nested-dictionary.
:rtype: dict | [
"Merge",
"nested",
"-",
"dictionaries",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L567-L603 | train |
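A doctest-style illustration (inputs are illustrative): branches present in several inputs are merged, with later inputs overriding common leaves. This assumes get_nested_dicts creates missing intermediate dicts, as its use above implies.

    >>> from schedula.utils.dsp import combine_nested_dicts
    >>> a = {'x': {'y': 1}}
    >>> b = {'x': {'z': 2}, 'w': 3}
    >>> combine_nested_dicts(a, b) == {'x': {'y': 1, 'z': 2}, 'w': 3}
    True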
vinci1it2000/schedula | schedula/utils/dsp.py | add_function | def add_function(dsp, inputs_kwargs=False, inputs_defaults=False, **kw):
"""
Decorator to add a function to a dispatcher.
:param dsp:
A dispatcher.
:type dsp: schedula.Dispatcher
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param kw:
See :func:~`schedula.dispatcher.Dispatcher.add_function`.
:type kw: dict
:return:
Decorator.
:rtype: callable
**------------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> @sh.add_function(dsp, outputs=['e'])
... @sh.add_function(dsp, False, True, outputs=['i'], inputs='ecah')
... @sh.add_function(dsp, True, outputs=['l'])
... def f(a, b, c, d=1):
... return (a + b) - c + d
>>> @sh.add_function(dsp, True, outputs=['d'])
... def g(e, i, *args, d=0):
... return e + i + d
>>> sol = dsp({'a': 1, 'b': 2, 'c': 3}); sol
Solution([('a', 1), ('b', 2), ('c', 3), ('h', 1), ('e', 1), ('i', 4),
('d', 5), ('l', 5)])
"""
def decorator(f):
dsp.add_func(
f, inputs_kwargs=inputs_kwargs, inputs_defaults=inputs_defaults,
**kw
)
return f
return decorator | python | def add_function(dsp, inputs_kwargs=False, inputs_defaults=False, **kw):
"""
Decorator to add a function to a dispatcher.
:param dsp:
A dispatcher.
:type dsp: schedula.Dispatcher
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param kw:
See :func:~`schedula.dispatcher.Dispatcher.add_function`.
:type kw: dict
:return:
Decorator.
:rtype: callable
**------------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> @sh.add_function(dsp, outputs=['e'])
... @sh.add_function(dsp, False, True, outputs=['i'], inputs='ecah')
... @sh.add_function(dsp, True, outputs=['l'])
... def f(a, b, c, d=1):
... return (a + b) - c + d
>>> @sh.add_function(dsp, True, outputs=['d'])
... def g(e, i, *args, d=0):
... return e + i + d
>>> sol = dsp({'a': 1, 'b': 2, 'c': 3}); sol
Solution([('a', 1), ('b', 2), ('c', 3), ('h', 1), ('e', 1), ('i', 4),
('d', 5), ('l', 5)])
"""
def decorator(f):
dsp.add_func(
f, inputs_kwargs=inputs_kwargs, inputs_defaults=inputs_defaults,
**kw
)
return f
return decorator | [
"def",
"add_function",
"(",
"dsp",
",",
"inputs_kwargs",
"=",
"False",
",",
"inputs_defaults",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"dsp",
".",
"add_func",
"(",
"f",
",",
"inputs_kwargs",
"=",
"inputs_kwargs",
",",
"inputs_defaults",
"=",
"inputs_defaults",
",",
"*",
"*",
"kw",
")",
"return",
"f",
"return",
"decorator"
] | Decorator to add a function to a dispatcher.
:param dsp:
A dispatcher.
:type dsp: schedula.Dispatcher
:param inputs_kwargs:
Do you want to include kwargs as inputs?
:type inputs_kwargs: bool
:param inputs_defaults:
Do you want to set default values?
:type inputs_defaults: bool
:param kw:
See :func:~`schedula.dispatcher.Dispatcher.add_function`.
:type kw: dict
:return:
Decorator.
:rtype: callable
**------------------------------------------------------------------------**
**Example**:
.. dispatcher:: sol
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher(name='Dispatcher')
>>> @sh.add_function(dsp, outputs=['e'])
... @sh.add_function(dsp, False, True, outputs=['i'], inputs='ecah')
... @sh.add_function(dsp, True, outputs=['l'])
... def f(a, b, c, d=1):
... return (a + b) - c + d
>>> @sh.add_function(dsp, True, outputs=['d'])
... def g(e, i, *args, d=0):
... return e + i + d
>>> sol = dsp({'a': 1, 'b': 2, 'c': 3}); sol
Solution([('a', 1), ('b', 2), ('c', 3), ('h', 1), ('e', 1), ('i', 4),
('d', 5), ('l', 5)]) | [
"Decorator",
"to",
"add",
"a",
"function",
"to",
"a",
"dispatcher",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L1242-L1296 | train |
vinci1it2000/schedula | schedula/utils/dsp.py | SubDispatch.blue | def blue(self, memo=None):
"""
Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint
"""
memo = {} if memo is None else memo
if self not in memo:
import inspect
from .blue import Blueprint, _parent_blue
keys = tuple(inspect.signature(self.__init__).parameters)
memo[self] = Blueprint(**{
k: _parent_blue(v, memo)
for k, v in self.__dict__.items() if k in keys
})._set_cls(self.__class__)
return memo[self] | python | def blue(self, memo=None):
"""
Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint
"""
memo = {} if memo is None else memo
if self not in memo:
import inspect
from .blue import Blueprint, _parent_blue
keys = tuple(inspect.signature(self.__init__).parameters)
memo[self] = Blueprint(**{
k: _parent_blue(v, memo)
for k, v in self.__dict__.items() if k in keys
})._set_cls(self.__class__)
return memo[self] | [
"def",
"blue",
"(",
"self",
",",
"memo",
"=",
"None",
")",
":",
"memo",
"=",
"{",
"}",
"if",
"memo",
"is",
"None",
"else",
"memo",
"if",
"self",
"not",
"in",
"memo",
":",
"import",
"inspect",
"from",
".",
"blue",
"import",
"Blueprint",
",",
"_parent_blue",
"keys",
"=",
"tuple",
"(",
"inspect",
".",
"signature",
"(",
"self",
".",
"__init__",
")",
".",
"parameters",
")",
"memo",
"[",
"self",
"]",
"=",
"Blueprint",
"(",
"*",
"*",
"{",
"k",
":",
"_parent_blue",
"(",
"v",
",",
"memo",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"k",
"in",
"keys",
"}",
")",
".",
"_set_cls",
"(",
"self",
".",
"__class__",
")",
"return",
"memo",
"[",
"self",
"]"
] | Constructs a Blueprint out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A Blueprint of the current object.
:rtype: schedula.utils.blue.Blueprint | [
"Constructs",
"a",
"Blueprint",
"out",
"of",
"the",
"current",
"object",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L741-L762 | train |
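A quick usage sketch for `blue` above (a minimal example, assuming schedula's public `Dispatcher.add_function` and `Blueprint.register` APIs; the dispatcher contents are illustrative):
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> func = sh.SubDispatch(dsp, outputs=['c'], output_type='list')
>>> bp = func.blue()       # a picklable Blueprint of the SubDispatch
>>> clone = bp.register()  # rebuilds an equivalent SubDispatch from it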
leplatrem/django-sizefield | sizefield/widgets.py | FileSizeWidget.value_from_datadict | def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
value = super(FileSizeWidget, self).value_from_datadict(data, files, name)
if value not in EMPTY_VALUES:
try:
return parse_size(value)
except ValueError:
pass
return value | python | def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
value = super(FileSizeWidget, self).value_from_datadict(data, files, name)
if value not in EMPTY_VALUES:
try:
return parse_size(value)
except ValueError:
pass
return value | [
"def",
"value_from_datadict",
"(",
"self",
",",
"data",
",",
"files",
",",
"name",
")",
":",
"value",
"=",
"super",
"(",
"FileSizeWidget",
",",
"self",
")",
".",
"value_from_datadict",
"(",
"data",
",",
"files",
",",
"name",
")",
"if",
"value",
"not",
"in",
"EMPTY_VALUES",
":",
"try",
":",
"return",
"parse_size",
"(",
"value",
")",
"except",
"ValueError",
":",
"pass",
"return",
"value"
] | Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided. | [
"Given",
"a",
"dictionary",
"of",
"data",
"and",
"this",
"widget",
"s",
"name",
"returns",
"the",
"value",
"of",
"this",
"widget",
".",
"Returns",
"None",
"if",
"it",
"s",
"not",
"provided",
"."
] | 6a273a43a2e8d157ee438811c0824eae534bcdb2 | https://github.com/leplatrem/django-sizefield/blob/6a273a43a2e8d157ee438811c0824eae534bcdb2/sizefield/widgets.py#L17-L28 | train |
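A rough usage sketch for the widget above (the form and field here are hypothetical, not part of django-sizefield; the byte count assumes 1024-based units):
>>> from django import forms
>>> from sizefield.widgets import FileSizeWidget
>>> class QuotaForm(forms.Form):
...     quota = forms.IntegerField(widget=FileSizeWidget())
>>> form = QuotaForm(data={'quota': '10KB'})
>>> form.is_valid()  # '10KB' is parsed to an integer byte count first
True
>>> form.cleaned_data['quota']
10240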
dsoprea/PySecure | pysecure/easy.py | connect_ssh_with_cb | def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True,
verbosity=0):
"""A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback.
"""
with connect_ssh(user, host, auth_cb, allow_new=allow_new, verbosity=verbosity) as ssh:
ssh_cb(ssh) | python | def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True,
verbosity=0):
"""A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback.
"""
with connect_ssh(user, host, auth_cb, allow_new=allow_new, verbosity=verbosity) as ssh:
ssh_cb(ssh) | [
"def",
"connect_ssh_with_cb",
"(",
"ssh_cb",
",",
"user",
",",
"host",
",",
"auth_cb",
",",
"allow_new",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"with",
"connect_ssh",
"(",
"user",
",",
"host",
",",
"auth_cb",
",",
"allow_new",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
"as",
"ssh",
":",
"ssh_cb",
"(",
"ssh",
")"
] | A "managed" SSH session. When the session is ready, we'll invoke the
"ssh_cb" callback. | [
"A",
"managed",
"SSH",
"session",
".",
"When",
"the",
"session",
"is",
"ready",
"we",
"ll",
"invoke",
"the",
"ssh_cb",
"callback",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L20-L27 | train |
dsoprea/PySecure | pysecure/easy.py | connect_sftp_with_cb | def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with _connect_sftp(*args, **kwargs) as (ssh, sftp):
sftp_cb(ssh, sftp) | python | def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
"""A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback.
"""
with _connect_sftp(*args, **kwargs) as (ssh, sftp):
sftp_cb(ssh, sftp) | [
"def",
"connect_sftp_with_cb",
"(",
"sftp_cb",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"_connect_sftp",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"as",
"(",
"ssh",
",",
"sftp",
")",
":",
"sftp_cb",
"(",
"ssh",
",",
"sftp",
")"
] | A "managed" SFTP session. When the SSH session and an additional SFTP
session are ready, invoke the sftp_cb callback. | [
"A",
"managed",
"SFTP",
"session",
".",
"When",
"the",
"SSH",
"session",
"and",
"an",
"additional",
"SFTP",
"session",
"are",
"ready",
"invoke",
"the",
"sftp_cb",
"callback",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L39-L45 | train |
dsoprea/PySecure | pysecure/easy.py | get_key_auth_cb | def get_key_auth_cb(key_filepath):
"""This is just a convenience function for key-based login."""
def auth_cb(ssh):
key = ssh_pki_import_privkey_file(key_filepath)
ssh.userauth_publickey(key)
return auth_cb | python | def get_key_auth_cb(key_filepath):
"""This is just a convenience function for key-based login."""
def auth_cb(ssh):
key = ssh_pki_import_privkey_file(key_filepath)
ssh.userauth_publickey(key)
return auth_cb | [
"def",
"get_key_auth_cb",
"(",
"key_filepath",
")",
":",
"def",
"auth_cb",
"(",
"ssh",
")",
":",
"key",
"=",
"ssh_pki_import_privkey_file",
"(",
"key_filepath",
")",
"ssh",
".",
"userauth_publickey",
"(",
"key",
")",
"return",
"auth_cb"
] | This is just a convenience function for key-based login. | [
"This",
"is",
"just",
"a",
"convenience",
"function",
"for",
"key",
"-",
"based",
"login",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/easy.py#L47-L54 | train |
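The three helpers above compose as follows; a sketch with placeholder host, user and key path (the callback body is left empty because the live sessions are only valid inside it):
>>> def sftp_cb(ssh, sftp):
...     pass  # use the SSH and SFTP sessions here
>>> auth_cb = get_key_auth_cb('/home/user/.ssh/id_rsa')
>>> connect_sftp_with_cb(sftp_cb, 'user', 'host.example.com', auth_cb)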
vinci1it2000/schedula | schedula/utils/alg.py | add_edge_fun | def add_edge_fun(graph):
"""
Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
succ, pred, node = graph._succ, graph._pred, graph._node
def add_edge(u, v, **attr):
if v not in succ: # Add nodes.
succ[v], pred[v], node[v] = {}, {}, {}
succ[u][v] = pred[v][u] = attr # Add the edge.
return add_edge | python | def add_edge_fun(graph):
"""
Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
succ, pred, node = graph._succ, graph._pred, graph._node
def add_edge(u, v, **attr):
if v not in succ: # Add nodes.
succ[v], pred[v], node[v] = {}, {}, {}
succ[u][v] = pred[v][u] = attr # Add the edge.
return add_edge | [
"def",
"add_edge_fun",
"(",
"graph",
")",
":",
"# Namespace shortcut for speed.",
"succ",
",",
"pred",
",",
"node",
"=",
"graph",
".",
"_succ",
",",
"graph",
".",
"_pred",
",",
"graph",
".",
"_node",
"def",
"add_edge",
"(",
"u",
",",
"v",
",",
"*",
"*",
"attr",
")",
":",
"if",
"v",
"not",
"in",
"succ",
":",
"# Add nodes.",
"succ",
"[",
"v",
"]",
",",
"pred",
"[",
"v",
"]",
",",
"node",
"[",
"v",
"]",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"succ",
"[",
"u",
"]",
"[",
"v",
"]",
"=",
"pred",
"[",
"v",
"]",
"[",
"u",
"]",
"=",
"attr",
"# Add the edge.",
"return",
"add_edge"
] | Returns a function that adds an edge to the `graph` checking only the out
node.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that adds an edge to the `graph`.
:rtype: callable | [
"Returns",
"a",
"function",
"that",
"adds",
"an",
"edge",
"to",
"the",
"graph",
"checking",
"only",
"the",
"out",
"node",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L22-L45 | train |
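A quick sketch of the returned closure on a bare graph; this assumes networkx 2.x, whose DiGraph exposes the `_succ`/`_pred`/`_node` attributes the closure captures:
>>> import networkx as nx
>>> graph = nx.DiGraph()
>>> graph.add_node('u')           # only the out node is auto-created
>>> add_edge = add_edge_fun(graph)
>>> add_edge('u', 'v', weight=2)  # 'v' is added on the fly
>>> graph.has_edge('u', 'v')
True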
vinci1it2000/schedula | schedula/utils/alg.py | remove_edge_fun | def remove_edge_fun(graph):
"""
Returns a function that removes an edge from the `graph`.
.. note:: The out node is removed if it becomes isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
rm_edge, rm_node = graph.remove_edge, graph.remove_node
from networkx import is_isolate
def remove_edge(u, v):
rm_edge(u, v) # Remove the edge.
if is_isolate(graph, v): # Check if v is isolate.
rm_node(v) # Remove the isolate out node.
return remove_edge | python | def remove_edge_fun(graph):
"""
Returns a function that removes an edge from the `graph`.
.. note:: The out node is removed if it becomes isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable
"""
# Namespace shortcut for speed.
rm_edge, rm_node = graph.remove_edge, graph.remove_node
from networkx import is_isolate
def remove_edge(u, v):
rm_edge(u, v) # Remove the edge.
if is_isolate(graph, v): # Check if v is isolate.
rm_node(v) # Remove the isolate out node.
return remove_edge | [
"def",
"remove_edge_fun",
"(",
"graph",
")",
":",
"# Namespace shortcut for speed.",
"rm_edge",
",",
"rm_node",
"=",
"graph",
".",
"remove_edge",
",",
"graph",
".",
"remove_node",
"from",
"networkx",
"import",
"is_isolate",
"def",
"remove_edge",
"(",
"u",
",",
"v",
")",
":",
"rm_edge",
"(",
"u",
",",
"v",
")",
"# Remove the edge.",
"if",
"is_isolate",
"(",
"graph",
",",
"v",
")",
":",
"# Check if v is isolate.",
"rm_node",
"(",
"v",
")",
"# Remove the isolate out node.",
"return",
"remove_edge"
] | Returns a function that removes an edge from the `graph`.
.. note:: The out node is removed if it becomes isolated.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that removes an edge from the `graph`.
:rtype: callable | [
"Returns",
"a",
"function",
"that",
"removes",
"an",
"edge",
"from",
"the",
"graph",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L48-L72 | train |
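A sketch of the isolate-pruning behaviour on a plain networkx graph:
>>> import networkx as nx
>>> graph = nx.DiGraph([('u', 'v')])
>>> remove_edge = remove_edge_fun(graph)
>>> remove_edge('u', 'v')
>>> graph.has_node('v')  # 'v' became isolated, so it was dropped
False
>>> graph.has_node('u')  # only the out node is checked; 'u' survives
True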
vinci1it2000/schedula | schedula/utils/alg.py | get_unused_node_id | def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
"""
Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str
"""
has_node = graph.has_node # Namespace shortcut for speed.
n = counter() # Counter.
node_id_format = _format.format(initial_guess) # Node id format.
node_id = initial_guess # Initial guess.
while has_node(node_id): # Check if node id is used.
node_id = node_id_format % n() # Guess.
return node_id | python | def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
"""
Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str
"""
has_node = graph.has_node # Namespace shortcut for speed.
n = counter() # Counter.
node_id_format = _format.format(initial_guess) # Node id format.
node_id = initial_guess # Initial guess.
while has_node(node_id): # Check if node id is used.
node_id = node_id_format % n() # Guess.
return node_id | [
"def",
"get_unused_node_id",
"(",
"graph",
",",
"initial_guess",
"=",
"'unknown'",
",",
"_format",
"=",
"'{}<%d>'",
")",
":",
"has_node",
"=",
"graph",
".",
"has_node",
"# Namespace shortcut for speed.",
"n",
"=",
"counter",
"(",
")",
"# Counter.",
"node_id_format",
"=",
"_format",
".",
"format",
"(",
"initial_guess",
")",
"# Node id format.",
"node_id",
"=",
"initial_guess",
"# Initial guess.",
"while",
"has_node",
"(",
"node_id",
")",
":",
"# Check if node id is used.",
"node_id",
"=",
"node_id_format",
"%",
"n",
"(",
")",
"# Guess.",
"return",
"node_id"
] | Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str | [
"Finds",
"an",
"unused",
"node",
"id",
"in",
"graph",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L75-L105 | train |
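For example, assuming the internal `counter` starts at zero:
>>> import networkx as nx
>>> graph = nx.DiGraph()
>>> graph.add_nodes_from(['unknown', 'unknown<0>'])
>>> get_unused_node_id(graph)
'unknown<1>'
>>> get_unused_node_id(graph, initial_guess='a')
'a'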
vinci1it2000/schedula | schedula/utils/alg.py | add_func_edges | def add_func_edges(dsp, fun_id, nodes_bunch, edge_weights=None, input=True,
data_nodes=None):
"""
Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise they are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fails.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list
"""
# Namespace shortcut for speed.
add_edge = _add_edge_dmap_fun(dsp.dmap, edge_weights)
node, add_data = dsp.dmap.nodes, dsp.add_data
remove_nodes = dsp.dmap.remove_nodes_from
# Define an error message.
msg = 'Invalid %sput id: {} is not a data node' % ['out', 'in'][input]
i, j = ('i', 'o') if input else ('o', 'i')
data_nodes = data_nodes or [] # Update data nodes.
for u in nodes_bunch: # Iterate nodes.
try:
if node[u]['type'] != 'data': # The node is not a data node.
data_nodes.append(fun_id) # Add function id to be removed.
remove_nodes(data_nodes) # Remove function and new data nodes.
raise ValueError(msg.format(u)) # Raise error.
except KeyError:
data_nodes.append(add_data(data_id=u)) # Add new data node.
add_edge(**{i: u, j: fun_id, 'w': u}) # Add edge.
return data_nodes | python | def add_func_edges(dsp, fun_id, nodes_bunch, edge_weights=None, input=True,
data_nodes=None):
"""
Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise they are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fails.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list
"""
# Namespace shortcut for speed.
add_edge = _add_edge_dmap_fun(dsp.dmap, edge_weights)
node, add_data = dsp.dmap.nodes, dsp.add_data
remove_nodes = dsp.dmap.remove_nodes_from
# Define an error message.
msg = 'Invalid %sput id: {} is not a data node' % ['out', 'in'][input]
i, j = ('i', 'o') if input else ('o', 'i')
data_nodes = data_nodes or [] # Update data nodes.
for u in nodes_bunch: # Iterate nodes.
try:
if node[u]['type'] != 'data': # The node is not a data node.
data_nodes.append(fun_id) # Add function id to be removed.
remove_nodes(data_nodes) # Remove function and new data nodes.
raise ValueError(msg.format(u)) # Raise error.
except KeyError:
data_nodes.append(add_data(data_id=u)) # Add new data node.
add_edge(**{i: u, j: fun_id, 'w': u}) # Add edge.
return data_nodes | [
"def",
"add_func_edges",
"(",
"dsp",
",",
"fun_id",
",",
"nodes_bunch",
",",
"edge_weights",
"=",
"None",
",",
"input",
"=",
"True",
",",
"data_nodes",
"=",
"None",
")",
":",
"# Namespace shortcut for speed.",
"add_edge",
"=",
"_add_edge_dmap_fun",
"(",
"dsp",
".",
"dmap",
",",
"edge_weights",
")",
"node",
",",
"add_data",
"=",
"dsp",
".",
"dmap",
".",
"nodes",
",",
"dsp",
".",
"add_data",
"remove_nodes",
"=",
"dsp",
".",
"dmap",
".",
"remove_nodes_from",
"# Define an error message.",
"msg",
"=",
"'Invalid %sput id: {} is not a data node'",
"%",
"[",
"'out'",
",",
"'in'",
"]",
"[",
"input",
"]",
"i",
",",
"j",
"=",
"(",
"'i'",
",",
"'o'",
")",
"if",
"input",
"else",
"(",
"'o'",
",",
"'i'",
")",
"data_nodes",
"=",
"data_nodes",
"or",
"[",
"]",
"# Update data nodes.",
"for",
"u",
"in",
"nodes_bunch",
":",
"# Iterate nodes.",
"try",
":",
"if",
"node",
"[",
"u",
"]",
"[",
"'type'",
"]",
"!=",
"'data'",
":",
"# The node is not a data node.",
"data_nodes",
".",
"append",
"(",
"fun_id",
")",
"# Add function id to be removed.",
"remove_nodes",
"(",
"data_nodes",
")",
"# Remove function and new data nodes.",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"u",
")",
")",
"# Raise error.",
"except",
"KeyError",
":",
"data_nodes",
".",
"append",
"(",
"add_data",
"(",
"data_id",
"=",
"u",
")",
")",
"# Add new data node.",
"add_edge",
"(",
"*",
"*",
"{",
"i",
":",
"u",
",",
"j",
":",
"fun_id",
",",
"'w'",
":",
"u",
"}",
")",
"# Add edge.",
"return",
"data_nodes"
] | Adds function node edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param fun_id:
Function node id.
:type fun_id: str
:param nodes_bunch:
A container of nodes which will be iterated through once.
:type nodes_bunch: iterable
:param edge_weights:
Edge weights.
:type edge_weights: dict, optional
:param input:
If True the nodes_bunch are input nodes, otherwise they are output nodes.
:type input: bool, optional
:param data_nodes:
Data nodes to be deleted if something fails.
:type data_nodes: list
:return:
List of new data nodes.
:rtype: list | [
"Adds",
"function",
"node",
"edges",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L108-L166 | train |
vinci1it2000/schedula | schedula/utils/alg.py | _add_edge_dmap_fun | def _add_edge_dmap_fun(graph, edges_weights=None):
"""
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
add = graph.add_edge # Namespace shortcut for speed.
if edges_weights is not None:
def add_edge(i, o, w):
if w in edges_weights:
add(i, o, weight=edges_weights[w]) # Weighted edge.
else:
add(i, o) # Normal edge.
else:
# noinspection PyUnusedLocal
def add_edge(i, o, w):
add(i, o) # Normal edge.
return add_edge | python | def _add_edge_dmap_fun(graph, edges_weights=None):
"""
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
add = graph.add_edge # Namespace shortcut for speed.
if edges_weights is not None:
def add_edge(i, o, w):
if w in edges_weights:
add(i, o, weight=edges_weights[w]) # Weighted edge.
else:
add(i, o) # Normal edge.
else:
# noinspection PyUnusedLocal
def add_edge(i, o, w):
add(i, o) # Normal edge.
return add_edge | [
"def",
"_add_edge_dmap_fun",
"(",
"graph",
",",
"edges_weights",
"=",
"None",
")",
":",
"add",
"=",
"graph",
".",
"add_edge",
"# Namespace shortcut for speed.",
"if",
"edges_weights",
"is",
"not",
"None",
":",
"def",
"add_edge",
"(",
"i",
",",
"o",
",",
"w",
")",
":",
"if",
"w",
"in",
"edges_weights",
":",
"add",
"(",
"i",
",",
"o",
",",
"weight",
"=",
"edges_weights",
"[",
"w",
"]",
")",
"# Weighted edge.",
"else",
":",
"add",
"(",
"i",
",",
"o",
")",
"# Normal edge.",
"else",
":",
"# noinspection PyUnusedLocal",
"def",
"add_edge",
"(",
"i",
",",
"o",
",",
"w",
")",
":",
"add",
"(",
"i",
",",
"o",
")",
"# Normal edge.",
"return",
"add_edge"
] | Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable | [
"Adds",
"edge",
"to",
"the",
"dispatcher",
"map",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L169-L199 | train |
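A sketch of the two code paths (weighted vs. plain edge) on a bare networkx graph; the weight key 'w1' is illustrative:
>>> import networkx as nx
>>> g = nx.DiGraph()
>>> add_edge = _add_edge_dmap_fun(g, edges_weights={'w1': 3})
>>> add_edge('a', 'b', 'w1')  # 'w1' has a weight entry -> weighted edge
>>> add_edge('a', 'c', 'w2')  # 'w2' does not -> plain edge
>>> g['a']['b']['weight']
3
>>> 'weight' in g['a']['c']
False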
vinci1it2000/schedula | schedula/utils/alg.py | _get_node | def _get_node(nodes, node_id, fuzzy=True):
"""
Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict)
"""
try:
return node_id, nodes[node_id] # Return dispatcher node and its id.
except KeyError as ex:
if fuzzy:
it = sorted(nodes.items())
n = next(((k, v) for k, v in it if node_id in k), EMPTY)
if n is not EMPTY:
return n
raise ex | python | def _get_node(nodes, node_id, fuzzy=True):
"""
Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict)
"""
try:
return node_id, nodes[node_id] # Return dispatcher node and its id.
except KeyError as ex:
if fuzzy:
it = sorted(nodes.items())
n = next(((k, v) for k, v in it if node_id in k), EMPTY)
if n is not EMPTY:
return n
raise ex | [
"def",
"_get_node",
"(",
"nodes",
",",
"node_id",
",",
"fuzzy",
"=",
"True",
")",
":",
"try",
":",
"return",
"node_id",
",",
"nodes",
"[",
"node_id",
"]",
"# Return dispatcher node and its id.",
"except",
"KeyError",
"as",
"ex",
":",
"if",
"fuzzy",
":",
"it",
"=",
"sorted",
"(",
"nodes",
".",
"items",
"(",
")",
")",
"n",
"=",
"next",
"(",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"it",
"if",
"node_id",
"in",
"k",
")",
",",
"EMPTY",
")",
"if",
"n",
"is",
"not",
"EMPTY",
":",
"return",
"n",
"raise",
"ex"
] | Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict) | [
"Returns",
"a",
"dispatcher",
"node",
"that",
"match",
"the",
"given",
"node",
"id",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L202-L227 | train |
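The fuzzy fallback scans the sorted node ids for the first one containing the query; a sketch with illustrative node ids:
>>> nodes = {'mean': {'type': 'function'}, 'mean<0>': {'type': 'data'}}
>>> _get_node(nodes, 'mean')[0]  # exact hit
'mean'
>>> _get_node(nodes, 'ean')[0]   # fuzzy hit on the first sorted match
'mean'
>>> _get_node(nodes, 'ean', fuzzy=False)
Traceback (most recent call last):
    ...
KeyError: 'ean'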
vinci1it2000/schedula | schedula/utils/alg.py | get_full_pipe | def get_full_pipe(sol, base=()):
"""
Returns the full pipe of a dispatch run.
:param sol:
A Solution object.
:type sol: schedula.utils.Solution
:param base:
Base node id.
:type base: tuple[str]
:return:
Full pipe of a dispatch run.
:rtype: DspPipe
"""
pipe, i = DspPipe(), len(base)
for p in sol._pipe:
n, s = p[-1]
d = s.dsp
p = {'task': p}
if n in s._errors:
p['error'] = s._errors[n]
node_id = s.full_name + (n,)
assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
n_id = node_id[i:]
n, path = d.get_node(n, node_attr=None)
if n['type'] == 'function' and 'function' in n:
try:
sub_sol = s.workflow.node[path[-1]]['solution']
sp = get_full_pipe(sub_sol, base=node_id)
if sp:
p['sub_pipe'] = sp
except KeyError:
pass
pipe[bypass(*n_id)] = p
return pipe | python | def get_full_pipe(sol, base=()):
"""
Returns the full pipe of a dispatch run.
:param sol:
A Solution object.
:type sol: schedula.utils.Solution
:param base:
Base node id.
:type base: tuple[str]
:return:
Full pipe of a dispatch run.
:rtype: DspPipe
"""
pipe, i = DspPipe(), len(base)
for p in sol._pipe:
n, s = p[-1]
d = s.dsp
p = {'task': p}
if n in s._errors:
p['error'] = s._errors[n]
node_id = s.full_name + (n,)
assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
n_id = node_id[i:]
n, path = d.get_node(n, node_attr=None)
if n['type'] == 'function' and 'function' in n:
try:
sub_sol = s.workflow.node[path[-1]]['solution']
sp = get_full_pipe(sub_sol, base=node_id)
if sp:
p['sub_pipe'] = sp
except KeyError:
pass
pipe[bypass(*n_id)] = p
return pipe | [
"def",
"get_full_pipe",
"(",
"sol",
",",
"base",
"=",
"(",
")",
")",
":",
"pipe",
",",
"i",
"=",
"DspPipe",
"(",
")",
",",
"len",
"(",
"base",
")",
"for",
"p",
"in",
"sol",
".",
"_pipe",
":",
"n",
",",
"s",
"=",
"p",
"[",
"-",
"1",
"]",
"d",
"=",
"s",
".",
"dsp",
"p",
"=",
"{",
"'task'",
":",
"p",
"}",
"if",
"n",
"in",
"s",
".",
"_errors",
":",
"p",
"[",
"'error'",
"]",
"=",
"s",
".",
"_errors",
"[",
"n",
"]",
"node_id",
"=",
"s",
".",
"full_name",
"+",
"(",
"n",
",",
")",
"assert",
"base",
"==",
"node_id",
"[",
":",
"i",
"]",
",",
"'%s != %s'",
"%",
"(",
"node_id",
"[",
":",
"i",
"]",
",",
"base",
")",
"n_id",
"=",
"node_id",
"[",
"i",
":",
"]",
"n",
",",
"path",
"=",
"d",
".",
"get_node",
"(",
"n",
",",
"node_attr",
"=",
"None",
")",
"if",
"n",
"[",
"'type'",
"]",
"==",
"'function'",
"and",
"'function'",
"in",
"n",
":",
"try",
":",
"sub_sol",
"=",
"s",
".",
"workflow",
".",
"node",
"[",
"path",
"[",
"-",
"1",
"]",
"]",
"[",
"'solution'",
"]",
"sp",
"=",
"get_full_pipe",
"(",
"sub_sol",
",",
"base",
"=",
"node_id",
")",
"if",
"sp",
":",
"p",
"[",
"'sub_pipe'",
"]",
"=",
"sp",
"except",
"KeyError",
":",
"pass",
"pipe",
"[",
"bypass",
"(",
"*",
"n_id",
")",
"]",
"=",
"p",
"return",
"pipe"
] | Returns the full pipe of a dispatch run.
:param sol:
A Solution object.
:type sol: schedula.utils.Solution
:param base:
Base node id.
:type base: tuple[str]
:return:
Full pipe of a dispatch run.
:rtype: DspPipe | [
"Returns",
"the",
"full",
"pipe",
"of",
"a",
"dispatch",
"run",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L426-L471 | train |
CI-WATER/gsshapy | gsshapy/lib/spn_chunk.py | connectChunk | def connectChunk(key, chunk):
"""
Parse Storm Pipe CONNECT Chunk Method
"""
schunk = chunk[0].strip().split()
result = {'slinkNumber': schunk[1],
'upSjunc': schunk[2],
'downSjunc': schunk[3]}
return result | python | def connectChunk(key, chunk):
"""
Parse Storm Pipe CONNECT Chunk Method
"""
schunk = chunk[0].strip().split()
result = {'slinkNumber': schunk[1],
'upSjunc': schunk[2],
'downSjunc': schunk[3]}
return result | [
"def",
"connectChunk",
"(",
"key",
",",
"chunk",
")",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"result",
"=",
"{",
"'slinkNumber'",
":",
"schunk",
"[",
"1",
"]",
",",
"'upSjunc'",
":",
"schunk",
"[",
"2",
"]",
",",
"'downSjunc'",
":",
"schunk",
"[",
"3",
"]",
"}",
"return",
"result"
] | Parse Storm Pipe CONNECT Chunk Method | [
"Parse",
"Storm",
"Pipe",
"CONNECT",
"Chunk",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/spn_chunk.py#L15-L25 | train |
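Fed a single CONNECT card as it would appear in a GSSHA storm pipe network (.spn) file (the link and junction numbers below are illustrative), the parser returns:
>>> connectChunk('CONNECT', ['CONNECT  1  2  3\n'])
{'slinkNumber': '1', 'upSjunc': '2', 'downSjunc': '3'}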
hellupline/flask-manager | flask_manager/ext/sqlalchemy.py | SQLAlchemyController.get_items | def get_items(self, page=1, order_by=None, filters=None):
"""
Fetch matching items from the database.
Args:
page (int):
which page will be sliced
slice size is ``self.per_page``.
order_by (str):
a field name to order query by.
filters (dict):
a ``filter name``: ``value`` dict.
Returns:
tuple with:
items, sliced by page*self.per_page
total items without slice
"""
start = (page-1)*self.per_page
query = self.get_query()
if order_by is not None:
query = query.order_by(self._get_field(order_by))
if filters is not None:
query = self._filter(query, filters)
return query.offset(start).limit(self.per_page), self.count(query) | python | def get_items(self, page=1, order_by=None, filters=None):
"""
Fetch matching items from the database.
Args:
page (int):
which page will be sliced
slice size is ``self.per_page``.
order_by (str):
a field name to order query by.
filters (dict):
a ``filter name``: ``value`` dict.
Returns:
tuple with:
items, sliced by page*self.per_page
total items without slice
"""
start = (page-1)*self.per_page
query = self.get_query()
if order_by is not None:
query = query.order_by(self._get_field(order_by))
if filters is not None:
query = self._filter(query, filters)
return query.offset(start).limit(self.per_page), self.count(query) | [
"def",
"get_items",
"(",
"self",
",",
"page",
"=",
"1",
",",
"order_by",
"=",
"None",
",",
"filters",
"=",
"None",
")",
":",
"start",
"=",
"(",
"page",
"-",
"1",
")",
"*",
"self",
".",
"per_page",
"query",
"=",
"self",
".",
"get_query",
"(",
")",
"if",
"order_by",
"is",
"not",
"None",
":",
"query",
"=",
"query",
".",
"order_by",
"(",
"self",
".",
"_get_field",
"(",
"order_by",
")",
")",
"if",
"filters",
"is",
"not",
"None",
":",
"query",
"=",
"self",
".",
"_filter",
"(",
"query",
",",
"filters",
")",
"return",
"query",
".",
"offset",
"(",
"start",
")",
".",
"limit",
"(",
"self",
".",
"per_page",
")",
",",
"self",
".",
"count",
"(",
"query",
")"
] | Fetch matching items from the database.
Args:
page (int):
which page will be sliced
slice size is ``self.per_page``.
order_by (str):
a field name to order query by.
filters (dict):
a ``filter name``: ``value`` dict.
Returns:
tuple with:
items, sliced by page*self.per_page
total items without slice | [
"Fetch",
"database",
"for",
"items",
"matching",
"."
] | 70e48309f73aacf55f5c37b43165791ae1cf6861 | https://github.com/hellupline/flask-manager/blob/70e48309f73aacf55f5c37b43165791ae1cf6861/flask_manager/ext/sqlalchemy.py#L160-L184 | train |
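A hypothetical call site for `get_items` (the controller instance, field name and filter value are illustrative):
>>> items, total = controller.get_items(page=2, order_by='name',
...                                     filters={'name': 'foo'})
>>> list(items)  # at most controller.per_page rows from the second page
>>> total        # count of all matching rows, ignoring pagination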
CI-WATER/gsshapy | gsshapy/orm/generic.py | GenericFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Generic File Read from File Method
"""
# Persist name and extension of file
self.name = name
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
self.text = f.read() | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Generic File Read from File Method
"""
# Persist name and extension of file
self.name = name
self.fileExtension = extension
# Open file and parse into a data structure
with open(path, 'r') as f:
self.text = f.read() | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Persist name and extension of file",
"self",
".",
"name",
"=",
"name",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into a data structure",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"text",
"=",
"f",
".",
"read",
"(",
")"
] | Generic File Read from File Method | [
"Generic",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/generic.py#L55-L65 | train |
wheerd/multiset | multiset.py | BaseMultiset.isdisjoint | def isdisjoint(self, other):
r"""Return True if the set has no elements in common with other.
Sets are disjoint iff their intersection is the empty set.
>>> ms = Multiset('aab')
>>> ms.isdisjoint('bc')
False
>>> ms.isdisjoint(Multiset('ccd'))
True
Args:
other: The other set to check disjointedness against. Can also be an :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
if isinstance(other, _sequence_types + (BaseMultiset, )):
pass
elif not isinstance(other, Container):
other = self._as_multiset(other)
return all(element not in other for element in self._elements.keys()) | python | def isdisjoint(self, other):
r"""Return True if the set has no elements in common with other.
Sets are disjoint iff their intersection is the empty set.
>>> ms = Multiset('aab')
>>> ms.isdisjoint('bc')
False
>>> ms.isdisjoint(Multiset('ccd'))
True
Args:
other: The other set to check disjointedness against. Can also be an :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
if isinstance(other, _sequence_types + (BaseMultiset, )):
pass
elif not isinstance(other, Container):
other = self._as_multiset(other)
return all(element not in other for element in self._elements.keys()) | [
"def",
"isdisjoint",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"_sequence_types",
"+",
"(",
"BaseMultiset",
",",
")",
")",
":",
"pass",
"elif",
"not",
"isinstance",
"(",
"other",
",",
"Container",
")",
":",
"other",
"=",
"self",
".",
"_as_multiset",
"(",
"other",
")",
"return",
"all",
"(",
"element",
"not",
"in",
"other",
"for",
"element",
"in",
"self",
".",
"_elements",
".",
"keys",
"(",
")",
")"
] | r"""Return True if the set has no elements in common with other.
Sets are disjoint iff their intersection is the empty set.
>>> ms = Multiset('aab')
>>> ms.isdisjoint('bc')
False
>>> ms.isdisjoint(Multiset('ccd'))
True
Args:
other: The other set to check disjointedness against. Can also be an :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | [
"r",
"Return",
"True",
"if",
"the",
"set",
"has",
"no",
"elements",
"in",
"common",
"with",
"other",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L148-L167 | train |
wheerd/multiset | multiset.py | BaseMultiset.difference | def difference(self, *others):
r"""Return a new multiset with all elements from the others removed.
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`difference_update`.
Args:
others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting difference multiset.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
if element in _elements:
old_multiplicity = _elements[element]
new_multiplicity = old_multiplicity - multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity
else:
del _elements[element]
_total -= old_multiplicity
result._total = _total
return result | python | def difference(self, *others):
r"""Return a new multiset with all elements from the others removed.
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`difference_update`.
Args:
others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting difference multiset.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
if element in _elements:
old_multiplicity = _elements[element]
new_multiplicity = old_multiplicity - multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity
else:
del _elements[element]
_total -= old_multiplicity
result._total = _total
return result | [
"def",
"difference",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_multiset",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"if",
"element",
"in",
"_elements",
":",
"old_multiplicity",
"=",
"_elements",
"[",
"element",
"]",
"new_multiplicity",
"=",
"old_multiplicity",
"-",
"multiplicity",
"if",
"new_multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"=",
"new_multiplicity",
"_total",
"-=",
"multiplicity",
"else",
":",
"del",
"_elements",
"[",
"element",
"]",
"_total",
"-=",
"old_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | r"""Return a new multiset with all elements from the others removed.
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`difference_update`.
Args:
others: The other sets to remove from the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting difference multiset. | [
"r",
"Return",
"a",
"new",
"multiset",
"with",
"all",
"elements",
"from",
"the",
"others",
"removed",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L169-L208 | train |
wheerd/multiset | multiset.py | BaseMultiset.union | def union(self, *others):
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
result._total = _total
return result | python | def union(self, *others):
r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
result._total = _total
return result | [
"def",
"union",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"old_multiplicity",
"=",
"_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
">",
"old_multiplicity",
":",
"_elements",
"[",
"element",
"]",
"=",
"multiplicity",
"_total",
"+=",
"multiplicity",
"-",
"old_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | r"""Return a new multiset with all elements from the multiset and the others with maximal multiplicities.
>>> ms = Multiset('aab')
>>> sorted(ms.union('bc'))
['a', 'a', 'b', 'c']
You can also use the ``|`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms | Multiset('aaa'))
['a', 'a', 'a', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`union_update`.
Args:
*others: The other sets to union the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the union. | [
"r",
"Return",
"a",
"new",
"multiset",
"with",
"all",
"elements",
"from",
"the",
"multiset",
"and",
"the",
"others",
"with",
"maximal",
"multiplicities",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L222-L256 | train |
wheerd/multiset | multiset.py | BaseMultiset.intersection | def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result | python | def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result | [
"def",
"intersection",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"list",
"(",
"_elements",
".",
"items",
"(",
")",
")",
":",
"new_multiplicity",
"=",
"other",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"new_multiplicity",
"<",
"multiplicity",
":",
"if",
"new_multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"=",
"new_multiplicity",
"_total",
"-=",
"multiplicity",
"-",
"new_multiplicity",
"else",
":",
"del",
"_elements",
"[",
"element",
"]",
"_total",
"-=",
"multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect the multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets. | [
"r",
"Return",
"a",
"new",
"multiset",
"with",
"elements",
"common",
"to",
"the",
"multiset",
"and",
"all",
"others",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L316-L354 | train |
wheerd/multiset | multiset.py | BaseMultiset.symmetric_difference | def symmetric_difference(self, other):
r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
You can also use the ``^`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset.
"""
other = self._as_multiset(other)
result = self.__class__()
_total = 0
_elements = result._elements
self_elements = self._elements
other_elements = other._elements
dist_elements = set(self_elements.keys()) | set(other_elements.keys())
for element in dist_elements:
multiplicity = self_elements.get(element, 0)
other_multiplicity = other_elements.get(element, 0)
new_multiplicity = (multiplicity - other_multiplicity
if multiplicity > other_multiplicity else other_multiplicity - multiplicity)
_total += new_multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
result._total = _total
return result | python | def symmetric_difference(self, other):
r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
You can also use the ``^`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset.
"""
other = self._as_multiset(other)
result = self.__class__()
_total = 0
_elements = result._elements
self_elements = self._elements
other_elements = other._elements
dist_elements = set(self_elements.keys()) | set(other_elements.keys())
for element in dist_elements:
multiplicity = self_elements.get(element, 0)
other_multiplicity = other_elements.get(element, 0)
new_multiplicity = (multiplicity - other_multiplicity
if multiplicity > other_multiplicity else other_multiplicity - multiplicity)
_total += new_multiplicity
if new_multiplicity > 0:
_elements[element] = new_multiplicity
result._total = _total
return result | [
"def",
"symmetric_difference",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"_as_multiset",
"(",
"other",
")",
"result",
"=",
"self",
".",
"__class__",
"(",
")",
"_total",
"=",
"0",
"_elements",
"=",
"result",
".",
"_elements",
"self_elements",
"=",
"self",
".",
"_elements",
"other_elements",
"=",
"other",
".",
"_elements",
"dist_elements",
"=",
"set",
"(",
"self_elements",
".",
"keys",
"(",
")",
")",
"|",
"set",
"(",
"other_elements",
".",
"keys",
"(",
")",
")",
"for",
"element",
"in",
"dist_elements",
":",
"multiplicity",
"=",
"self_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"other_multiplicity",
"=",
"other_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"new_multiplicity",
"=",
"(",
"multiplicity",
"-",
"other_multiplicity",
"if",
"multiplicity",
">",
"other_multiplicity",
"else",
"other_multiplicity",
"-",
"multiplicity",
")",
"_total",
"+=",
"new_multiplicity",
"if",
"new_multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"=",
"new_multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] | r"""Return a new set with elements in either the set or other but not both.
>>> ms = Multiset('aab')
>>> sorted(ms.symmetric_difference('abc'))
['a', 'c']
You can also use the ``^`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms ^ Multiset('aaac'))
['a', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
:meth:`symmetric_difference_update`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The resulting symmetric difference multiset. | [
"r",
"Return",
"a",
"new",
"set",
"with",
"elements",
"in",
"either",
"the",
"set",
"or",
"other",
"but",
"not",
"both",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L365-L405 | train |
wheerd/multiset | multiset.py | BaseMultiset.times | def times(self, factor):
"""Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor == 0:
return self.__class__()
if factor < 0:
raise ValueError('The factor must not be negative.')
result = self.__copy__()
_elements = result._elements
for element in _elements:
_elements[element] *= factor
result._total *= factor
return result | python | def times(self, factor):
"""Return a new set with each element's multiplicity multiplied with the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor == 0:
return self.__class__()
if factor < 0:
raise ValueError('The factor must not be negative.')
result = self.__copy__()
_elements = result._elements
for element in _elements:
_elements[element] *= factor
result._total *= factor
return result | [
"def",
"times",
"(",
"self",
",",
"factor",
")",
":",
"if",
"factor",
"==",
"0",
":",
"return",
"self",
".",
"__class__",
"(",
")",
"if",
"factor",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'The factor must no be negative.'",
")",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"for",
"element",
"in",
"_elements",
":",
"_elements",
"[",
"element",
"]",
"*=",
"factor",
"result",
".",
"_total",
"*=",
"factor",
"return",
"result"
] | Return a new set with each element's multiplicity multiplied by the given scalar factor.
>>> ms = Multiset('aab')
>>> sorted(ms.times(2))
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*`` operator for the same effect:
>>> sorted(ms * 3)
['a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b']
For a variant of the operation which modifies the multiset in place see
:meth:`times_update`.
Args:
factor: The factor to multiply each multiplicity with. | [
"Return",
"a",
"new",
"set",
"with",
"each",
"element",
"s",
"multiplicity",
"multiplied",
"with",
"the",
"given",
"scalar",
"factor",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L416-L443 | train |
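A minimal sketch of the two edge cases handled in the code: factor 0 yields an empty multiset of the same class, and a negative factor raises ValueError.

from multiset import Multiset

ms = Multiset('ab')
print(sorted(ms.times(0)))   # []
try:
    ms.times(-1)
except ValueError as err:
    print(err)  # the negative-factor message from this record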
wheerd/multiset | multiset.py | Multiset.union_update | def union_update(self, *others):
r"""Update the multiset, adding elements from all others using the maximum multiplicity.
>>> ms = Multiset('aab')
>>> ms.union_update('bc')
>>> sorted(ms)
['a', 'a', 'b', 'c']
You can also use the ``|=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> ms |= Multiset('bccd')
>>> sorted(ms)
['a', 'a', 'b', 'c', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`union`.
Args:
others: The other sets to union this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
_elements = self._elements
_total = self._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
self._total = _total | python | def union_update(self, *others):
r"""Update the multiset, adding elements from all others using the maximum multiplicity.
>>> ms = Multiset('aab')
>>> ms.union_update('bc')
>>> sorted(ms)
['a', 'a', 'b', 'c']
You can also use the ``|=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> ms |= Multiset('bccd')
>>> sorted(ms)
['a', 'a', 'b', 'c', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`union`.
Args:
others: The other sets to union this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
_elements = self._elements
_total = self._total
for other in map(self._as_mapping, others):
for element, multiplicity in other.items():
old_multiplicity = _elements.get(element, 0)
if multiplicity > old_multiplicity:
_elements[element] = multiplicity
_total += multiplicity - old_multiplicity
self._total = _total | [
"def",
"union_update",
"(",
"self",
",",
"*",
"others",
")",
":",
"_elements",
"=",
"self",
".",
"_elements",
"_total",
"=",
"self",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"old_multiplicity",
"=",
"_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
">",
"old_multiplicity",
":",
"_elements",
"[",
"element",
"]",
"=",
"multiplicity",
"_total",
"+=",
"multiplicity",
"-",
"old_multiplicity",
"self",
".",
"_total",
"=",
"_total"
] | r"""Update the multiset, adding elements from all others using the maximum multiplicity.
>>> ms = Multiset('aab')
>>> ms.union_update('bc')
>>> sorted(ms)
['a', 'a', 'b', 'c']
You can also use the ``|=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> ms |= Multiset('bccd')
>>> sorted(ms)
['a', 'a', 'b', 'c', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`union`.
Args:
others: The other sets to union this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | [
"r",
"Update",
"the",
"multiset",
"adding",
"elements",
"from",
"all",
"others",
"using",
"the",
"maximum",
"multiplicity",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L719-L750 | train |
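A short sketch of the maximum-multiplicity rule this method applies:

from multiset import Multiset

ms = Multiset('aab')    # {a: 2, b: 1}
ms.union_update('abb')  # per element: max(2, 1) a's, max(1, 2) b's
print(sorted(ms))       # ['a', 'a', 'b', 'b']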
wheerd/multiset | multiset.py | Multiset.intersection_update | def intersection_update(self, *others):
r"""Update the multiset, keeping only elements found in it and all others.
>>> ms = Multiset('aab')
>>> ms.intersection_update('bc')
>>> sorted(ms)
['b']
You can also use the ``&=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabc')
>>> ms &= Multiset('abbd')
>>> sorted(ms)
['a', 'b']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`intersection`.
Args:
others: The other sets to intersect this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_mapping, others):
for element, current_count in list(self.items()):
multiplicity = other.get(element, 0)
if multiplicity < current_count:
self[element] = multiplicity | python | def intersection_update(self, *others):
r"""Update the multiset, keeping only elements found in it and all others.
>>> ms = Multiset('aab')
>>> ms.intersection_update('bc')
>>> sorted(ms)
['b']
You can also use the ``&=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabc')
>>> ms &= Multiset('abbd')
>>> sorted(ms)
['a', 'b']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`intersection`.
Args:
others: The other sets to intersect this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_mapping, others):
for element, current_count in list(self.items()):
multiplicity = other.get(element, 0)
if multiplicity < current_count:
self[element] = multiplicity | [
"def",
"intersection_update",
"(",
"self",
",",
"*",
"others",
")",
":",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"current_count",
"in",
"list",
"(",
"self",
".",
"items",
"(",
")",
")",
":",
"multiplicity",
"=",
"other",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
"<",
"current_count",
":",
"self",
"[",
"element",
"]",
"=",
"multiplicity"
] | r"""Update the multiset, keeping only elements found in it and all others.
>>> ms = Multiset('aab')
>>> ms.intersection_update('bc')
>>> sorted(ms)
['b']
You can also use the ``&=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabc')
>>> ms &= Multiset('abbd')
>>> sorted(ms)
['a', 'b']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`intersection`.
Args:
others: The other sets to intersect this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | [
"r",
"Update",
"the",
"multiset",
"keeping",
"only",
"elements",
"found",
"in",
"it",
"and",
"all",
"others",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L760-L787 | train |
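The complementary minimum-multiplicity rule, sketched briefly:

from multiset import Multiset

ms = Multiset('aabcc')         # {a: 2, b: 1, c: 2}
ms.intersection_update('abc')  # per element: min(2, 1), min(1, 1), min(2, 1)
print(sorted(ms))              # ['a', 'b', 'c']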
wheerd/multiset | multiset.py | Multiset.difference_update | def difference_update(self, *others):
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
self.discard(element, multiplicity) | python | def difference_update(self, *others):
r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
for other in map(self._as_multiset, others):
for element, multiplicity in other.items():
self.discard(element, multiplicity) | [
"def",
"difference_update",
"(",
"self",
",",
"*",
"others",
")",
":",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_multiset",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"other",
".",
"items",
"(",
")",
":",
"self",
".",
"discard",
"(",
"element",
",",
"multiplicity",
")"
] | r"""Remove all elements contained the others from this multiset.
>>> ms = Multiset('aab')
>>> ms.difference_update('abc')
>>> sorted(ms)
['a']
You can also use the ``-=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms -= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`difference`.
Args:
others: The other sets to remove from this multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | [
"r",
"Remove",
"all",
"elements",
"contained",
"the",
"others",
"from",
"this",
"multiset",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L797-L822 | train |
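A brief sketch showing that the subtraction saturates at zero, since the method delegates to `discard`, which tolerates over-removal:

from multiset import Multiset

ms = Multiset('ab')          # {a: 1, b: 1}
ms.difference_update('aac')  # removes two a's from one, and a c that is absent
print(sorted(ms))            # ['b'], multiplicities never go negative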
wheerd/multiset | multiset.py | Multiset.symmetric_difference_update | def symmetric_difference_update(self, other):
r"""Update the multiset to contain only elements in either this multiset or the other but not both.
>>> ms = Multiset('aab')
>>> ms.symmetric_difference_update('abc')
>>> sorted(ms)
['a', 'c']
You can also use the ``^=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms ^= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`symmetric_difference`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
other = self._as_multiset(other)
elements = set(self.distinct_elements()) | set(other.distinct_elements())
for element in elements:
multiplicity = self[element]
other_count = other[element]
self[element] = (multiplicity - other_count if multiplicity > other_count else other_count - multiplicity) | python | def symmetric_difference_update(self, other):
r"""Update the multiset to contain only elements in either this multiset or the other but not both.
>>> ms = Multiset('aab')
>>> ms.symmetric_difference_update('abc')
>>> sorted(ms)
['a', 'c']
You can also use the ``^=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms ^= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`symmetric_difference`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
"""
other = self._as_multiset(other)
elements = set(self.distinct_elements()) | set(other.distinct_elements())
for element in elements:
multiplicity = self[element]
other_count = other[element]
self[element] = (multiplicity - other_count if multiplicity > other_count else other_count - multiplicity) | [
"def",
"symmetric_difference_update",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"_as_multiset",
"(",
"other",
")",
"elements",
"=",
"set",
"(",
"self",
".",
"distinct_elements",
"(",
")",
")",
"|",
"set",
"(",
"other",
".",
"distinct_elements",
"(",
")",
")",
"for",
"element",
"in",
"elements",
":",
"multiplicity",
"=",
"self",
"[",
"element",
"]",
"other_count",
"=",
"other",
"[",
"element",
"]",
"self",
"[",
"element",
"]",
"=",
"(",
"multiplicity",
"-",
"other_count",
"if",
"multiplicity",
">",
"other_count",
"else",
"other_count",
"-",
"multiplicity",
")"
] | r"""Update the multiset to contain only elements in either this multiset or the other but not both.
>>> ms = Multiset('aab')
>>> ms.symmetric_difference_update('abc')
>>> sorted(ms)
['a', 'c']
You can also use the ``^=`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> ms ^= Multiset('abd')
>>> sorted(ms)
['a', 'b', 'b', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`symmetric_difference`.
Args:
other: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T]. | [
"r",
"Update",
"the",
"multiset",
"to",
"contain",
"only",
"elements",
"in",
"either",
"this",
"multiset",
"or",
"the",
"other",
"but",
"not",
"both",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L832-L860 | train |
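The in-place variant follows the same |m1 - m2| rule; a short sketch:

from multiset import Multiset

ms = Multiset('aab')                   # {a: 2, b: 1}
ms.symmetric_difference_update('abb')  # becomes {a: 1, b: 1}
print(sorted(ms))                      # ['a', 'b']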
wheerd/multiset | multiset.py | Multiset.times_update | def times_update(self, factor):
"""Update each this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a', 'c', 'c', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`times`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor < 0:
raise ValueError("The factor must not be negative.")
elif factor == 0:
self.clear()
else:
_elements = self._elements
for element in _elements:
_elements[element] *= factor
self._total *= factor | python | def times_update(self, factor):
"""Update each this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a', 'c', 'c', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`times`.
Args:
factor: The factor to multiply each multiplicity with.
"""
if factor < 0:
raise ValueError("The factor must not be negative.")
elif factor == 0:
self.clear()
else:
_elements = self._elements
for element in _elements:
_elements[element] *= factor
self._total *= factor | [
"def",
"times_update",
"(",
"self",
",",
"factor",
")",
":",
"if",
"factor",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The factor must not be negative.\"",
")",
"elif",
"factor",
"==",
"0",
":",
"self",
".",
"clear",
"(",
")",
"else",
":",
"_elements",
"=",
"self",
".",
"_elements",
"for",
"element",
"in",
"_elements",
":",
"_elements",
"[",
"element",
"]",
"*=",
"factor",
"self",
".",
"_total",
"*=",
"factor"
] | Update this multiset by multiplying each element's multiplicity with the given scalar factor.
>>> ms = Multiset('aab')
>>> ms.times_update(2)
>>> sorted(ms)
['a', 'a', 'a', 'a', 'b', 'b']
You can also use the ``*=`` operator for the same effect:
>>> ms = Multiset('ac')
>>> ms *= 3
>>> sorted(ms)
['a', 'a', 'a', 'c', 'c', 'c']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`times`.
Args:
factor: The factor to multiply each multiplicity with. | [
"Update",
"each",
"this",
"multiset",
"by",
"multiplying",
"each",
"element",
"s",
"multiplicity",
"with",
"the",
"given",
"scalar",
"factor",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L870-L899 | train |
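A quick sketch of the factor-0 branch, which clears the multiset in place:

from multiset import Multiset

ms = Multiset('aab')
ms.times_update(0)
print(len(ms))  # 0, since len() reflects the total multiplicity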
wheerd/multiset | multiset.py | Multiset.add | def add(self, element, multiplicity=1):
"""Adds an element to the multiset.
>>> ms = Multiset()
>>> ms.add('a')
>>> sorted(ms)
['a']
An optional multiplicity can be specified to define how many of the element are added:
>>> ms.add('b', 2)
>>> sorted(ms)
['a', 'b', 'b']
This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity.
Args:
element:
The element to add to the multiset.
multiplicity:
The multiplicity i.e. count of elements to add.
"""
if multiplicity < 1:
raise ValueError("Multiplicity must be positive")
self._elements[element] += multiplicity
self._total += multiplicity | python | def add(self, element, multiplicity=1):
"""Adds an element to the multiset.
>>> ms = Multiset()
>>> ms.add('a')
>>> sorted(ms)
['a']
An optional multiplicity can be specified to define how many of the element are added:
>>> ms.add('b', 2)
>>> sorted(ms)
['a', 'b', 'b']
This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity.
Args:
element:
The element to add to the multiset.
multiplicity:
The multiplicity i.e. count of elements to add.
"""
if multiplicity < 1:
raise ValueError("Multiplicity must be positive")
self._elements[element] += multiplicity
self._total += multiplicity | [
"def",
"add",
"(",
"self",
",",
"element",
",",
"multiplicity",
"=",
"1",
")",
":",
"if",
"multiplicity",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Multiplicity must be positive\"",
")",
"self",
".",
"_elements",
"[",
"element",
"]",
"+=",
"multiplicity",
"self",
".",
"_total",
"+=",
"multiplicity"
] | Adds an element to the multiset.
>>> ms = Multiset()
>>> ms.add('a')
>>> sorted(ms)
['a']
An optional multiplicity can be specified to define how many of the element are added:
>>> ms.add('b', 2)
>>> sorted(ms)
['a', 'b', 'b']
This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity.
Args:
element:
The element to add to the multiset.
multiplicity:
The multiplicity i.e. count of elements to add. | [
"Adds",
"an",
"element",
"to",
"the",
"multiset",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L907-L932 | train |
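A short sketch of the multiplicity argument and its validation:

from multiset import Multiset

ms = Multiset()
ms.add('x', 3)
print(ms['x'])      # 3
try:
    ms.add('x', 0)  # multiplicities below 1 are rejected
except ValueError as err:
    print(err)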
wheerd/multiset | multiset.py | Multiset.remove | def remove(self, element, multiplicity=None):
"""Removes an element from the multiset.
If no multiplicity is specified, the element is completely removed from the multiset:
>>> ms = Multiset('aabbbc')
>>> ms.remove('a')
2
>>> sorted(ms)
['b', 'b', 'b', 'c']
If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
>>> ms.remove('b', 2)
3
>>> sorted(ms)
['b', 'c']
It is not an error to remove more elements than are in the set:
>>> ms.remove('b', 2)
1
>>> sorted(ms)
['c']
This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
Raises:
KeyError: if the element is not contained in the set. Use :meth:`discard` if
you do not want an exception to be raised.
"""
_elements = self._elements
if element not in _elements:
raise KeyError
old_multiplicity = _elements.get(element, 0)
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must be not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity | python | def remove(self, element, multiplicity=None):
"""Removes an element from the multiset.
If no multiplicity is specified, the element is completely removed from the multiset:
>>> ms = Multiset('aabbbc')
>>> ms.remove('a')
2
>>> sorted(ms)
['b', 'b', 'b', 'c']
If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
>>> ms.remove('b', 2)
3
>>> sorted(ms)
['b', 'c']
It is not an error to remove more elements than are in the set:
>>> ms.remove('b', 2)
1
>>> sorted(ms)
['c']
This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
Raises:
KeyError: if the element is not contained in the set. Use :meth:`discard` if
you do not want an exception to be raised.
"""
_elements = self._elements
if element not in _elements:
raise KeyError
old_multiplicity = _elements.get(element, 0)
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must be not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity | [
"def",
"remove",
"(",
"self",
",",
"element",
",",
"multiplicity",
"=",
"None",
")",
":",
"_elements",
"=",
"self",
".",
"_elements",
"if",
"element",
"not",
"in",
"_elements",
":",
"raise",
"KeyError",
"old_multiplicity",
"=",
"_elements",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"multiplicity",
"is",
"None",
"or",
"multiplicity",
">=",
"old_multiplicity",
":",
"del",
"_elements",
"[",
"element",
"]",
"self",
".",
"_total",
"-=",
"old_multiplicity",
"elif",
"multiplicity",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Multiplicity must be not be negative\"",
")",
"elif",
"multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"-=",
"multiplicity",
"self",
".",
"_total",
"-=",
"multiplicity",
"return",
"old_multiplicity"
] | Removes an element from the multiset.
If no multiplicity is specified, the element is completely removed from the multiset:
>>> ms = Multiset('aabbbc')
>>> ms.remove('a')
2
>>> sorted(ms)
['b', 'b', 'b', 'c']
If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
>>> ms.remove('b', 2)
3
>>> sorted(ms)
['b', 'c']
It is not an error to remove more elements than are in the set:
>>> ms.remove('b', 2)
1
>>> sorted(ms)
['c']
This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
Raises:
KeyError: if the element is not contained in the set. Use :meth:`discard` if
you do not want an exception to be raised. | [
"Removes",
"an",
"element",
"from",
"the",
"multiset",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L934-L987 | train |
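A sketch contrasting the return value with the KeyError path:

from multiset import Multiset

ms = Multiset('aab')
print(ms.remove('a', 1))  # 2, the multiplicity before removal
try:
    ms.remove('z')        # absent elements raise KeyError
except KeyError:
    print('z is not in the multiset')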
wheerd/multiset | multiset.py | Multiset.discard | def discard(self, element, multiplicity=None):
"""Removes the `element` from the multiset.
If multiplicity is ``None``, all occurrences of the element are removed:
>>> ms = Multiset('aab')
>>> ms.discard('a')
2
>>> sorted(ms)
['b']
Otherwise, the multiplicity is subtracted from the one in the multiset and the
old multiplicity is returned:
>>> ms = Multiset('aab')
>>> ms.discard('a', 1)
2
>>> sorted(ms)
['a', 'b']
In contrast to :meth:`remove`, this does not raise an error if the
element is not in the multiset:
>>> ms = Multiset('a')
>>> ms.discard('b')
0
>>> sorted(ms)
['a']
It is also not an error to remove more elements than are in the set:
>>> ms.remove('a', 2)
1
>>> sorted(ms)
[]
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
"""
_elements = self._elements
if element in _elements:
old_multiplicity = _elements[element]
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity
else:
return 0 | python | def discard(self, element, multiplicity=None):
"""Removes the `element` from the multiset.
If multiplicity is ``None``, all occurrences of the element are removed:
>>> ms = Multiset('aab')
>>> ms.discard('a')
2
>>> sorted(ms)
['b']
Otherwise, the multiplicity is subtracted from the one in the multiset and the
old multiplicity is returned:
>>> ms = Multiset('aab')
>>> ms.discard('a', 1)
2
>>> sorted(ms)
['a', 'b']
In contrast to :meth:`remove`, this does not raise an error if the
element is not in the multiset:
>>> ms = Multiset('a')
>>> ms.discard('b')
0
>>> sorted(ms)
['a']
It is also not an error to remove more elements than are in the set:
>>> ms.remove('a', 2)
1
>>> sorted(ms)
[]
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
"""
_elements = self._elements
if element in _elements:
old_multiplicity = _elements[element]
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity
else:
return 0 | [
"def",
"discard",
"(",
"self",
",",
"element",
",",
"multiplicity",
"=",
"None",
")",
":",
"_elements",
"=",
"self",
".",
"_elements",
"if",
"element",
"in",
"_elements",
":",
"old_multiplicity",
"=",
"_elements",
"[",
"element",
"]",
"if",
"multiplicity",
"is",
"None",
"or",
"multiplicity",
">=",
"old_multiplicity",
":",
"del",
"_elements",
"[",
"element",
"]",
"self",
".",
"_total",
"-=",
"old_multiplicity",
"elif",
"multiplicity",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Multiplicity must not be negative\"",
")",
"elif",
"multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"-=",
"multiplicity",
"self",
".",
"_total",
"-=",
"multiplicity",
"return",
"old_multiplicity",
"else",
":",
"return",
"0"
] | Removes the `element` from the multiset.
If multiplicity is ``None``, all occurrences of the element are removed:
>>> ms = Multiset('aab')
>>> ms.discard('a')
2
>>> sorted(ms)
['b']
Otherwise, the multiplicity is subtracted from the one in the multiset and the
old multiplicity is returned:
>>> ms = Multiset('aab')
>>> ms.discard('a', 1)
2
>>> sorted(ms)
['a', 'b']
In contrast to :meth:`remove`, this does not raise an error if the
element is not in the multiset:
>>> ms = Multiset('a')
>>> ms.discard('b')
0
>>> sorted(ms)
['a']
It is also not an error to remove more elements than are in the set:
>>> ms.remove('a', 2)
1
>>> sorted(ms)
[]
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal. | [
"Removes",
"the",
"element",
"from",
"the",
"multiset",
"."
] | 1f002397096edae3da32d004e3159345a476999c | https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L989-L1048 | train |
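A sketch of the two forgiving behaviours that distinguish `discard` from `remove`:

from multiset import Multiset

ms = Multiset('aab')
print(ms.discard('z'))     # 0, no KeyError for absent elements
print(ms.discard('a', 5))  # 2, over-removal simply empties the entry
print(sorted(ms))          # ['b']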
vinci1it2000/schedula | schedula/utils/asy.py | shutdown_executors | def shutdown_executors(wait=True):
"""
Clean up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
"""
return {k: shutdown_executor(k, wait) for k in list(_EXECUTORS.keys())} | python | def shutdown_executors(wait=True):
"""
Clean up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:return:
Shutdown pool executor.
:rtype: dict[str,dict]
"""
return {k: shutdown_executor(k, wait) for k in list(_EXECUTORS.keys())} | [
"def",
"shutdown_executors",
"(",
"wait",
"=",
"True",
")",
":",
"return",
"{",
"k",
":",
"shutdown_executor",
"(",
"k",
",",
"wait",
")",
"for",
"k",
"in",
"list",
"(",
"_EXECUTORS",
".",
"keys",
"(",
")",
")",
"}"
] | Clean up the resources of all initialized executors.
:param wait:
If True then shutdown will not return until all running futures have
finished executing and the resources used by the executors have been
reclaimed.
:type wait: bool
:return:
Shutdown pool executor.
:rtype: dict[str,dict] | [
"Clean",
"-",
"up",
"the",
"resources",
"of",
"all",
"initialized",
"executors",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L88-L102 | train |
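A minimal call sketch; the import path below follows this record's module path, and whether the function is also re-exported at the package top level is not shown here:

from schedula.utils.asy import shutdown_executors

# Wait for running futures, reclaim executor resources, and collect the
# per-executor shutdown summaries keyed by executor name.
summary = shutdown_executors(wait=True)
print(list(summary))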
vinci1it2000/schedula | schedula/utils/asy.py | async_thread | def async_thread(sol, args, node_attr, node_id, *a, **kw):
"""
Execute `sol._evaluate_node` in an asynchronous thread.
:param sol:
Solution to be updated.
:type sol: schedula.utils.sol.Solution
:param args:
Arguments to be passed to node calls.
:type args: tuple
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data or function node id.
:type node_id: str
:param a:
Extra args to invoke `sol._evaluate_node`.
:type a: tuple
:param kw:
Extra kwargs to invoke `sol._evaluate_node`.
:type kw: dict
:return:
Function result.
:rtype: concurrent.futures.Future | AsyncList
"""
executor = _get_executor(_executor_name(kw.get('executor', False), sol.dsp))
if not executor:
return sol._evaluate_node(args, node_attr, node_id, *a, **kw)
futures = args
if node_attr['type'] == 'data' and (
node_attr['wait_inputs'] or 'function' in node_attr):
futures = args[0].values()
from concurrent.futures import Future
futures = {v for v in futures if isinstance(v, Future)}
def _submit():
return executor.thread(
_async_eval, sol, args, node_attr, node_id, *a, **kw
)
if futures: # Chain results.
result = Future()
def _set_res(fut):
try:
result.set_result(fut.result())
except BaseException as ex:
result.set_exception(ex)
def _submit_task(fut=None):
futures.discard(fut)
not futures and _submit().add_done_callback(_set_res)
for f in list(futures):
f.add_done_callback(_submit_task)
else:
result = _submit()
timeout = node_attr.get('await_result', False)
if timeout is not False:
return _await_result(result, timeout, sol, node_id)
n = len(node_attr.get('outputs', []))
return AsyncList(future=result, n=n) if n > 1 else result | python | def async_thread(sol, args, node_attr, node_id, *a, **kw):
"""
Execute `sol._evaluate_node` in an asynchronous thread.
:param sol:
Solution to be updated.
:type sol: schedula.utils.sol.Solution
:param args:
Arguments to be passed to node calls.
:type args: tuple
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data or function node id.
:type node_id: str
:param a:
Extra args to invoke `sol._evaluate_node`.
:type a: tuple
:param kw:
Extra kwargs to invoke `sol._evaluate_node`.
:type kw: dict
:return:
Function result.
:rtype: concurrent.futures.Future | AsyncList
"""
executor = _get_executor(_executor_name(kw.get('executor', False), sol.dsp))
if not executor:
return sol._evaluate_node(args, node_attr, node_id, *a, **kw)
futures = args
if node_attr['type'] == 'data' and (
node_attr['wait_inputs'] or 'function' in node_attr):
futures = args[0].values()
from concurrent.futures import Future
futures = {v for v in futures if isinstance(v, Future)}
def _submit():
return executor.thread(
_async_eval, sol, args, node_attr, node_id, *a, **kw
)
if futures: # Chain results.
result = Future()
def _set_res(fut):
try:
result.set_result(fut.result())
except BaseException as ex:
result.set_exception(ex)
def _submit_task(fut=None):
futures.discard(fut)
not futures and _submit().add_done_callback(_set_res)
for f in list(futures):
f.add_done_callback(_submit_task)
else:
result = _submit()
timeout = node_attr.get('await_result', False)
if timeout is not False:
return _await_result(result, timeout, sol, node_id)
n = len(node_attr.get('outputs', []))
return AsyncList(future=result, n=n) if n > 1 else result | [
"def",
"async_thread",
"(",
"sol",
",",
"args",
",",
"node_attr",
",",
"node_id",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"executor",
"=",
"_get_executor",
"(",
"_executor_name",
"(",
"kw",
".",
"get",
"(",
"'executor'",
",",
"False",
")",
",",
"sol",
".",
"dsp",
")",
")",
"if",
"not",
"executor",
":",
"return",
"sol",
".",
"_evaluate_node",
"(",
"args",
",",
"node_attr",
",",
"node_id",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"futures",
"=",
"args",
"if",
"node_attr",
"[",
"'type'",
"]",
"==",
"'data'",
"and",
"(",
"node_attr",
"[",
"'wait_inputs'",
"]",
"or",
"'function'",
"in",
"node_attr",
")",
":",
"futures",
"=",
"args",
"[",
"0",
"]",
".",
"values",
"(",
")",
"from",
"concurrent",
".",
"futures",
"import",
"Future",
"futures",
"=",
"{",
"v",
"for",
"v",
"in",
"futures",
"if",
"isinstance",
"(",
"v",
",",
"Future",
")",
"}",
"def",
"_submit",
"(",
")",
":",
"return",
"executor",
".",
"thread",
"(",
"_async_eval",
",",
"sol",
",",
"args",
",",
"node_attr",
",",
"node_id",
",",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"if",
"futures",
":",
"# Chain results.",
"result",
"=",
"Future",
"(",
")",
"def",
"_set_res",
"(",
"fut",
")",
":",
"try",
":",
"result",
".",
"set_result",
"(",
"fut",
".",
"result",
"(",
")",
")",
"except",
"BaseException",
"as",
"ex",
":",
"result",
".",
"set_exception",
"(",
"ex",
")",
"def",
"_submit_task",
"(",
"fut",
"=",
"None",
")",
":",
"futures",
".",
"discard",
"(",
"fut",
")",
"not",
"futures",
"and",
"_submit",
"(",
")",
".",
"add_done_callback",
"(",
"_set_res",
")",
"for",
"f",
"in",
"list",
"(",
"futures",
")",
":",
"f",
".",
"add_done_callback",
"(",
"_submit_task",
")",
"else",
":",
"result",
"=",
"_submit",
"(",
")",
"timeout",
"=",
"node_attr",
".",
"get",
"(",
"'await_result'",
",",
"False",
")",
"if",
"timeout",
"is",
"not",
"False",
":",
"return",
"_await_result",
"(",
"result",
",",
"timeout",
",",
"sol",
",",
"node_id",
")",
"n",
"=",
"len",
"(",
"node_attr",
".",
"get",
"(",
"'outputs'",
",",
"[",
"]",
")",
")",
"return",
"AsyncList",
"(",
"future",
"=",
"result",
",",
"n",
"=",
"n",
")",
"if",
"n",
">",
"1",
"else",
"result"
] | Execute `sol._evaluate_node` in an asynchronous thread.
:param sol:
Solution to be updated.
:type sol: schedula.utils.sol.Solution
:param args:
Arguments to be passed to node calls.
:type args: tuple
:param node_attr:
Dictionary of node attributes.
:type node_attr: dict
:param node_id:
Data or function node id.
:type node_id: str
:param a:
Extra args to invoke `sol._evaluate_node`.
:type a: tuple
:param kw:
Extra kwargs to invoke `sol._evaluate_node`.
:type kw: dict
:return:
Function result.
:rtype: concurrent.futures.Future | AsyncList | [
"Execute",
"sol",
".",
"_evaluate_node",
"in",
"an",
"asynchronous",
"thread",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L206-L277 | train |
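The future-chaining trick in the middle of this function, submitting the dependent task only once every input future has resolved, works standalone with plain `concurrent.futures`; a minimal sketch independent of schedula:

from concurrent.futures import Future, ThreadPoolExecutor

executor = ThreadPoolExecutor()
inputs = {Future(), Future()}
result = Future()

def _submit_when_ready(fut=None):
    inputs.discard(fut)
    if not inputs:  # every input resolved: submit the dependent task
        executor.submit(lambda: 42).add_done_callback(
            lambda done: result.set_result(done.result()))

pending = list(inputs)
for f in pending:
    f.add_done_callback(_submit_when_ready)
for f in pending:
    f.set_result(None)  # simulate upstream completion
print(result.result())  # 42
executor.shutdown()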
vinci1it2000/schedula | schedula/utils/asy.py | await_result | def await_result(obj, timeout=None):
"""
Return the result of a `Future` object.
:param obj:
Value object.
:type obj: concurrent.futures.Future | object
:param timeout:
The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
:type timeout: int
:return:
Result.
:rtype: object
Example::
>>> from concurrent.futures import Future
>>> fut = Future()
>>> fut.set_result(3)
>>> await_result(fut), await_result(4)
(3, 4)
"""
from concurrent.futures import Future
return obj.result(timeout) if isinstance(obj, Future) else obj | python | def await_result(obj, timeout=None):
"""
Return the result of a `Future` object.
:param obj:
Value object.
:type obj: concurrent.futures.Future | object
:param timeout:
The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
:type timeout: int
:return:
Result.
:rtype: object
Example::
>>> from concurrent.futures import Future
>>> fut = Future()
>>> fut.set_result(3)
>>> await_result(fut), await_result(4)
(3, 4)
"""
from concurrent.futures import Future
return obj.result(timeout) if isinstance(obj, Future) else obj | [
"def",
"await_result",
"(",
"obj",
",",
"timeout",
"=",
"None",
")",
":",
"from",
"concurrent",
".",
"futures",
"import",
"Future",
"return",
"obj",
".",
"result",
"(",
"timeout",
")",
"if",
"isinstance",
"(",
"obj",
",",
"Future",
")",
"else",
"obj"
] | Return the result of a `Future` object.
:param obj:
Value object.
:type obj: concurrent.futures.Future | object
:param timeout:
The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
:type timeout: int
:return:
Result.
:rtype: object
Example::
>>> from concurrent.futures import Future
>>> fut = Future()
>>> fut.set_result(3)
>>> await_result(fut), await_result(4)
(3, 4) | [
"Return",
"the",
"result",
"of",
"a",
"Future",
"object",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/asy.py#L440-L466 | train |
CI-WATER/gsshapy | gsshapy/lib/pivot.py | pivot | def pivot(table, left, top, value):
"""
Creates a cross-tab or pivot table from a normalised input table. Use this
function to 'denormalize' a table of normalized records.
* The table argument can be a list of dictionaries or a Table object.
(http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/334621)
* The left argument is a tuple of headings which are displayed down the
left side of the new table.
* The top argument is a tuple of headings which are displayed across the
top of the new table.
Tuples are used so that multiple element headings and columns can be used.
E.g. To transform the list (listOfDicts):
Name, Year, Value
-----------------------
'Simon', 2004, 32
'Simon', 2005, 128
'Russel', 2004, 64
'Eric', 2004, 52
'Russel', 2005, 32
into the new list:
'Name', 2004, 2005
------------------------
'Simon', 32, 128
'Russel', 64, 32
'Eric', 52, NA
you would call pivot with the arguments:
newList = pivot(listOfDicts, ('Name',), ('Year',), 'Value')
"""
rs = {}
ysort = []
xsort = []
for row in table:
yaxis = tuple([row[c] for c in left]) # e.g. yaxis = ('Simon',)
if yaxis not in ysort: ysort.append(yaxis)
xaxis = tuple([row[c] for c in top]) # e.g. xaxis = ('2004',)
if xaxis not in xsort: xsort.append(xaxis)
try:
rs[yaxis]
except KeyError:
rs[yaxis] = {}
if xaxis not in rs[yaxis]:
rs[yaxis][xaxis] = 0
rs[yaxis][xaxis] += row[value]
"""
In the following loop we take care of missing data,
e.g. 'Eric' has a value in 2004 but not in 2005
"""
for key in rs:
if len(rs[key]) - len(xsort):
for var in xsort:
if var not in rs[key].keys():
rs[key][var] = ''
headings = list(left)
headings.extend(xsort)
t = []
"""
The lists 'sortedkeys' and 'sortedvalues' make sure that
even if the field 'top' is unordered, data will be transposed correctly.
E.g. in the example above the table rows are not ordered by the year
"""
for left in ysort:
row = list(left)
sortedkeys = sorted(rs[left].keys())
sortedvalues = map(rs[left].get, sortedkeys)
row.extend(sortedvalues)
t.append(dict(zip(headings,row)))
return t | python | def pivot(table, left, top, value):
"""
Creates a cross-tab or pivot table from a normalised input table. Use this
function to 'denormalize' a table of normalized records.
* The table argument can be a list of dictionaries or a Table object.
(http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/334621)
* The left argument is a tuple of headings which are displayed down the
left side of the new table.
* The top argument is a tuple of headings which are displayed across the
top of the new table.
Tuples are used so that multiple element headings and columns can be used.
E.g. To transform the list (listOfDicts):
Name, Year, Value
-----------------------
'Simon', 2004, 32
'Simon', 2005, 128
'Russel', 2004, 64
'Eric', 2004, 52
'Russel', 2005, 32
into the new list:
'Name', 2004, 2005
------------------------
'Simon', 32, 128
'Russel', 64, 32
'Eric', 52, NA
you would call pivot with the arguments:
newList = pivot(listOfDicts, ('Name',), ('Year',), 'Value')
"""
rs = {}
ysort = []
xsort = []
for row in table:
yaxis = tuple([row[c] for c in left]) # e.g. yaxis = ('Simon',)
if yaxis not in ysort: ysort.append(yaxis)
xaxis = tuple([row[c] for c in top]) # e.g. xaxis = ('2004',)
if xaxis not in xsort: xsort.append(xaxis)
try:
rs[yaxis]
except KeyError:
rs[yaxis] = {}
if xaxis not in rs[yaxis]:
rs[yaxis][xaxis] = 0
rs[yaxis][xaxis] += row[value]
"""
In the following loop we take care of missing data,
e.g. 'Eric' has a value in 2004 but not in 2005
"""
for key in rs:
if len(rs[key]) - len(xsort):
for var in xsort:
if var not in rs[key].keys():
rs[key][var] = ''
headings = list(left)
headings.extend(xsort)
t = []
"""
The lists 'sortedkeys' and 'sortedvalues' make sure that
even if the field 'top' is unordered, data will be transposed correctly.
E.g. in the example above the table rows are not ordered by the year
"""
for left in ysort:
row = list(left)
sortedkeys = sorted(rs[left].keys())
sortedvalues = map(rs[left].get, sortedkeys)
row.extend(sortedvalues)
t.append(dict(zip(headings,row)))
return t | [
"def",
"pivot",
"(",
"table",
",",
"left",
",",
"top",
",",
"value",
")",
":",
"rs",
"=",
"{",
"}",
"ysort",
"=",
"[",
"]",
"xsort",
"=",
"[",
"]",
"for",
"row",
"in",
"table",
":",
"yaxis",
"=",
"tuple",
"(",
"[",
"row",
"[",
"c",
"]",
"for",
"c",
"in",
"left",
"]",
")",
"# e.g. yaxis = ('Simon',)",
"if",
"yaxis",
"not",
"in",
"ysort",
":",
"ysort",
".",
"append",
"(",
"yaxis",
")",
"xaxis",
"=",
"tuple",
"(",
"[",
"row",
"[",
"c",
"]",
"for",
"c",
"in",
"top",
"]",
")",
"# e.g. xaxis = ('2004',)",
"if",
"xaxis",
"not",
"in",
"xsort",
":",
"xsort",
".",
"append",
"(",
"xaxis",
")",
"try",
":",
"rs",
"[",
"yaxis",
"]",
"except",
"KeyError",
":",
"rs",
"[",
"yaxis",
"]",
"=",
"{",
"}",
"if",
"xaxis",
"not",
"in",
"rs",
"[",
"yaxis",
"]",
":",
"rs",
"[",
"yaxis",
"]",
"[",
"xaxis",
"]",
"=",
"0",
"rs",
"[",
"yaxis",
"]",
"[",
"xaxis",
"]",
"+=",
"row",
"[",
"value",
"]",
"\"\"\"\n In the following loop we take care of missing data,\n e.g 'Eric' has a value in 2004 but not in 2005\n \"\"\"",
"for",
"key",
"in",
"rs",
":",
"if",
"len",
"(",
"rs",
"[",
"key",
"]",
")",
"-",
"len",
"(",
"xsort",
")",
":",
"for",
"var",
"in",
"xsort",
":",
"if",
"var",
"not",
"in",
"rs",
"[",
"key",
"]",
".",
"keys",
"(",
")",
":",
"rs",
"[",
"key",
"]",
"[",
"var",
"]",
"=",
"''",
"headings",
"=",
"list",
"(",
"left",
")",
"headings",
".",
"extend",
"(",
"xsort",
")",
"t",
"=",
"[",
"]",
"\"\"\"\n The lists 'sortedkeys' and 'sortedvalues' make sure that\n even if the field 'top' is unordered, data will be transposed correctly.\n E.g. in the example above the table rows are not ordered by the year\n \"\"\"",
"for",
"left",
"in",
"ysort",
":",
"row",
"=",
"list",
"(",
"left",
")",
"sortedkeys",
"=",
"sorted",
"(",
"rs",
"[",
"left",
"]",
".",
"keys",
"(",
")",
")",
"sortedvalues",
"=",
"map",
"(",
"rs",
"[",
"left",
"]",
".",
"get",
",",
"sortedkeys",
")",
"row",
".",
"extend",
"(",
"sortedvalues",
")",
"t",
".",
"append",
"(",
"dict",
"(",
"zip",
"(",
"headings",
",",
"row",
")",
")",
")",
"return",
"t"
] | Creates a cross-tab or pivot table from a normalised input table. Use this
function to 'denormalize' a table of normalized records.
* The table argument can be a list of dictionaries or a Table object.
(http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/334621)
* The left argument is a tuple of headings which are displayed down the
left side of the new table.
* The top argument is a tuple of headings which are displayed across the
top of the new table.
Tuples are used so that multiple element headings and columns can be used.
E.g. To transform the list (listOfDicts):
Name, Year, Value
-----------------------
'Simon', 2004, 32
'Simon', 2005, 128
'Russel', 2004, 64
'Eric', 2004, 52
'Russel', 2005, 32
into the new list:
'Name', 2004, 2005
------------------------
'Simon', 32, 128
'Russel', 64, 32
'Eric', 52, NA
you would call pivot with the arguments:
newList = pivot(listOfDicts, ('Name',), ('Year',), 'Value') | [
"Creates",
"a",
"cross",
"-",
"tab",
"or",
"pivot",
"table",
"from",
"a",
"normalised",
"input",
"table",
".",
"Use",
"this",
"function",
"to",
"denormalize",
"a",
"table",
"of",
"normalized",
"records",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/pivot.py#L14-L94 | train |
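A runnable sketch of the docstring's example; note that because `top` is a tuple of headings, the year keys in each output dict are one-element tuples:

from gsshapy.lib.pivot import pivot

rows = [
    {'Name': 'Simon', 'Year': 2004, 'Value': 32},
    {'Name': 'Simon', 'Year': 2005, 'Value': 128},
    {'Name': 'Russel', 'Year': 2004, 'Value': 64},
    {'Name': 'Eric', 'Year': 2004, 'Value': 52},
    {'Name': 'Russel', 'Year': 2005, 'Value': 32},
]
for record in pivot(rows, ('Name',), ('Year',), 'Value'):
    print(record)
# {'Name': 'Simon', (2004,): 32, (2005,): 128}
# {'Name': 'Russel', (2004,): 64, (2005,): 32}
# {'Name': 'Eric', (2004,): 52, (2005,): ''}   missing cells become ''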
CI-WATER/gsshapy | gsshapy/grid/hrrr_to_gssha.py | download_hrrr_for_gssha | def download_hrrr_for_gssha(main_directory,
forecast_start_date_string, #EX. '20160913'
forecast_start_hour_string, #EX. '00' to '23'
leftlon=-180, rightlon=180,
toplat=90,bottomlat=-90):
"""
Function to download HRRR data for GSSHA
URL:
http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl
Args:
main_directory(str): Location of the output for the forecast data.
forecast_start_date_string(str): String for day of forecast. Ex. '20160913'
forecast_start_hour_string(str): String for hour of forecast start. Ex. '02'
leftlon(Optional[double,int]): Left bound for longitude. Default is -180.
rightlon(Optional[double,int]): Right bound for longitude. Default is 180.
toplat(Optional[double,int]): Top bound for latitude. Default is 90.
bottomlat(Optional[double,int]): Bottom bound for latitude. Default is -90.
Returns:
downloaded_file_list(list): List of paths to downloaded files.
Example::
from gsshapy.grid.hrrr_to_gssha import download_hrrr_for_gssha
hrrr_folder = '/HRRR'
leftlon = -95
rightlon = -75
toplat = 35
bottomlat = 30
downloaded_file_list = download_hrrr_for_gssha(hrrr_folder,'20160914','01',
leftlon,rightlon,toplat,bottomlat)
"""
out_directory = path.join(main_directory, forecast_start_date_string)
try:
mkdir(out_directory)
except OSError:
pass
forecast_timestep_hour_string_array = ['00', '01', '02', '03', '04',
'05', '06', '07', '08', '09',
'10', '11', '12', '13', '14',
'15', '16', '17', '18']
downloaded_file_list = []
for forecast_timestep_hour_string in forecast_timestep_hour_string_array:
file_name = 'hrrr.t{0}z.wrfsfcf{1}.grib2'.format(forecast_start_hour_string, forecast_timestep_hour_string)
payload = {
'file': file_name,
'lev_10_m_above_ground': 'on',
'lev_2_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_PRATE': 'on',
'var_PRES': 'on',
'var_RH': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'var_TCDC': 'on',
'subregion': '',
'leftlon': str(leftlon),
'rightlon': str(rightlon),
'toplat': str(toplat),
'bottomlat': str(bottomlat),
'dir': '/hrrr.{0}'.format(forecast_start_date_string),
}
r = requests.get('http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl', params=payload, stream=True)
if r.status_code == requests.codes.ok:
out_file = path.join(out_directory, file_name)
downloaded_file_list.append(out_file)
with open(out_file, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
else:
log.error("Problem downloading {0}".format(file_name))
for filename in downloaded_file_list:
try:
remove(filename)
except OSError:
pass
downloaded_file_list = []
break
return downloaded_file_list | python | def download_hrrr_for_gssha(main_directory,
forecast_start_date_string, #EX. '20160913'
forecast_start_hour_string, #EX. '00' to '23'
leftlon=-180, rightlon=180,
toplat=90,bottomlat=-90):
"""
Function to download HRRR data for GSSHA
URL:
http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl
Args:
main_directory(str): Location of the output for the forecast data.
forecast_start_date_string(str): String for day of forecast. Ex. '20160913'
forecast_start_hour_string(str): String for hour of forecast start. Ex. '02'
leftlon(Optional[double,int]): Left bound for longitude. Default is -180.
rightlon(Optional[double,int]): Right bound for longitude. Default is 180.
toplat(Optional[double,int]): Top bound for latitude. Default is 90.
bottomlat(Optional[double,int]): Bottom bound for latitude. Default is -90.
Returns:
downloaded_file_list(list): List of paths to downloaded files.
Example::
from gsshapy.grid.hrrr_to_gssha import download_hrrr_for_gssha
hrrr_folder = '/HRRR'
leftlon = -95
rightlon = -75
toplat = 35
bottomlat = 30
downloaded_file_list = download_hrrr_for_gssha(hrrr_folder,'20160914','01',
leftlon,rightlon,toplat,bottomlat)
"""
out_directory = path.join(main_directory, forecast_start_date_string)
try:
mkdir(out_directory)
except OSError:
pass
forecast_timestep_hour_string_array = ['00', '01', '02', '03', '04',
'05', '06', '07', '08', '09',
'10', '11', '12', '13', '14',
'15', '16', '17', '18']
downloaded_file_list = []
for forecast_timestep_hour_string in forecast_timestep_hour_string_array:
file_name = 'hrrr.t{0}z.wrfsfcf{1}.grib2'.format(forecast_start_hour_string, forecast_timestep_hour_string)
payload = {
'file': file_name,
'lev_10_m_above_ground': 'on',
'lev_2_m_above_ground': 'on',
'lev_entire_atmosphere': 'on',
'lev_surface': 'on',
'var_DSWRF': 'on',
'var_PRATE': 'on',
'var_PRES': 'on',
'var_RH': 'on',
'var_TMP': 'on',
'var_UGRD': 'on',
'var_VGRD': 'on',
'var_TCDC': 'on',
'subregion': '',
'leftlon': str(leftlon),
'rightlon': str(rightlon),
'toplat': str(toplat),
'bottomlat': str(bottomlat),
'dir': '/hrrr.{0}'.format(forecast_start_date_string),
}
r = requests.get('http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl', params=payload, stream=True)
if r.status_code == requests.codes.ok:
out_file = path.join(out_directory, file_name)
downloaded_file_list.append(out_file)
with open(out_file, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
else:
log.error("Problem downloading {0}".format(file_name))
for filename in downloaded_file_list:
try:
remove(filename)
except OSError:
pass
downloaded_file_list = []
break
return downloaded_file_list | [
"def",
"download_hrrr_for_gssha",
"(",
"main_directory",
",",
"forecast_start_date_string",
",",
"#EX. '20160913'",
"forecast_start_hour_string",
",",
"#EX. '00' to '23'",
"leftlon",
"=",
"-",
"180",
",",
"rightlon",
"=",
"180",
",",
"toplat",
"=",
"90",
",",
"bottomlat",
"=",
"-",
"90",
")",
":",
"out_directory",
"=",
"path",
".",
"join",
"(",
"main_directory",
",",
"forecast_start_date_string",
")",
"try",
":",
"mkdir",
"(",
"out_directory",
")",
"except",
"OSError",
":",
"pass",
"forecast_timestep_hour_string_array",
"=",
"[",
"'00'",
",",
"'01'",
",",
"'02'",
",",
"'03'",
",",
"'04'",
",",
"'05'",
",",
"'06'",
",",
"'07'",
",",
"'08'",
",",
"'09'",
",",
"'10'",
",",
"'11'",
",",
"'12'",
",",
"'13'",
",",
"'14'",
",",
"'15'",
",",
"'16'",
",",
"'17'",
",",
"'18'",
"]",
"downloaded_file_list",
"=",
"[",
"]",
"for",
"forecast_timestep_hour_string",
"in",
"forecast_timestep_hour_string_array",
":",
"file_name",
"=",
"'hrrr.t{0}z.wrfsfcf{1}.grib2'",
".",
"format",
"(",
"forecast_start_hour_string",
",",
"forecast_timestep_hour_string",
")",
"payload",
"=",
"{",
"'file'",
":",
"file_name",
",",
"'lev_10_m_above_ground'",
":",
"'on'",
",",
"'lev_2_m_above_ground'",
":",
"'on'",
",",
"'lev_entire_atmosphere'",
":",
"'on'",
",",
"'lev_surface'",
":",
"'on'",
",",
"'var_DSWRF'",
":",
"'on'",
",",
"'var_PRATE'",
":",
"'on'",
",",
"'var_PRES'",
":",
"'on'",
",",
"'var_RH'",
":",
"'on'",
",",
"'var_TMP'",
":",
"'on'",
",",
"'var_UGRD'",
":",
"'on'",
",",
"'var_VGRD'",
":",
"'on'",
",",
"'var_TCDC'",
":",
"'on'",
",",
"'subregion'",
":",
"''",
",",
"'leftlon'",
":",
"str",
"(",
"leftlon",
")",
",",
"'rightlon'",
":",
"str",
"(",
"rightlon",
")",
",",
"'toplat'",
":",
"str",
"(",
"toplat",
")",
",",
"'bottomlat'",
":",
"str",
"(",
"bottomlat",
")",
",",
"'dir'",
":",
"'/hrrr.{0}'",
".",
"format",
"(",
"forecast_start_date_string",
")",
",",
"}",
"r",
"=",
"requests",
".",
"get",
"(",
"'http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl'",
",",
"params",
"=",
"payload",
",",
"stream",
"=",
"True",
")",
"if",
"r",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"out_file",
"=",
"path",
".",
"join",
"(",
"out_directory",
",",
"file_name",
")",
"downloaded_file_list",
".",
"append",
"(",
"out_file",
")",
"with",
"open",
"(",
"out_file",
",",
"'wb'",
")",
"as",
"fd",
":",
"for",
"chunk",
"in",
"r",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1024",
")",
":",
"fd",
".",
"write",
"(",
"chunk",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Problem downloading {0}\"",
".",
"format",
"(",
"file_name",
")",
")",
"for",
"filename",
"in",
"downloaded_file_list",
":",
"try",
":",
"remove",
"(",
"filename",
")",
"except",
"OSError",
":",
"pass",
"downloaded_file_list",
"=",
"[",
"]",
"break",
"return",
"downloaded_file_list"
] | Function to download HRRR data for GSSHA
URL:
http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl
Args:
main_directory(str): Location of the output for the forecast data.
forecast_start_date_string(str): String for day of forecast. Ex. '20160913'
forecast_start_hour_string(str): String for hour of forecast start. Ex. '02'
leftlon(Optional[double,int]): Left bound for longitude. Default is -180.
rightlon(Optional[double,int]): Right bound for longitude. Default is 180.
toplat(Optional[double,int]): Top bound for latitude. Default is 90.
bottomlat(Optional[double,int]): Bottom bound for latitude. Default is -90.
Returns:
downloaded_file_list(list): List of paths to downloaded files.
Example::
from gsshapy.grid.hrrr_to_gssha import download_hrrr_for_gssha
hrrr_folder = '/HRRR'
leftlon = -95
rightlon = -75
toplat = 35
bottomlat = 30
downloaded_file_list = download_hrrr_for_gssha(hrrr_folder,'20160914','01',
leftlon,rightlon,toplat,bottomlat) | [
"Function",
"to",
"download",
"HRRR",
"data",
"for",
"GSSHA"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/hrrr_to_gssha.py#L24-L114 | train |
timofurrer/ramlient | ramlient/core.py | Node._patch_resource | def _patch_resource(self, method):
    """
    Patch the current RAML ResourceNode by the resource with the
    correct method if it exists

    If the resource with the specified method does not exist
    an exception is raised.

    :param str method: the method of the resource
    :raises UnsupportedResourceMethodError: if resource does not support the method
    """
    resource = self.client.get_resource("", self.resource.path, method)
    if not resource:
        raise UnsupportedResourceMethodError(self.resource.path, method)
    self.resource = resource | python | def _patch_resource(self, method):
    """
    Patch the current RAML ResourceNode by the resource with the
    correct method if it exists

    If the resource with the specified method does not exist
    an exception is raised.

    :param str method: the method of the resource
    :raises UnsupportedResourceMethodError: if resource does not support the method
    """
    resource = self.client.get_resource("", self.resource.path, method)
    if not resource:
        raise UnsupportedResourceMethodError(self.resource.path, method)
    self.resource = resource | [
"def", "_patch_resource", "(", "self", ",", "method", ")", ":", "resource", "=", "self", ".",
"client", ".", "get_resource", "(", "\"\"", ",", "self", ".", "resource", ".", "path", ",",
"method", ")", "if", "not", "resource", ":", "raise", "UnsupportedResourceMethodError", "(",
"self", ".", "resource", ".", "path", ",", "method", ")", "self", ".", "resource", "=", "resource"
] | Patch the current RAML ResourceNode by the resource with the
correct method if it exists

If the resource with the specified method does not exist
an exception is raised.

:param str method: the method of the resource
:raises UnsupportedResourceMethodError: if resource does not support the method | [
"Patch", "the", "current", "RAML", "ResourceNode", "by", "the", "resource", "with", "the",
"correct", "method", "if", "it", "exists"
] | e93092252635a6b3b0aca2c390b9f820368b791c | https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L61-L77 | train |
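A minimal sketch of the lookup-or-raise pattern in _patch_resource, using plain dicts instead of ramlfications ResourceNodes; the exception class is redefined locally for the example, not imported from ramlient.

class UnsupportedResourceMethodError(Exception):
    """Local stand-in for ramlient's exception of the same name."""
    def __init__(self, path, method):
        super(UnsupportedResourceMethodError, self).__init__(
            "{0} does not support {1}".format(path, method))

def pick_resource(resources, path, method):
    """Return the resource matching path and method, or raise, mirroring the guard above."""
    for res in resources:
        if res["path"] == path and res["method"] == method:
            return res
    raise UnsupportedResourceMethodError(path, method)

resources = [{"path": "/users", "method": "get"}]
print(pick_resource(resources, "/users", "get"))   # found
# pick_resource(resources, "/users", "delete")     # would raise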
timofurrer/ramlient | ramlient/core.py | Client.parse_raml | def parse_raml(self):
    """
    Parse RAML file
    """
    if utils.is_url(self.ramlfile):
        raml = utils.download_file(self.ramlfile)
    else:
        with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
            raml = raml_f.read()

    loader = ramlfications.loads(raml)
    config = ramlfications.setup_config(self.ramlconfig)
    self.raml = ramlfications.parse_raml(loader, config) | python | def parse_raml(self):
    """
    Parse RAML file
    """
    if utils.is_url(self.ramlfile):
        raml = utils.download_file(self.ramlfile)
    else:
        with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
            raml = raml_f.read()

    loader = ramlfications.loads(raml)
    config = ramlfications.setup_config(self.ramlconfig)
    self.raml = ramlfications.parse_raml(loader, config) | [
"def", "parse_raml", "(", "self", ")", ":", "if", "utils", ".", "is_url", "(", "self", ".",
"ramlfile", ")", ":", "raml", "=", "utils", ".", "download_file", "(", "self", ".", "ramlfile",
")", "else", ":", "with", "codecs", ".", "open", "(", "self", ".", "ramlfile", ",", "\"rb\"",
",", "encoding", "=", "\"utf-8\"", ")", "as", "raml_f", ":", "raml", "=", "raml_f", ".", "read",
"(", ")", "loader", "=", "ramlfications", ".", "loads", "(", "raml", ")", "config", "=",
"ramlfications", ".", "setup_config", "(", "self", ".", "ramlconfig", ")", "self", ".", "raml",
"=", "ramlfications", ".", "parse_raml", "(", "loader", ",", "config", ")"
] | Parse RAML file | [
"Parse", "RAML", "file"
] | e93092252635a6b3b0aca2c390b9f820368b791c | https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L112-L124 | train |
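A usage sketch built only from the calls this record itself shows; 'api.raml' is a placeholder file name, and passing None to setup_config to get library defaults is an assumption, not something the record demonstrates.

import codecs
import ramlfications

def load_api(raml_path="api.raml"):
    """Parse a local RAML file the same way Client.parse_raml does."""
    with codecs.open(raml_path, "rb", encoding="utf-8") as raml_f:
        raml = raml_f.read()
    loader = ramlfications.loads(raml)
    config = ramlfications.setup_config(None)  # assumption: None selects default config
    return ramlfications.parse_raml(loader, config)

# api = load_api()
# print([(res.method, res.path) for res in api.resources])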
timofurrer/ramlient | ramlient/core.py | Client.get_resource | def get_resource(self, base_resource_path, resource_path, method=None):
    """
    Gets a resource by it's path and optional by it's method

    This method does not care about the supported resource methods
    unless it is specified.

    :param str resource_path: The path of the resource
    :param str method: The method of the path.

    :returns: the resource if it exists or None
    :rtype: ResourceNode
    """
    basic_path = base_resource_path + resource_path
    dynamic_path = base_resource_path + "{" + resource_path + "}"
    for resource in self.raml.resources:
        method_matched = method is None or resource.method == method
        if method_matched and (resource.path == basic_path
                               or resource.path == basic_path + '/'):
            return resource
        if resource.path == dynamic_path and method_matched:
            return NodeParameter(resource=resource, parameter=resource_path)
    return None | python | def get_resource(self, base_resource_path, resource_path, method=None):
    """
    Gets a resource by it's path and optional by it's method

    This method does not care about the supported resource methods
    unless it is specified.

    :param str resource_path: The path of the resource
    :param str method: The method of the path.

    :returns: the resource if it exists or None
    :rtype: ResourceNode
    """
    basic_path = base_resource_path + resource_path
    dynamic_path = base_resource_path + "{" + resource_path + "}"
    for resource in self.raml.resources:
        method_matched = method is None or resource.method == method
        if method_matched and (resource.path == basic_path
                               or resource.path == basic_path + '/'):
            return resource
        if resource.path == dynamic_path and method_matched:
            return NodeParameter(resource=resource, parameter=resource_path)
    return None | [
"def", "get_resource", "(", "self", ",", "base_resource_path", ",", "resource_path", ",",
"method", "=", "None", ")", ":", "basic_path", "=", "base_resource_path", "+", "resource_path",
"dynamic_path", "=", "base_resource_path", "+", "\"{\"", "+", "resource_path", "+", "\"}\"",
"for", "resource", "in", "self", ".", "raml", ".", "resources", ":", "method_matched", "=",
"method", "is", "None", "or", "resource", ".", "method", "==", "method", "if", "method_matched",
"and", "(", "resource", ".", "path", "==", "basic_path", "or", "resource", ".", "path", "==",
"basic_path", "+", "'/'", ")", ":", "return", "resource", "if", "resource", ".", "path", "==",
"dynamic_path", "and", "method_matched", ":", "return", "NodeParameter", "(", "resource", "=",
"resource", ",", "parameter", "=", "resource_path", ")", "return", "None"
] | Gets a resource by it's path and optional by it's method

This method does not care about the supported resource methods
unless it is specified.

:param str resource_path: The path of the resource
:param str method: The method of the path.

:returns: the resource if it exists or None
:rtype: ResourceNode | [
"Gets", "a", "resource", "by", "it", "s", "path", "and", "optional", "by", "it", "s", "method"
] | e93092252635a6b3b0aca2c390b9f820368b791c | https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/core.py#L140-L163 | train |
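A simplified, self-contained sketch of the matching rule in get_resource: try the literal path (with and without a trailing slash) before a "{param}" template. Dicts stand in for ResourceNodes, and the template is built as base + "/{name}", a slight simplification of the record's string concatenation.

def match(resources, base, name, method=None):
    """Return (resource, uri_parameter) for a path segment, or (None, None)."""
    literal = base + "/" + name
    template = base + "/{" + name + "}"
    for res in resources:
        if method is not None and res["method"] != method:
            continue
        if res["path"] in (literal, literal + "/"):
            return res, None              # exact resource
        if res["path"] == template:
            return res, name              # URI-parameter resource
    return None, None

resources = [{"path": "/users", "method": "get"},
             {"path": "/users/{id}", "method": "get"}]
print(match(resources, "", "users", "get"))     # literal /users
print(match(resources, "/users", "id", "get"))  # templated /users/{id}, "id" is the parameter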
Robpol86/etaprogress | etaprogress/components/units.py | UnitByte.auto_no_thousands | def auto_no_thousands(self):
    """Like self.auto but calculates the next unit if >999.99."""
    if self._value >= 1000000000000:
        return self.TiB, 'TiB'
    if self._value >= 1000000000:
        return self.GiB, 'GiB'
    if self._value >= 1000000:
        return self.MiB, 'MiB'
    if self._value >= 1000:
        return self.KiB, 'KiB'
    else:
        return self.B, 'B' | python | def auto_no_thousands(self):
    """Like self.auto but calculates the next unit if >999.99."""
    if self._value >= 1000000000000:
        return self.TiB, 'TiB'
    if self._value >= 1000000000:
        return self.GiB, 'GiB'
    if self._value >= 1000000:
        return self.MiB, 'MiB'
    if self._value >= 1000:
        return self.KiB, 'KiB'
    else:
        return self.B, 'B' | [
"def", "auto_no_thousands", "(", "self", ")", ":", "if", "self", ".", "_value", ">=",
"1000000000000", ":", "return", "self", ".", "TiB", ",", "'TiB'", "if", "self", ".", "_value",
">=", "1000000000", ":", "return", "self", ".", "GiB", ",", "'GiB'", "if", "self", ".",
"_value", ">=", "1000000", ":", "return", "self", ".", "MiB", ",", "'MiB'", "if", "self", ".",
"_value", ">=", "1000", ":", "return", "self", ".", "KiB", ",", "'KiB'", "else", ":", "return",
"self", ".", "B", ",", "'B'"
] | Like self.auto but calculates the next unit if >999.99. | [
"Like", "self", ".", "auto", "but", "calculates", "the", "next", "unit", "if", ">", "999", ".", "99", "."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/units.py#L96-L107 | train |
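The same scaling rule as a standalone function. Two assumptions are worth flagging: the KiB/MiB/GiB/TiB properties are assumed to divide by powers of 1024 (the class body is not part of this record), and the record's quirk of decimal thresholds (1000, 1000000, ...) paired with binary unit labels is kept deliberately.

def auto_unit(value):
    """Return (scaled_value, unit_label) for a byte count, mirroring auto_no_thousands."""
    scales = (
        (1000000000000, 1024 ** 4, 'TiB'),
        (1000000000, 1024 ** 3, 'GiB'),
        (1000000, 1024 ** 2, 'MiB'),
        (1000, 1024, 'KiB'),
    )
    for threshold, divisor, label in scales:
        if value >= threshold:  # decimal threshold, binary divisor, as in the record
            return value / float(divisor), label
    return value, 'B'

print(auto_unit(1536))  # roughly 1.5 KiB
print(auto_unit(999))   # (999, 'B'), stays in bytes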
Robpol86/etaprogress | example_colors.py | error | def error(message, code=1):
    """Prints an error message to stderr and exits with a status of 1 by default."""
    if message:
        print('ERROR: {0}'.format(message), file=sys.stderr)
    else:
        print(file=sys.stderr)
    sys.exit(code) | python | def error(message, code=1):
    """Prints an error message to stderr and exits with a status of 1 by default."""
    if message:
        print('ERROR: {0}'.format(message), file=sys.stderr)
    else:
        print(file=sys.stderr)
    sys.exit(code) | [
"def", "error", "(", "message", ",", "code", "=", "1", ")", ":", "if", "message", ":", "print",
"(", "'ERROR: {0}'", ".", "format", "(", "message", ")", ",", "file", "=", "sys", ".", "stderr",
")", "else", ":", "print", "(", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit",
"(", "code", ")"
] | Prints an error message to stderr and exits with a status of 1 by default. | [
"Prints", "an", "error", "message", "to", "stderr", "and", "exits", "with", "a", "status",
"of", "1", "by", "default", "."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/example_colors.py#L30-L36 | train |
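The record's snippet made self-contained: example_colors.py presumably imports sys and print_function at module level (not shown in the record), so both are added here, along with a hypothetical call.

from __future__ import print_function
import sys

def error(message, code=1):
    """Prints an error message to stderr and exits with a status of 1 by default."""
    if message:
        print('ERROR: {0}'.format(message), file=sys.stderr)
    else:
        print(file=sys.stderr)
    sys.exit(code)

if __name__ == '__main__':
    error('could not parse arguments', code=2)  # writes to stderr, exits with status 2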
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | update_hmet_card_file | def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
    """This function updates the paths in the HMET card file to the new
    location of the HMET data. This is necessary because the file paths
    are absolute and will need to be updated if moved.

    Args:
        hmet_card_file_path(str): Location of the file used for the HMET_ASCII card.
        new_hmet_data_path(str): Location where the HMET ASCII files are currently.

    Example::

        new_hmet_data_path = "E:\\GSSHA\\new_hmet_directory"
        hmet_card_file_path = "E:\\GSSHA\\hmet_card_file.txt"

        update_hmet_card_file(hmet_card_file_path, new_hmet_data_path)
    """
    hmet_card_file_path_temp = "{0}_tmp".format(hmet_card_file_path)
    try:
        remove(hmet_card_file_path_temp)
    except OSError:
        pass

    copy(hmet_card_file_path, hmet_card_file_path_temp)

    with io_open(hmet_card_file_path_temp, 'w', newline='\r\n') as out_hmet_list_file:
        with open(hmet_card_file_path) as old_hmet_list_file:
            for date_path in old_hmet_list_file:
                out_hmet_list_file.write(u"{0}\n".format(path.join(new_hmet_data_path,
                                                                   path.basename(date_path))))
    try:
        remove(hmet_card_file_path)
    except OSError:
        pass

    rename(hmet_card_file_path_temp, hmet_card_file_path) | python | def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
    """This function updates the paths in the HMET card file to the new
    location of the HMET data. This is necessary because the file paths
    are absolute and will need to be updated if moved.

    Args:
        hmet_card_file_path(str): Location of the file used for the HMET_ASCII card.
        new_hmet_data_path(str): Location where the HMET ASCII files are currently.

    Example::

        new_hmet_data_path = "E:\\GSSHA\\new_hmet_directory"
        hmet_card_file_path = "E:\\GSSHA\\hmet_card_file.txt"

        update_hmet_card_file(hmet_card_file_path, new_hmet_data_path)
    """
    hmet_card_file_path_temp = "{0}_tmp".format(hmet_card_file_path)
    try:
        remove(hmet_card_file_path_temp)
    except OSError:
        pass

    copy(hmet_card_file_path, hmet_card_file_path_temp)

    with io_open(hmet_card_file_path_temp, 'w', newline='\r\n') as out_hmet_list_file:
        with open(hmet_card_file_path) as old_hmet_list_file:
            for date_path in old_hmet_list_file:
                out_hmet_list_file.write(u"{0}\n".format(path.join(new_hmet_data_path,
                                                                   path.basename(date_path))))
    try:
        remove(hmet_card_file_path)
    except OSError:
        pass

    rename(hmet_card_file_path_temp, hmet_card_file_path) | [
"def", "update_hmet_card_file", "(", "hmet_card_file_path", ",", "new_hmet_data_path", ")",
":", "hmet_card_file_path_temp", "=", "\"{0}_tmp\"", ".", "format", "(", "hmet_card_file_path",
")", "try", ":", "remove", "(", "hmet_card_file_path_temp", ")", "except", "OSError", ":",
"pass", "copy", "(", "hmet_card_file_path", ",", "hmet_card_file_path_temp", ")", "with",
"io_open", "(", "hmet_card_file_path_temp", ",", "'w'", ",", "newline", "=", "'\\r\\n'", ")",
"as", "out_hmet_list_file", ":", "with", "open", "(", "hmet_card_file_path", ")", "as",
"old_hmet_list_file", ":", "for", "date_path", "in", "old_hmet_list_file", ":",
"out_hmet_list_file", ".", "write", "(", "u\"{0}\\n\"", ".", "format", "(", "path", ".",
"join", "(", "new_hmet_data_path", ",", "path", ".", "basename", "(", "date_path", ")", ")",
")", ")", "try", ":", "remove", "(", "hmet_card_file_path", ")", "except", "OSError", ":",
"pass", "rename", "(", "hmet_card_file_path_temp", ",", "hmet_card_file_path", ")"
] | This function updates the paths in the HMET card file to the new
location of the HMET data. This is necessary because the file paths
are absolute and will need to be updated if moved.

Args:
    hmet_card_file_path(str): Location of the file used for the HMET_ASCII card.
    new_hmet_data_path(str): Location where the HMET ASCII files are currently.

Example::

    new_hmet_data_path = "E:\\GSSHA\\new_hmet_directory"
    hmet_card_file_path = "E:\\GSSHA\\hmet_card_file.txt"

    update_hmet_card_file(hmet_card_file_path, new_hmet_data_path) | [
"This", "function", "updates", "the", "paths", "in", "the", "HMET", "card", "file", "to",
"the", "new", "location", "of", "the", "HMET", "data", ".", "This", "is", "necessary",
"because", "the", "file", "paths", "are", "absolute", "and", "will", "need", "to", "be",
"updated", "if", "moved", "."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L32-L67 | train |
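A standalone sketch of the rewrite pattern the record relies on: copy aside, rewrite into a sibling temp file, then move the result into place. File names are placeholders; newline='\r\n' mirrors the record's Windows-style line endings, and stripping each line before taking its basename is a small liberty the original does not take.

import io
from os import path, remove, rename

def repoint_list_file(list_file, new_dir):
    """Rewrite each line of list_file so its basename points under new_dir."""
    tmp = "{0}_tmp".format(list_file)
    with io.open(tmp, 'w', newline='\r\n') as out:
        with open(list_file) as old:
            for line in old:
                out.write(u"{0}\n".format(path.join(new_dir, path.basename(line.strip()))))
    remove(list_file)       # drop the original ...
    rename(tmp, list_file)  # ... and move the rewritten file into place

# repoint_list_file('hmet_card_file.txt', '/data/new_hmet_directory')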