id (int32, 0-252k) | repo (string, lengths 7-55) | path (string, lengths 4-127) | func_name (string, lengths 1-88) | original_string (string, lengths 75-19.8k) | language (string, 1 class) | code (string, lengths 75-19.8k) | code_tokens (list) | docstring (string, lengths 3-17.3k) | docstring_tokens (list) | sha (string, length 40) | url (string, lengths 87-242) |
---|---|---|---|---|---|---|---|---|---|---|---|
246,900 |
jmgilman/Neolib
|
neolib/pyamf/remoting/gateway/twisted.py
|
TwistedGateway._finaliseRequest
|
def _finaliseRequest(self, request, status, content, mimetype='text/plain'):
"""
Finalises the request.
@param request: The HTTP Request.
@type request: C{http.Request}
@param status: The HTTP status code.
@type status: C{int}
@param content: The content of the response.
@type content: C{str}
@param mimetype: The MIME type of the request.
@type mimetype: C{str}
"""
request.setResponseCode(status)
request.setHeader("Content-Type", mimetype)
request.setHeader("Content-Length", str(len(content)))
request.setHeader("Server", gateway.SERVER_NAME)
request.write(content)
request.finish()
|
python
|
def _finaliseRequest(self, request, status, content, mimetype='text/plain'):
"""
Finalises the request.
@param request: The HTTP Request.
@type request: C{http.Request}
@param status: The HTTP status code.
@type status: C{int}
@param content: The content of the response.
@type content: C{str}
@param mimetype: The MIME type of the request.
@type mimetype: C{str}
"""
request.setResponseCode(status)
request.setHeader("Content-Type", mimetype)
request.setHeader("Content-Length", str(len(content)))
request.setHeader("Server", gateway.SERVER_NAME)
request.write(content)
request.finish()
|
[
"def",
"_finaliseRequest",
"(",
"self",
",",
"request",
",",
"status",
",",
"content",
",",
"mimetype",
"=",
"'text/plain'",
")",
":",
"request",
".",
"setResponseCode",
"(",
"status",
")",
"request",
".",
"setHeader",
"(",
"\"Content-Type\"",
",",
"mimetype",
")",
"request",
".",
"setHeader",
"(",
"\"Content-Length\"",
",",
"str",
"(",
"len",
"(",
"content",
")",
")",
")",
"request",
".",
"setHeader",
"(",
"\"Server\"",
",",
"gateway",
".",
"SERVER_NAME",
")",
"request",
".",
"write",
"(",
"content",
")",
"request",
".",
"finish",
"(",
")"
] |
Finalises the request.
@param request: The HTTP Request.
@type request: C{http.Request}
@param status: The HTTP status code.
@type status: C{int}
@param content: The content of the response.
@type content: C{str}
@param mimetype: The MIME type of the request.
@type mimetype: C{str}
|
[
"Finalises",
"the",
"request",
"."
] |
228fafeaed0f3195676137732384a14820ae285c
|
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/twisted.py#L216-L236
|
246,901 |
jmgilman/Neolib
|
neolib/pyamf/remoting/gateway/twisted.py
|
TwistedGateway.render_POST
|
def render_POST(self, request):
"""
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
"""
def handleDecodeError(failure):
"""
Return HTTP 400 Bad Request.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 400, body)
request.content.seek(0, 0)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.decode, request.content.read(),
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
def cb(amf_request):
if self.logger:
self.logger.debug("AMF Request: %r" % amf_request)
x = self.getResponse(request, amf_request)
x.addCallback(self.sendResponse, request)
# Process the request
d.addCallback(cb).addErrback(handleDecodeError)
return server.NOT_DONE_YET
|
python
|
def render_POST(self, request):
"""
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
"""
def handleDecodeError(failure):
"""
Return HTTP 400 Bad Request.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 400, body)
request.content.seek(0, 0)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.decode, request.content.read(),
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
def cb(amf_request):
if self.logger:
self.logger.debug("AMF Request: %r" % amf_request)
x = self.getResponse(request, amf_request)
x.addCallback(self.sendResponse, request)
# Process the request
d.addCallback(cb).addErrback(handleDecodeError)
return server.NOT_DONE_YET
|
[
"def",
"render_POST",
"(",
"self",
",",
"request",
")",
":",
"def",
"handleDecodeError",
"(",
"failure",
")",
":",
"\"\"\"\n Return HTTP 400 Bad Request.\n \"\"\"",
"errMesg",
"=",
"\"%s: %s\"",
"%",
"(",
"failure",
".",
"type",
",",
"failure",
".",
"getErrorMessage",
"(",
")",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"error",
"(",
"errMesg",
")",
"self",
".",
"logger",
".",
"error",
"(",
"failure",
".",
"getTraceback",
"(",
")",
")",
"body",
"=",
"\"400 Bad Request\\n\\nThe request body was unable to \"",
"\"be successfully decoded.\"",
"if",
"self",
".",
"debug",
":",
"body",
"+=",
"\"\\n\\nTraceback:\\n\\n%s\"",
"%",
"failure",
".",
"getTraceback",
"(",
")",
"self",
".",
"_finaliseRequest",
"(",
"request",
",",
"400",
",",
"body",
")",
"request",
".",
"content",
".",
"seek",
"(",
"0",
",",
"0",
")",
"timezone_offset",
"=",
"self",
".",
"_get_timezone_offset",
"(",
")",
"d",
"=",
"threads",
".",
"deferToThread",
"(",
"remoting",
".",
"decode",
",",
"request",
".",
"content",
".",
"read",
"(",
")",
",",
"strict",
"=",
"self",
".",
"strict",
",",
"logger",
"=",
"self",
".",
"logger",
",",
"timezone_offset",
"=",
"timezone_offset",
")",
"def",
"cb",
"(",
"amf_request",
")",
":",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"AMF Request: %r\"",
"%",
"amf_request",
")",
"x",
"=",
"self",
".",
"getResponse",
"(",
"request",
",",
"amf_request",
")",
"x",
".",
"addCallback",
"(",
"self",
".",
"sendResponse",
",",
"request",
")",
"# Process the request",
"d",
".",
"addCallback",
"(",
"cb",
")",
".",
"addErrback",
"(",
"handleDecodeError",
")",
"return",
"server",
".",
"NOT_DONE_YET"
] |
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
|
[
"Read",
"remoting",
"request",
"from",
"the",
"client",
"."
] |
228fafeaed0f3195676137732384a14820ae285c
|
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/twisted.py#L238-L281
|
246,902 |
kylemede/KMlogger
|
KMlogger/kmLogger.py
|
KMlogger.getStreamLevel
|
def getStreamLevel(self):
"""
Get and return current stream handler's level.
"""
shlvl = 0
for i in range(0,len(self.handlers)):
h = self.handlers[i]
if isinstance(h,logging.StreamHandler):
shlvl = h.level
return shlvl
|
python
|
def getStreamLevel(self):
"""
Get and return current stream handler's level.
"""
shlvl = 0
for i in range(0,len(self.handlers)):
h = self.handlers[i]
if isinstance(h,logging.StreamHandler):
shlvl = h.level
return shlvl
|
[
"def",
"getStreamLevel",
"(",
"self",
")",
":",
"shlvl",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"handlers",
")",
")",
":",
"h",
"=",
"self",
".",
"handlers",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"h",
",",
"logging",
".",
"StreamHandler",
")",
":",
"shlvl",
"=",
"h",
".",
"level",
"return",
"shlvl"
] |
Get and return current stream handler's level.
|
[
"Get",
"and",
"return",
"current",
"stream",
"handler",
"s",
"level",
"."
] |
c904f0aaf35b4aff259567f932b5ca678d7f8176
|
https://github.com/kylemede/KMlogger/blob/c904f0aaf35b4aff259567f932b5ca678d7f8176/KMlogger/kmLogger.py#L89-L98
|
246,903 |
kylemede/KMlogger
|
KMlogger/kmLogger.py
|
KMlogger.logSystemInfo
|
def logSystemInfo(self):
"""
A function to be called just after a logging object is instantiated
to load the log up with info about the computer it is
being ran on and the software version. This function utilizes the
psutil and platform libraries, so they must be install for it to work.
For clarity of the log, it is suggested to perform immediately after
instantiation to put it at the top of the log file.
The messages this prints to the log will look like:
| System Information Summary:
| OS type = Linux
| OS Version = 3.9.10-100.fc17.x86_64
| Machine UserName = xxxxxx.astron.s.u-tokyo.ac.jp
| Machine Processor Type = x86_64
| Number of cores = 8
| Total RAM [GB] = 23.5403785706, % used = 15.9
| Python Version = '2.7.3'
"""
t = datetime.date.today()
infoStr = 'Date KMlogger object instantiated: '+t.strftime('%b %d, %Y')+'\n\n'
infoStr+="\n"+"="*11+' System Information Summary '+'='*11
infoStr+="\n"+'OS type = '+platform.uname()[0]
infoStr+="\n"+'OS Version = '+platform.uname()[2]
infoStr+="\n"+'Machine UserName = '+platform.uname()[1]
infoStr+="\n"+'Machine Processor Type = '+platform.processor()
multiprocessing.cpu_count()
## Some Macs have issues installing psutil, so I will remove it as a dependency for now.
#import psutil
#totMem = int(round(psutil.virtual_memory()[0]/1073741824.0))
#percentMem = int(round(psutil.virtual_memory()[2]))
infoStr+="\n"+'Python Version = '+repr(platform.python_version())
infoStr+="\n"+'='*50
self.fileonly(infoStr)
|
python
|
def logSystemInfo(self):
"""
A function to be called just after a logging object is instantiated
to load the log up with info about the computer it is
being ran on and the software version. This function utilizes the
psutil and platform libraries, so they must be install for it to work.
For clarity of the log, it is suggested to perform immediately after
instantiation to put it at the top of the log file.
The messages this prints to the log will look like:
| System Information Summary:
| OS type = Linux
| OS Version = 3.9.10-100.fc17.x86_64
| Machine UserName = xxxxxx.astron.s.u-tokyo.ac.jp
| Machine Processor Type = x86_64
| Number of cores = 8
| Total RAM [GB] = 23.5403785706, % used = 15.9
| Python Version = '2.7.3'
"""
t = datetime.date.today()
infoStr = 'Date KMlogger object instantiated: '+t.strftime('%b %d, %Y')+'\n\n'
infoStr+="\n"+"="*11+' System Information Summary '+'='*11
infoStr+="\n"+'OS type = '+platform.uname()[0]
infoStr+="\n"+'OS Version = '+platform.uname()[2]
infoStr+="\n"+'Machine UserName = '+platform.uname()[1]
infoStr+="\n"+'Machine Processor Type = '+platform.processor()
multiprocessing.cpu_count()
## Some Macs have issues installing psutil, so I will remove it as a dependency for now.
#import psutil
#totMem = int(round(psutil.virtual_memory()[0]/1073741824.0))
#percentMem = int(round(psutil.virtual_memory()[2]))
infoStr+="\n"+'Python Version = '+repr(platform.python_version())
infoStr+="\n"+'='*50
self.fileonly(infoStr)
|
[
"def",
"logSystemInfo",
"(",
"self",
")",
":",
"t",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"infoStr",
"=",
"'Date KMlogger object instantiated: '",
"+",
"t",
".",
"strftime",
"(",
"'%b %d, %Y'",
")",
"+",
"'\\n\\n'",
"infoStr",
"+=",
"\"\\n\"",
"+",
"\"=\"",
"*",
"11",
"+",
"' System Information Summary '",
"+",
"'='",
"*",
"11",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'OS type = '",
"+",
"platform",
".",
"uname",
"(",
")",
"[",
"0",
"]",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'OS Version = '",
"+",
"platform",
".",
"uname",
"(",
")",
"[",
"2",
"]",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'Machine UserName = '",
"+",
"platform",
".",
"uname",
"(",
")",
"[",
"1",
"]",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'Machine Processor Type = '",
"+",
"platform",
".",
"processor",
"(",
")",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"## Some Macs have issues installing psutil, so I will remove it as a dependency for now.",
"#import psutil ",
"#totMem = int(round(psutil.virtual_memory()[0]/1073741824.0))",
"#percentMem = int(round(psutil.virtual_memory()[2]))",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'Python Version = '",
"+",
"repr",
"(",
"platform",
".",
"python_version",
"(",
")",
")",
"infoStr",
"+=",
"\"\\n\"",
"+",
"'='",
"*",
"50",
"self",
".",
"fileonly",
"(",
"infoStr",
")"
] |
A function to be called just after a logging object is instantiated
to load the log up with info about the computer it is
being ran on and the software version. This function utilizes the
psutil and platform libraries, so they must be install for it to work.
For clarity of the log, it is suggested to perform immediately after
instantiation to put it at the top of the log file.
The messages this prints to the log will look like:
| System Information Summary:
| OS type = Linux
| OS Version = 3.9.10-100.fc17.x86_64
| Machine UserName = xxxxxx.astron.s.u-tokyo.ac.jp
| Machine Processor Type = x86_64
| Number of cores = 8
| Total RAM [GB] = 23.5403785706, % used = 15.9
| Python Version = '2.7.3'
|
[
"A",
"function",
"to",
"be",
"called",
"just",
"after",
"a",
"logging",
"object",
"is",
"instantiated",
"to",
"load",
"the",
"log",
"up",
"with",
"info",
"about",
"the",
"computer",
"it",
"is",
"being",
"ran",
"on",
"and",
"the",
"software",
"version",
".",
"This",
"function",
"utilizes",
"the",
"psutil",
"and",
"platform",
"libraries",
"so",
"they",
"must",
"be",
"install",
"for",
"it",
"to",
"work",
".",
"For",
"clarity",
"of",
"the",
"log",
"it",
"is",
"suggested",
"to",
"perform",
"immediately",
"after",
"instantiation",
"to",
"put",
"it",
"at",
"the",
"top",
"of",
"the",
"log",
"file",
"."
] |
c904f0aaf35b4aff259567f932b5ca678d7f8176
|
https://github.com/kylemede/KMlogger/blob/c904f0aaf35b4aff259567f932b5ca678d7f8176/KMlogger/kmLogger.py#L137-L173
|
246,904 |
exekias/droplet
|
droplet/module.py
|
ModuleMeta.install_wrapper
|
def install_wrapper(cls, install, new_class):
"""
Wrap the install method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if self.installed:
raise AssertionError('Module %s is already installed'
% self.verbose_name)
logger.info("Installing %s module" % self.verbose_name)
pre_install.send(sender=self)
res = install(self, *args, **kwargs)
post_install.send(sender=self)
info = self._info
info.status = ModuleInfo.DISABLED
info.save()
logger.info("Installed %s module" % self.verbose_name)
return res
return _wrapped
|
python
|
def install_wrapper(cls, install, new_class):
"""
Wrap the install method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if self.installed:
raise AssertionError('Module %s is already installed'
% self.verbose_name)
logger.info("Installing %s module" % self.verbose_name)
pre_install.send(sender=self)
res = install(self, *args, **kwargs)
post_install.send(sender=self)
info = self._info
info.status = ModuleInfo.DISABLED
info.save()
logger.info("Installed %s module" % self.verbose_name)
return res
return _wrapped
|
[
"def",
"install_wrapper",
"(",
"cls",
",",
"install",
",",
"new_class",
")",
":",
"def",
"_wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"installed",
":",
"raise",
"AssertionError",
"(",
"'Module %s is already installed'",
"%",
"self",
".",
"verbose_name",
")",
"logger",
".",
"info",
"(",
"\"Installing %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"pre_install",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"res",
"=",
"install",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"post_install",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"info",
"=",
"self",
".",
"_info",
"info",
".",
"status",
"=",
"ModuleInfo",
".",
"DISABLED",
"info",
".",
"save",
"(",
")",
"logger",
".",
"info",
"(",
"\"Installed %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"return",
"res",
"return",
"_wrapped"
] |
Wrap the install method to call pre and post enable signals and update
module status
|
[
"Wrap",
"the",
"install",
"method",
"to",
"call",
"pre",
"and",
"post",
"enable",
"signals",
"and",
"update",
"module",
"status"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L70-L92
|
246,905 |
exekias/droplet
|
droplet/module.py
|
ModuleMeta.enable_wrapper
|
def enable_wrapper(cls, enable, new_class):
"""
Wrap the enable method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.installed:
raise AssertionError('Module %s cannot be enabled'
', you should install it first'
% self.verbose_name)
if self.enabled:
raise AssertionError('Module %s is already enabled'
% self.verbose_name)
logger.info("Enabling %s module" % self.verbose_name)
pre_enable.send(sender=self)
res = enable(self, *args, **kwargs)
# Register interfaces (if present)
if isinstance(self, DropletInterface):
self.register()
post_enable.send(sender=self)
info = self._info
info.status = ModuleInfo.ENABLED
info.save()
logger.info("Enabled %s module" % self.verbose_name)
return res
return _wrapped
|
python
|
def enable_wrapper(cls, enable, new_class):
"""
Wrap the enable method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.installed:
raise AssertionError('Module %s cannot be enabled'
', you should install it first'
% self.verbose_name)
if self.enabled:
raise AssertionError('Module %s is already enabled'
% self.verbose_name)
logger.info("Enabling %s module" % self.verbose_name)
pre_enable.send(sender=self)
res = enable(self, *args, **kwargs)
# Register interfaces (if present)
if isinstance(self, DropletInterface):
self.register()
post_enable.send(sender=self)
info = self._info
info.status = ModuleInfo.ENABLED
info.save()
logger.info("Enabled %s module" % self.verbose_name)
return res
return _wrapped
|
[
"def",
"enable_wrapper",
"(",
"cls",
",",
"enable",
",",
"new_class",
")",
":",
"def",
"_wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"installed",
":",
"raise",
"AssertionError",
"(",
"'Module %s cannot be enabled'",
"', you should install it first'",
"%",
"self",
".",
"verbose_name",
")",
"if",
"self",
".",
"enabled",
":",
"raise",
"AssertionError",
"(",
"'Module %s is already enabled'",
"%",
"self",
".",
"verbose_name",
")",
"logger",
".",
"info",
"(",
"\"Enabling %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"pre_enable",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"res",
"=",
"enable",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Register interfaces (if present)",
"if",
"isinstance",
"(",
"self",
",",
"DropletInterface",
")",
":",
"self",
".",
"register",
"(",
")",
"post_enable",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"info",
"=",
"self",
".",
"_info",
"info",
".",
"status",
"=",
"ModuleInfo",
".",
"ENABLED",
"info",
".",
"save",
"(",
")",
"logger",
".",
"info",
"(",
"\"Enabled %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"return",
"res",
"return",
"_wrapped"
] |
Wrap the enable method to call pre and post enable signals and update
module status
|
[
"Wrap",
"the",
"enable",
"method",
"to",
"call",
"pre",
"and",
"post",
"enable",
"signals",
"and",
"update",
"module",
"status"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L95-L127
|
246,906 |
exekias/droplet
|
droplet/module.py
|
ModuleMeta.save_wrapper
|
def save_wrapper(cls, save, new_class):
"""
Wrap the save method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.installed:
raise AssertionError('Module %s is not installed' %
self.verbose_name)
logger.info("Saving %s module" % self.verbose_name)
pre_save.send(sender=self)
res = save(self, *args, **kwargs)
post_save.send(sender=self)
logger.info("Saved %s module" % self.verbose_name)
return res
return _wrapped
|
python
|
def save_wrapper(cls, save, new_class):
"""
Wrap the save method to call pre and post enable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.installed:
raise AssertionError('Module %s is not installed' %
self.verbose_name)
logger.info("Saving %s module" % self.verbose_name)
pre_save.send(sender=self)
res = save(self, *args, **kwargs)
post_save.send(sender=self)
logger.info("Saved %s module" % self.verbose_name)
return res
return _wrapped
|
[
"def",
"save_wrapper",
"(",
"cls",
",",
"save",
",",
"new_class",
")",
":",
"def",
"_wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"installed",
":",
"raise",
"AssertionError",
"(",
"'Module %s is not installed'",
"%",
"self",
".",
"verbose_name",
")",
"logger",
".",
"info",
"(",
"\"Saving %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"pre_save",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"res",
"=",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"post_save",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"logger",
".",
"info",
"(",
"\"Saved %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"return",
"res",
"return",
"_wrapped"
] |
Wrap the save method to call pre and post enable signals and update
module status
|
[
"Wrap",
"the",
"save",
"method",
"to",
"call",
"pre",
"and",
"post",
"enable",
"signals",
"and",
"update",
"module",
"status"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L130-L148
|
246,907 |
exekias/droplet
|
droplet/module.py
|
ModuleMeta.disable_wrapper
|
def disable_wrapper(cls, disable, new_class):
"""
Wrap the disable method to call pre and post disable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.enabled:
raise AssertionError('Module %s is already disabled'
% self.verbose_name)
logger.info("Disabling %s module" % self.verbose_name)
pre_disable.send(sender=self)
res = disable(self, *args, **kwargs)
# Unregister interfaces (if present)
if isinstance(self, DropletInterface):
self.unregister()
post_disable.send(sender=self)
info = self._info
info.status = ModuleInfo.DISABLED
info.save()
logger.info("Disabled %s module" % self.verbose_name)
return res
return _wrapped
|
python
|
def disable_wrapper(cls, disable, new_class):
"""
Wrap the disable method to call pre and post disable signals and update
module status
"""
def _wrapped(self, *args, **kwargs):
if not self.enabled:
raise AssertionError('Module %s is already disabled'
% self.verbose_name)
logger.info("Disabling %s module" % self.verbose_name)
pre_disable.send(sender=self)
res = disable(self, *args, **kwargs)
# Unregister interfaces (if present)
if isinstance(self, DropletInterface):
self.unregister()
post_disable.send(sender=self)
info = self._info
info.status = ModuleInfo.DISABLED
info.save()
logger.info("Disabled %s module" % self.verbose_name)
return res
return _wrapped
|
[
"def",
"disable_wrapper",
"(",
"cls",
",",
"disable",
",",
"new_class",
")",
":",
"def",
"_wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"enabled",
":",
"raise",
"AssertionError",
"(",
"'Module %s is already disabled'",
"%",
"self",
".",
"verbose_name",
")",
"logger",
".",
"info",
"(",
"\"Disabling %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"pre_disable",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"res",
"=",
"disable",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Unregister interfaces (if present)",
"if",
"isinstance",
"(",
"self",
",",
"DropletInterface",
")",
":",
"self",
".",
"unregister",
"(",
")",
"post_disable",
".",
"send",
"(",
"sender",
"=",
"self",
")",
"info",
"=",
"self",
".",
"_info",
"info",
".",
"status",
"=",
"ModuleInfo",
".",
"DISABLED",
"info",
".",
"save",
"(",
")",
"logger",
".",
"info",
"(",
"\"Disabled %s module\"",
"%",
"self",
".",
"verbose_name",
")",
"return",
"res",
"return",
"_wrapped"
] |
Wrap the disable method to call pre and post disable signals and update
module status
|
[
"Wrap",
"the",
"disable",
"method",
"to",
"call",
"pre",
"and",
"post",
"disable",
"signals",
"and",
"update",
"module",
"status"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L151-L178
|
246,908 |
exekias/droplet
|
droplet/module.py
|
Module._info
|
def _info(self):
"""
Module internal status representation
"""
name = self.__class__.__module__ + '.' + self.__class__.__name__
info, created = ModuleInfo.objects.get_or_create(name=name)
if created:
# Do not set as changed
info.commit()
return info
|
python
|
def _info(self):
"""
Module internal status representation
"""
name = self.__class__.__module__ + '.' + self.__class__.__name__
info, created = ModuleInfo.objects.get_or_create(name=name)
if created:
# Do not set as changed
info.commit()
return info
|
[
"def",
"_info",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"__class__",
".",
"__module__",
"+",
"'.'",
"+",
"self",
".",
"__class__",
".",
"__name__",
"info",
",",
"created",
"=",
"ModuleInfo",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"name",
")",
"if",
"created",
":",
"# Do not set as changed",
"info",
".",
"commit",
"(",
")",
"return",
"info"
] |
Module internal status representation
|
[
"Module",
"internal",
"status",
"representation"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L266-L275
|
246,909 |
exekias/droplet
|
droplet/module.py
|
Module.models
|
def models(self):
"""
Return all the models defined for this module
"""
app = get_app(self.__class__.__module__.split('.')[-2])
return get_models(app)
|
python
|
def models(self):
"""
Return all the models defined for this module
"""
app = get_app(self.__class__.__module__.split('.')[-2])
return get_models(app)
|
[
"def",
"models",
"(",
"self",
")",
":",
"app",
"=",
"get_app",
"(",
"self",
".",
"__class__",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"2",
"]",
")",
"return",
"get_models",
"(",
"app",
")"
] |
Return all the models defined for this module
|
[
"Return",
"all",
"the",
"models",
"defined",
"for",
"this",
"module"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L338-L343
|
246,910 |
exekias/droplet
|
droplet/module.py
|
Module.conf_files
|
def conf_files(self):
"""
List of configuration files for this module
"""
for attr in dir(self):
field = getattr(self, attr)
if isinstance(field, ConfFile):
yield field
|
python
|
def conf_files(self):
"""
List of configuration files for this module
"""
for attr in dir(self):
field = getattr(self, attr)
if isinstance(field, ConfFile):
yield field
|
[
"def",
"conf_files",
"(",
"self",
")",
":",
"for",
"attr",
"in",
"dir",
"(",
"self",
")",
":",
"field",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"isinstance",
"(",
"field",
",",
"ConfFile",
")",
":",
"yield",
"field"
] |
List of configuration files for this module
|
[
"List",
"of",
"configuration",
"files",
"for",
"this",
"module"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L380-L387
|
246,911 |
exekias/droplet
|
droplet/module.py
|
Module.daemons
|
def daemons(self):
"""
List of daemons for this module
"""
for attr in dir(self):
field = getattr(self, attr)
if isinstance(field, Daemon):
yield field
|
python
|
def daemons(self):
"""
List of daemons for this module
"""
for attr in dir(self):
field = getattr(self, attr)
if isinstance(field, Daemon):
yield field
|
[
"def",
"daemons",
"(",
"self",
")",
":",
"for",
"attr",
"in",
"dir",
"(",
"self",
")",
":",
"field",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"isinstance",
"(",
"field",
",",
"Daemon",
")",
":",
"yield",
"field"
] |
List of daemons for this module
|
[
"List",
"of",
"daemons",
"for",
"this",
"module"
] |
aeac573a2c1c4b774e99d5414a1c79b1bb734941
|
https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/module.py#L419-L426
|
246,912 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_endpoint_by_endpoint_id
|
def get_endpoint_by_endpoint_id(self, endpoint_id):
"""
Get an endpoint by endpoint id
"""
self._validate_uuid(endpoint_id)
url = "/notification/v1/endpoint/{}".format(endpoint_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
return self._endpoint_from_json(data.get("Endpoint"))
|
python
|
def get_endpoint_by_endpoint_id(self, endpoint_id):
"""
Get an endpoint by endpoint id
"""
self._validate_uuid(endpoint_id)
url = "/notification/v1/endpoint/{}".format(endpoint_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
return self._endpoint_from_json(data.get("Endpoint"))
|
[
"def",
"get_endpoint_by_endpoint_id",
"(",
"self",
",",
"endpoint_id",
")",
":",
"self",
".",
"_validate_uuid",
"(",
"endpoint_id",
")",
"url",
"=",
"\"/notification/v1/endpoint/{}\"",
".",
"format",
"(",
"endpoint_id",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"return",
"self",
".",
"_endpoint_from_json",
"(",
"data",
".",
"get",
"(",
"\"Endpoint\"",
")",
")"
] |
Get an endpoint by endpoint id
|
[
"Get",
"an",
"endpoint",
"by",
"endpoint",
"id"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L46-L59
|
246,913 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_endpoint_by_subscriber_id_and_protocol
|
def get_endpoint_by_subscriber_id_and_protocol(
self, subscriber_id, protocol):
"""
Get an endpoint by subscriber_id and protocol
"""
self._validate_subscriber_id(subscriber_id)
self._validate_endpoint_protocol(protocol)
url = "/notification/v1/endpoint?subscriber_id={}&protocol={}".format(
subscriber_id, protocol)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
try:
return self._endpoint_from_json(data.get("Endpoints")[0])
except IndexError:
raise DataFailureException(url, 404, "No SMS endpoint found")
|
python
|
def get_endpoint_by_subscriber_id_and_protocol(
self, subscriber_id, protocol):
"""
Get an endpoint by subscriber_id and protocol
"""
self._validate_subscriber_id(subscriber_id)
self._validate_endpoint_protocol(protocol)
url = "/notification/v1/endpoint?subscriber_id={}&protocol={}".format(
subscriber_id, protocol)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
try:
return self._endpoint_from_json(data.get("Endpoints")[0])
except IndexError:
raise DataFailureException(url, 404, "No SMS endpoint found")
|
[
"def",
"get_endpoint_by_subscriber_id_and_protocol",
"(",
"self",
",",
"subscriber_id",
",",
"protocol",
")",
":",
"self",
".",
"_validate_subscriber_id",
"(",
"subscriber_id",
")",
"self",
".",
"_validate_endpoint_protocol",
"(",
"protocol",
")",
"url",
"=",
"\"/notification/v1/endpoint?subscriber_id={}&protocol={}\"",
".",
"format",
"(",
"subscriber_id",
",",
"protocol",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"try",
":",
"return",
"self",
".",
"_endpoint_from_json",
"(",
"data",
".",
"get",
"(",
"\"Endpoints\"",
")",
"[",
"0",
"]",
")",
"except",
"IndexError",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"404",
",",
"\"No SMS endpoint found\"",
")"
] |
Get an endpoint by subscriber_id and protocol
|
[
"Get",
"an",
"endpoint",
"by",
"subscriber_id",
"and",
"protocol"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L61-L81
|
246,914 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_endpoint_by_address
|
def get_endpoint_by_address(self, endpoint_addr):
"""
Get an endpoint by address
"""
url = "/notification/v1/endpoint?endpoint_address={}".format(
endpoint_addr)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
try:
return self._endpoint_from_json(data.get("Endpoints")[0])
except IndexError:
raise DataFailureException(url, 404, "No SMS endpoint found")
|
python
|
def get_endpoint_by_address(self, endpoint_addr):
"""
Get an endpoint by address
"""
url = "/notification/v1/endpoint?endpoint_address={}".format(
endpoint_addr)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
try:
return self._endpoint_from_json(data.get("Endpoints")[0])
except IndexError:
raise DataFailureException(url, 404, "No SMS endpoint found")
|
[
"def",
"get_endpoint_by_address",
"(",
"self",
",",
"endpoint_addr",
")",
":",
"url",
"=",
"\"/notification/v1/endpoint?endpoint_address={}\"",
".",
"format",
"(",
"endpoint_addr",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"try",
":",
"return",
"self",
".",
"_endpoint_from_json",
"(",
"data",
".",
"get",
"(",
"\"Endpoints\"",
")",
"[",
"0",
"]",
")",
"except",
"IndexError",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"404",
",",
"\"No SMS endpoint found\"",
")"
] |
Get an endpoint by address
|
[
"Get",
"an",
"endpoint",
"by",
"address"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L83-L99
|
246,915 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_endpoints_by_subscriber_id
|
def get_endpoints_by_subscriber_id(self, subscriber_id):
"""
Search for all endpoints by a given subscriber
"""
self._validate_subscriber_id(subscriber_id)
url = "/notification/v1/endpoint?subscriber_id={}".format(
subscriber_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
endpoints = []
for datum in data.get("Endpoints", []):
endpoints.append(self._endpoint_from_json(datum))
return endpoints
|
python
|
def get_endpoints_by_subscriber_id(self, subscriber_id):
"""
Search for all endpoints by a given subscriber
"""
self._validate_subscriber_id(subscriber_id)
url = "/notification/v1/endpoint?subscriber_id={}".format(
subscriber_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
endpoints = []
for datum in data.get("Endpoints", []):
endpoints.append(self._endpoint_from_json(datum))
return endpoints
|
[
"def",
"get_endpoints_by_subscriber_id",
"(",
"self",
",",
"subscriber_id",
")",
":",
"self",
".",
"_validate_subscriber_id",
"(",
"subscriber_id",
")",
"url",
"=",
"\"/notification/v1/endpoint?subscriber_id={}\"",
".",
"format",
"(",
"subscriber_id",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"endpoints",
"=",
"[",
"]",
"for",
"datum",
"in",
"data",
".",
"get",
"(",
"\"Endpoints\"",
",",
"[",
"]",
")",
":",
"endpoints",
".",
"append",
"(",
"self",
".",
"_endpoint_from_json",
"(",
"datum",
")",
")",
"return",
"endpoints"
] |
Search for all endpoints by a given subscriber
|
[
"Search",
"for",
"all",
"endpoints",
"by",
"a",
"given",
"subscriber"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L101-L120
|
246,916 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.resend_sms_endpoint_verification
|
def resend_sms_endpoint_verification(self, endpoint_id):
"""
Calls NWS function to resend verification message to endpoint's
phone number
"""
self._validate_uuid(endpoint_id)
url = "/notification/v1/endpoint/{}/verification".format(endpoint_id)
response = NWS_DAO().postURL(url, None, None)
if response.status != 202:
raise DataFailureException(url, response.status, response.data)
return response.status
|
python
|
def resend_sms_endpoint_verification(self, endpoint_id):
"""
Calls NWS function to resend verification message to endpoint's
phone number
"""
self._validate_uuid(endpoint_id)
url = "/notification/v1/endpoint/{}/verification".format(endpoint_id)
response = NWS_DAO().postURL(url, None, None)
if response.status != 202:
raise DataFailureException(url, response.status, response.data)
return response.status
|
[
"def",
"resend_sms_endpoint_verification",
"(",
"self",
",",
"endpoint_id",
")",
":",
"self",
".",
"_validate_uuid",
"(",
"endpoint_id",
")",
"url",
"=",
"\"/notification/v1/endpoint/{}/verification\"",
".",
"format",
"(",
"endpoint_id",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"postURL",
"(",
"url",
",",
"None",
",",
"None",
")",
"if",
"response",
".",
"status",
"!=",
"202",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"return",
"response",
".",
"status"
] |
Calls NWS function to resend verification message to endpoint's
phone number
|
[
"Calls",
"NWS",
"function",
"to",
"resend",
"verification",
"message",
"to",
"endpoint",
"s",
"phone",
"number"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L122-L135
|
246,917 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_subscriptions_by_subscriber_id
|
def get_subscriptions_by_subscriber_id(
self, subscriber_id, max_results=10):
"""
Search for all subscriptions by a given subscriber
"""
return self.search_subscriptions(
subscriber_id=subscriber_id, max_results=max_results)
|
python
|
def get_subscriptions_by_subscriber_id(
self, subscriber_id, max_results=10):
"""
Search for all subscriptions by a given subscriber
"""
return self.search_subscriptions(
subscriber_id=subscriber_id, max_results=max_results)
|
[
"def",
"get_subscriptions_by_subscriber_id",
"(",
"self",
",",
"subscriber_id",
",",
"max_results",
"=",
"10",
")",
":",
"return",
"self",
".",
"search_subscriptions",
"(",
"subscriber_id",
"=",
"subscriber_id",
",",
"max_results",
"=",
"max_results",
")"
] |
Search for all subscriptions by a given subscriber
|
[
"Search",
"for",
"all",
"subscriptions",
"by",
"a",
"given",
"subscriber"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L237-L243
|
246,918 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_subscriptions_by_channel_id_and_subscriber_id
|
def get_subscriptions_by_channel_id_and_subscriber_id(
self, channel_id, subscriber_id):
"""
Search for all subscriptions by a given channel and subscriber
"""
return self.search_subscriptions(
channel_id=channel_id, subscriber_id=subscriber_id)
|
python
|
def get_subscriptions_by_channel_id_and_subscriber_id(
self, channel_id, subscriber_id):
"""
Search for all subscriptions by a given channel and subscriber
"""
return self.search_subscriptions(
channel_id=channel_id, subscriber_id=subscriber_id)
|
[
"def",
"get_subscriptions_by_channel_id_and_subscriber_id",
"(",
"self",
",",
"channel_id",
",",
"subscriber_id",
")",
":",
"return",
"self",
".",
"search_subscriptions",
"(",
"channel_id",
"=",
"channel_id",
",",
"subscriber_id",
"=",
"subscriber_id",
")"
] |
Search for all subscriptions by a given channel and subscriber
|
[
"Search",
"for",
"all",
"subscriptions",
"by",
"a",
"given",
"channel",
"and",
"subscriber"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L245-L251
|
246,919 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_subscriptions_by_channel_id_and_person_id
|
def get_subscriptions_by_channel_id_and_person_id(
self, channel_id, person_id):
"""
Search for all subscriptions by a given channel and person
"""
return self.search_subscriptions(
channel_id=channel_id, person_id=person_id)
|
python
|
def get_subscriptions_by_channel_id_and_person_id(
self, channel_id, person_id):
"""
Search for all subscriptions by a given channel and person
"""
return self.search_subscriptions(
channel_id=channel_id, person_id=person_id)
|
[
"def",
"get_subscriptions_by_channel_id_and_person_id",
"(",
"self",
",",
"channel_id",
",",
"person_id",
")",
":",
"return",
"self",
".",
"search_subscriptions",
"(",
"channel_id",
"=",
"channel_id",
",",
"person_id",
"=",
"person_id",
")"
] |
Search for all subscriptions by a given channel and person
|
[
"Search",
"for",
"all",
"subscriptions",
"by",
"a",
"given",
"channel",
"and",
"person"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L253-L259
|
246,920 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_subscription_by_channel_id_and_endpoint_id
|
def get_subscription_by_channel_id_and_endpoint_id(
self, channel_id, endpoint_id):
"""
Search for subscription by a given channel and endpoint
"""
subscriptions = self.search_subscriptions(
channel_id=channel_id, endpoint_id=endpoint_id)
try:
return subscriptions[0]
except IndexError:
raise DataFailureException(url, 404, "No subscription found")
|
python
|
def get_subscription_by_channel_id_and_endpoint_id(
self, channel_id, endpoint_id):
"""
Search for subscription by a given channel and endpoint
"""
subscriptions = self.search_subscriptions(
channel_id=channel_id, endpoint_id=endpoint_id)
try:
return subscriptions[0]
except IndexError:
raise DataFailureException(url, 404, "No subscription found")
|
[
"def",
"get_subscription_by_channel_id_and_endpoint_id",
"(",
"self",
",",
"channel_id",
",",
"endpoint_id",
")",
":",
"subscriptions",
"=",
"self",
".",
"search_subscriptions",
"(",
"channel_id",
"=",
"channel_id",
",",
"endpoint_id",
"=",
"endpoint_id",
")",
"try",
":",
"return",
"subscriptions",
"[",
"0",
"]",
"except",
"IndexError",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"404",
",",
"\"No subscription found\"",
")"
] |
Search for subscription by a given channel and endpoint
|
[
"Search",
"for",
"subscription",
"by",
"a",
"given",
"channel",
"and",
"endpoint"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L261-L272
|
246,921 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.search_subscriptions
|
def search_subscriptions(self, **kwargs):
"""
Search for all subscriptions by parameters
"""
params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
url = "/notification/v1/subscription?{}".format(
urlencode(params, doseq=True))
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
subscriptions = []
for datum in data.get("Subscriptions", []):
subscriptions.append(self._subscription_from_json(datum))
return subscriptions
|
python
|
def search_subscriptions(self, **kwargs):
"""
Search for all subscriptions by parameters
"""
params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
url = "/notification/v1/subscription?{}".format(
urlencode(params, doseq=True))
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
subscriptions = []
for datum in data.get("Subscriptions", []):
subscriptions.append(self._subscription_from_json(datum))
return subscriptions
|
[
"def",
"search_subscriptions",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"[",
"(",
"key",
",",
"kwargs",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"sorted",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"]",
"url",
"=",
"\"/notification/v1/subscription?{}\"",
".",
"format",
"(",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"True",
")",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"subscriptions",
"=",
"[",
"]",
"for",
"datum",
"in",
"data",
".",
"get",
"(",
"\"Subscriptions\"",
",",
"[",
"]",
")",
":",
"subscriptions",
".",
"append",
"(",
"self",
".",
"_subscription_from_json",
"(",
"datum",
")",
")",
"return",
"subscriptions"
] |
Search for all subscriptions by parameters
|
[
"Search",
"for",
"all",
"subscriptions",
"by",
"parameters"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L274-L291
|
246,922 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_channel_by_channel_id
|
def get_channel_by_channel_id(self, channel_id):
"""
Get a channel by channel id
"""
self._validate_uuid(channel_id)
url = "/notification/v1/channel/{}".format(channel_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
return self._channel_from_json(data.get("Channel"))
|
python
|
def get_channel_by_channel_id(self, channel_id):
"""
Get a channel by channel id
"""
self._validate_uuid(channel_id)
url = "/notification/v1/channel/{}".format(channel_id)
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
return self._channel_from_json(data.get("Channel"))
|
[
"def",
"get_channel_by_channel_id",
"(",
"self",
",",
"channel_id",
")",
":",
"self",
".",
"_validate_uuid",
"(",
"channel_id",
")",
"url",
"=",
"\"/notification/v1/channel/{}\"",
".",
"format",
"(",
"channel_id",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"return",
"self",
".",
"_channel_from_json",
"(",
"data",
".",
"get",
"(",
"\"Channel\"",
")",
")"
] |
Get a channel by channel id
|
[
"Get",
"a",
"channel",
"by",
"channel",
"id"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L293-L307
|
246,923 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_channels_by_sln
|
def get_channels_by_sln(self, channel_type, sln):
"""
Search for all channels by sln
"""
return self.search_channels(type=channel_type, tag_sln=sln)
|
python
|
def get_channels_by_sln(self, channel_type, sln):
"""
Search for all channels by sln
"""
return self.search_channels(type=channel_type, tag_sln=sln)
|
[
"def",
"get_channels_by_sln",
"(",
"self",
",",
"channel_type",
",",
"sln",
")",
":",
"return",
"self",
".",
"search_channels",
"(",
"type",
"=",
"channel_type",
",",
"tag_sln",
"=",
"sln",
")"
] |
Search for all channels by sln
|
[
"Search",
"for",
"all",
"channels",
"by",
"sln"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L309-L313
|
246,924 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_channels_by_sln_year_quarter
|
def get_channels_by_sln_year_quarter(
self, channel_type, sln, year, quarter):
"""
Search for all channels by sln, year and quarter
"""
return self.search_channels(
type=channel_type, tag_sln=sln, tag_year=year, tag_quarter=quarter)
|
python
|
def get_channels_by_sln_year_quarter(
self, channel_type, sln, year, quarter):
"""
Search for all channels by sln, year and quarter
"""
return self.search_channels(
type=channel_type, tag_sln=sln, tag_year=year, tag_quarter=quarter)
|
[
"def",
"get_channels_by_sln_year_quarter",
"(",
"self",
",",
"channel_type",
",",
"sln",
",",
"year",
",",
"quarter",
")",
":",
"return",
"self",
".",
"search_channels",
"(",
"type",
"=",
"channel_type",
",",
"tag_sln",
"=",
"sln",
",",
"tag_year",
"=",
"year",
",",
"tag_quarter",
"=",
"quarter",
")"
] |
Search for all channels by sln, year and quarter
|
[
"Search",
"for",
"all",
"channels",
"by",
"sln",
"year",
"and",
"quarter"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L315-L321
|
246,925 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.get_active_channels_by_year_quarter
|
def get_active_channels_by_year_quarter(
self, channel_type, year, quarter, expires=None):
"""
Search for all active channels by year and quarter
"""
if expires is None:
# Set expires_after to midnight of current day
expires = datetime.combine(datetime.utcnow().date(), time.min)
return self.search_channels(
type=channel_type, tag_year=year, tag_quarter=quarter,
expires_after=expires.isoformat())
|
python
|
def get_active_channels_by_year_quarter(
self, channel_type, year, quarter, expires=None):
"""
Search for all active channels by year and quarter
"""
if expires is None:
# Set expires_after to midnight of current day
expires = datetime.combine(datetime.utcnow().date(), time.min)
return self.search_channels(
type=channel_type, tag_year=year, tag_quarter=quarter,
expires_after=expires.isoformat())
|
[
"def",
"get_active_channels_by_year_quarter",
"(",
"self",
",",
"channel_type",
",",
"year",
",",
"quarter",
",",
"expires",
"=",
"None",
")",
":",
"if",
"expires",
"is",
"None",
":",
"# Set expires_after to midnight of current day",
"expires",
"=",
"datetime",
".",
"combine",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"date",
"(",
")",
",",
"time",
".",
"min",
")",
"return",
"self",
".",
"search_channels",
"(",
"type",
"=",
"channel_type",
",",
"tag_year",
"=",
"year",
",",
"tag_quarter",
"=",
"quarter",
",",
"expires_after",
"=",
"expires",
".",
"isoformat",
"(",
")",
")"
] |
Search for all active channels by year and quarter
|
[
"Search",
"for",
"all",
"active",
"channels",
"by",
"year",
"and",
"quarter"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L323-L334
|
246,926 |
uw-it-aca/uw-restclients-nws
|
uw_nws/__init__.py
|
NWS.search_channels
|
def search_channels(self, **kwargs):
"""
Search for all channels by parameters
"""
params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
url = "/notification/v1/channel?{}".format(
urlencode(params, doseq=True))
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
channels = []
for datum in data.get("Channels", []):
channels.append(self._channel_from_json(datum))
return channels
|
python
|
def search_channels(self, **kwargs):
"""
Search for all channels by parameters
"""
params = [(key, kwargs[key]) for key in sorted(kwargs.keys())]
url = "/notification/v1/channel?{}".format(
urlencode(params, doseq=True))
response = NWS_DAO().getURL(url, self._read_headers)
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
channels = []
for datum in data.get("Channels", []):
channels.append(self._channel_from_json(datum))
return channels
|
[
"def",
"search_channels",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"[",
"(",
"key",
",",
"kwargs",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"sorted",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"]",
"url",
"=",
"\"/notification/v1/channel?{}\"",
".",
"format",
"(",
"urlencode",
"(",
"params",
",",
"doseq",
"=",
"True",
")",
")",
"response",
"=",
"NWS_DAO",
"(",
")",
".",
"getURL",
"(",
"url",
",",
"self",
".",
"_read_headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"DataFailureException",
"(",
"url",
",",
"response",
".",
"status",
",",
"response",
".",
"data",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"channels",
"=",
"[",
"]",
"for",
"datum",
"in",
"data",
".",
"get",
"(",
"\"Channels\"",
",",
"[",
"]",
")",
":",
"channels",
".",
"append",
"(",
"self",
".",
"_channel_from_json",
"(",
"datum",
")",
")",
"return",
"channels"
] |
Search for all channels by parameters
|
[
"Search",
"for",
"all",
"channels",
"by",
"parameters"
] |
ec6fd14342ffc883d14bcb53b2fe9bc288696027
|
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L336-L353
|
246,927 |
knagra/farnsworth
|
base/pipeline.py
|
_get_first_last
|
def _get_first_last(details):
"""
Gets a user's first and last name from details.
"""
if "first_name" in details and "last_name" in details:
return details["first_name"], details["last_name"]
elif "first_name" in details:
lst = details["first_name"].rsplit(" ", 1)
if len(lst) == 2:
return lst
else:
return lst[0], ""
elif "last_name" in details:
return "", details["last_name"]
return "", ""
|
python
|
def _get_first_last(details):
"""
Gets a user's first and last name from details.
"""
if "first_name" in details and "last_name" in details:
return details["first_name"], details["last_name"]
elif "first_name" in details:
lst = details["first_name"].rsplit(" ", 1)
if len(lst) == 2:
return lst
else:
return lst[0], ""
elif "last_name" in details:
return "", details["last_name"]
return "", ""
|
[
"def",
"_get_first_last",
"(",
"details",
")",
":",
"if",
"\"first_name\"",
"in",
"details",
"and",
"\"last_name\"",
"in",
"details",
":",
"return",
"details",
"[",
"\"first_name\"",
"]",
",",
"details",
"[",
"\"last_name\"",
"]",
"elif",
"\"first_name\"",
"in",
"details",
":",
"lst",
"=",
"details",
"[",
"\"first_name\"",
"]",
".",
"rsplit",
"(",
"\" \"",
",",
"1",
")",
"if",
"len",
"(",
"lst",
")",
"==",
"2",
":",
"return",
"lst",
"else",
":",
"return",
"lst",
"[",
"0",
"]",
",",
"\"\"",
"elif",
"\"last_name\"",
"in",
"details",
":",
"return",
"\"\"",
",",
"details",
"[",
"\"last_name\"",
"]",
"return",
"\"\"",
",",
"\"\""
] |
Gets a user's first and last name from details.
|
[
"Gets",
"a",
"user",
"s",
"first",
"and",
"last",
"name",
"from",
"details",
"."
] |
1b6589f0d9fea154f0a1e2231ed906764ed26d26
|
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/base/pipeline.py#L17-L32
|
246,928 |
minhhoit/yacms
|
yacms/boot/__init__.py
|
import_field
|
def import_field(field_classpath):
"""
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
"""
if '.' in field_classpath:
fully_qualified = field_classpath
else:
fully_qualified = "django.db.models.%s" % field_classpath
try:
return import_dotted_path(fully_qualified)
except ImportError:
raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
"the field '%s' which could not be "
"imported." % field_classpath)
|
python
|
def import_field(field_classpath):
"""
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
"""
if '.' in field_classpath:
fully_qualified = field_classpath
else:
fully_qualified = "django.db.models.%s" % field_classpath
try:
return import_dotted_path(fully_qualified)
except ImportError:
raise ImproperlyConfigured("The EXTRA_MODEL_FIELDS setting contains "
"the field '%s' which could not be "
"imported." % field_classpath)
|
[
"def",
"import_field",
"(",
"field_classpath",
")",
":",
"if",
"'.'",
"in",
"field_classpath",
":",
"fully_qualified",
"=",
"field_classpath",
"else",
":",
"fully_qualified",
"=",
"\"django.db.models.%s\"",
"%",
"field_classpath",
"try",
":",
"return",
"import_dotted_path",
"(",
"fully_qualified",
")",
"except",
"ImportError",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"The EXTRA_MODEL_FIELDS setting contains \"",
"\"the field '%s' which could not be \"",
"\"imported.\"",
"%",
"field_classpath",
")"
] |
Imports a field by its dotted class path, prepending "django.db.models"
to raw class names and raising an exception if the import fails.
|
[
"Imports",
"a",
"field",
"by",
"its",
"dotted",
"class",
"path",
"prepending",
"django",
".",
"db",
".",
"models",
"to",
"raw",
"class",
"names",
"and",
"raising",
"an",
"exception",
"if",
"the",
"import",
"fails",
"."
] |
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
|
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/boot/__init__.py#L38-L52
|
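A usage sketch for the `import_field` record above, assuming a configured Django environment so the dotted-path import can succeed.

field_cls = import_field("CharField")                   # bare name gets the "django.db.models." prefix
field_cls = import_field("django.db.models.SlugField")  # dotted path is used verbatim
# An unresolvable path raises ImproperlyConfigured instead of a bare ImportError.
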
246,929 |
minhhoit/yacms
|
yacms/boot/__init__.py
|
add_extra_model_fields
|
def add_extra_model_fields(sender, **kwargs):
"""
Injects custom fields onto the given sender model as defined
by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over
the "fields" variable.
"""
model_key = sender._meta.app_label, sender._meta.model_name
for field_name, field in fields.get(model_key, {}):
field.contribute_to_class(sender, field_name)
|
python
|
def add_extra_model_fields(sender, **kwargs):
"""
Injects custom fields onto the given sender model as defined
by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over
the "fields" variable.
"""
model_key = sender._meta.app_label, sender._meta.model_name
for field_name, field in fields.get(model_key, {}):
field.contribute_to_class(sender, field_name)
|
[
"def",
"add_extra_model_fields",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"model_key",
"=",
"sender",
".",
"_meta",
".",
"app_label",
",",
"sender",
".",
"_meta",
".",
"model_name",
"for",
"field_name",
",",
"field",
"in",
"fields",
".",
"get",
"(",
"model_key",
",",
"{",
"}",
")",
":",
"field",
".",
"contribute_to_class",
"(",
"sender",
",",
"field_name",
")"
] |
Injects custom fields onto the given sender model as defined
by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over
the "fields" variable.
|
[
"Injects",
"custom",
"fields",
"onto",
"the",
"given",
"sender",
"model",
"as",
"defined",
"by",
"the",
"EXTRA_MODEL_FIELDS",
"setting",
".",
"This",
"is",
"a",
"closure",
"over",
"the",
"fields",
"variable",
"."
] |
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
|
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/boot/__init__.py#L80-L88
|
246,930 |
jmvrbanac/PackMap
|
packmap/finder.py
|
main
|
def main(*args, **kwargs):
""" Simple entry-point that takes the package name and json output"""
parser = argparse.ArgumentParser()
parser.add_argument('package_name', type=str)
parser.add_argument('output_filename', type=str)
parsed_args = parser.parse_args()
# Find all package requirements
get_all_requires(parsed_args.package_name)
# Write tmp results to a file to be read into main app context
json_file = open(parsed_args.output_filename, 'w')
json_file.write(json.dumps(all_requires))
|
python
|
def main(*args, **kwargs):
""" Simple entry-point that takes the package name and json output"""
parser = argparse.ArgumentParser()
parser.add_argument('package_name', type=str)
parser.add_argument('output_filename', type=str)
parsed_args = parser.parse_args()
# Find all package requirements
get_all_requires(parsed_args.package_name)
# Write tmp results to a file to be read into main app context
json_file = open(parsed_args.output_filename, 'w')
json_file.write(json.dumps(all_requires))
|
[
"def",
"main",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'package_name'",
",",
"type",
"=",
"str",
")",
"parser",
".",
"add_argument",
"(",
"'output_filename'",
",",
"type",
"=",
"str",
")",
"parsed_args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Find all package requirements",
"get_all_requires",
"(",
"parsed_args",
".",
"package_name",
")",
"# Write tmp results to a file to be read into main app context",
"json_file",
"=",
"open",
"(",
"parsed_args",
".",
"output_filename",
",",
"'w'",
")",
"json_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"all_requires",
")",
")"
] |
Simple entry-point that takes the package name and json output
|
[
"Simple",
"entry",
"-",
"point",
"that",
"takes",
"the",
"package",
"name",
"and",
"json",
"output"
] |
e35d12d21ab109cae1175e0dc94d7f4855256b23
|
https://github.com/jmvrbanac/PackMap/blob/e35d12d21ab109cae1175e0dc94d7f4855256b23/packmap/finder.py#L57-L71
|
246,931 |
wdbm/shijian
|
shijian.py
|
tail
|
def tail(
filepath = "log.txt",
lines = 50
):
"""
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
"""
try:
filepath = os.path.expanduser(os.path.expandvars(filepath))
if os.path.isfile(filepath):
text = subprocess.check_output(["tail", "-" + str(lines), filepath])
if text:
return text
else:
return False
else:
return False
except:
return False
|
python
|
def tail(
filepath = "log.txt",
lines = 50
):
"""
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
"""
try:
filepath = os.path.expanduser(os.path.expandvars(filepath))
if os.path.isfile(filepath):
text = subprocess.check_output(["tail", "-" + str(lines), filepath])
if text:
return text
else:
return False
else:
return False
except:
return False
|
[
"def",
"tail",
"(",
"filepath",
"=",
"\"log.txt\"",
",",
"lines",
"=",
"50",
")",
":",
"try",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"filepath",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"text",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"tail\"",
",",
"\"-\"",
"+",
"str",
"(",
"lines",
")",
",",
"filepath",
"]",
")",
"if",
"text",
":",
"return",
"text",
"else",
":",
"return",
"False",
"else",
":",
"return",
"False",
"except",
":",
"return",
"False"
] |
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
|
[
"Return",
"a",
"specified",
"number",
"of",
"last",
"lines",
"of",
"a",
"specified",
"file",
".",
"If",
"there",
"is",
"an",
"error",
"or",
"the",
"file",
"does",
"not",
"exist",
"return",
"False",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L657-L676
|
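A usage sketch for the `tail` record above; the log path is a placeholder, and on Python 3 `subprocess.check_output` returns bytes, so the result is decoded before printing.

last_lines = tail(filepath="/var/log/syslog", lines=10)  # "~" and $VARS in the path are expanded first
if last_lines is False:
    print("file missing or tail command failed")
else:
    print(last_lines.decode() if isinstance(last_lines, bytes) else last_lines)
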
246,932 |
wdbm/shijian
|
shijian.py
|
convert_type_list_elements
|
def convert_type_list_elements(
list_object = None,
element_type = str
):
"""
Recursively convert all elements and all elements of all sublists of a list
to a specified type and return the new list.
"""
if element_type is str:
return [str(element) if not isinstance(element, list) else convert_type_list_elements(
list_object = element,
element_type = str
) for element in list_object]
|
python
|
def convert_type_list_elements(
list_object = None,
element_type = str
):
"""
Recursively convert all elements and all elements of all sublists of a list
to a specified type and return the new list.
"""
if element_type is str:
return [str(element) if not isinstance(element, list) else convert_type_list_elements(
list_object = element,
element_type = str
) for element in list_object]
|
[
"def",
"convert_type_list_elements",
"(",
"list_object",
"=",
"None",
",",
"element_type",
"=",
"str",
")",
":",
"if",
"element_type",
"is",
"str",
":",
"return",
"[",
"str",
"(",
"element",
")",
"if",
"not",
"isinstance",
"(",
"element",
",",
"list",
")",
"else",
"convert_type_list_elements",
"(",
"list_object",
"=",
"element",
",",
"element_type",
"=",
"str",
")",
"for",
"element",
"in",
"list_object",
"]"
] |
Recursively convert all elements and all elements of all sublists of a list
to a specified type and return the new list.
|
[
"Recursively",
"convert",
"all",
"elements",
"and",
"all",
"elements",
"of",
"all",
"sublists",
"of",
"a",
"list",
"to",
"a",
"specified",
"type",
"and",
"return",
"the",
"new",
"list",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L878-L890
|
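A sketch for the `convert_type_list_elements` record above; only `element_type=str` is handled by the record, so any other type falls through and the function implicitly returns None.

mixed = [1, 2.5, [3, [4, True]]]
print(convert_type_list_elements(list_object=mixed, element_type=str))
# -> ['1', '2.5', ['3', ['4', 'True']]]  (nesting preserved, every leaf stringified)
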
246,933 |
wdbm/shijian
|
shijian.py
|
select_spread
|
def select_spread(
list_of_elements = None,
number_of_elements = None
):
"""
This function returns the specified number of elements of a list spread
approximately evenly.
"""
if len(list_of_elements) <= number_of_elements:
return list_of_elements
if number_of_elements == 0:
return []
if number_of_elements == 1:
return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
return \
[list_of_elements[int(round((len(list_of_elements) - 1) /\
(2 * number_of_elements)))]] +\
select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
(number_of_elements))):], number_of_elements - 1)
|
python
|
def select_spread(
list_of_elements = None,
number_of_elements = None
):
"""
This function returns the specified number of elements of a list spread
approximately evenly.
"""
if len(list_of_elements) <= number_of_elements:
return list_of_elements
if number_of_elements == 0:
return []
if number_of_elements == 1:
return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
return \
[list_of_elements[int(round((len(list_of_elements) - 1) /\
(2 * number_of_elements)))]] +\
select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
(number_of_elements))):], number_of_elements - 1)
|
[
"def",
"select_spread",
"(",
"list_of_elements",
"=",
"None",
",",
"number_of_elements",
"=",
"None",
")",
":",
"if",
"len",
"(",
"list_of_elements",
")",
"<=",
"number_of_elements",
":",
"return",
"list_of_elements",
"if",
"number_of_elements",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"number_of_elements",
"==",
"1",
":",
"return",
"[",
"list_of_elements",
"[",
"int",
"(",
"round",
"(",
"(",
"len",
"(",
"list_of_elements",
")",
"-",
"1",
")",
"/",
"2",
")",
")",
"]",
"]",
"return",
"[",
"list_of_elements",
"[",
"int",
"(",
"round",
"(",
"(",
"len",
"(",
"list_of_elements",
")",
"-",
"1",
")",
"/",
"(",
"2",
"*",
"number_of_elements",
")",
")",
")",
"]",
"]",
"+",
"select_spread",
"(",
"list_of_elements",
"[",
"int",
"(",
"round",
"(",
"(",
"len",
"(",
"list_of_elements",
")",
"-",
"1",
")",
"/",
"(",
"number_of_elements",
")",
")",
")",
":",
"]",
",",
"number_of_elements",
"-",
"1",
")"
] |
This function returns the specified number of elements of a list spread
approximately evenly.
|
[
"This",
"function",
"returns",
"the",
"specified",
"number",
"of",
"elements",
"of",
"a",
"list",
"spread",
"approximately",
"evenly",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L997-L1015
|
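A sketch for the `select_spread` record above, picking three roughly evenly spaced elements out of ten; the exact indices depend on Python's round-half-to-even behaviour.

print(select_spread(list_of_elements=list(range(10)), number_of_elements=3))
# e.g. [2, 5, 8]
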
246,934 |
wdbm/shijian
|
shijian.py
|
split_list
|
def split_list(
list_object = None,
granularity = None
):
"""
This function splits a list into a specified number of lists. It returns a
list of lists that correspond to these parts. Negative numbers of parts are
not accepted and numbers of parts greater than the number of elements in the
list result in the maximum possible number of lists being returned.
"""
if granularity < 0:
raise Exception("negative granularity")
mean_length = len(list_object) / float(granularity)
split_list_object = []
last_length = float(0)
if len(list_object) > granularity:
while last_length < len(list_object):
split_list_object.append(
list_object[int(last_length):int(last_length + mean_length)]
)
last_length += mean_length
else:
split_list_object = [[element] for element in list_object]
return split_list_object
|
python
|
def split_list(
list_object = None,
granularity = None
):
"""
This function splits a list into a specified number of lists. It returns a
list of lists that correspond to these parts. Negative numbers of parts are
not accepted and numbers of parts greater than the number of elements in the
list result in the maximum possible number of lists being returned.
"""
if granularity < 0:
raise Exception("negative granularity")
mean_length = len(list_object) / float(granularity)
split_list_object = []
last_length = float(0)
if len(list_object) > granularity:
while last_length < len(list_object):
split_list_object.append(
list_object[int(last_length):int(last_length + mean_length)]
)
last_length += mean_length
else:
split_list_object = [[element] for element in list_object]
return split_list_object
|
[
"def",
"split_list",
"(",
"list_object",
"=",
"None",
",",
"granularity",
"=",
"None",
")",
":",
"if",
"granularity",
"<",
"0",
":",
"raise",
"Exception",
"(",
"\"negative granularity\"",
")",
"mean_length",
"=",
"len",
"(",
"list_object",
")",
"/",
"float",
"(",
"granularity",
")",
"split_list_object",
"=",
"[",
"]",
"last_length",
"=",
"float",
"(",
"0",
")",
"if",
"len",
"(",
"list_object",
")",
">",
"granularity",
":",
"while",
"last_length",
"<",
"len",
"(",
"list_object",
")",
":",
"split_list_object",
".",
"append",
"(",
"list_object",
"[",
"int",
"(",
"last_length",
")",
":",
"int",
"(",
"last_length",
"+",
"mean_length",
")",
"]",
")",
"last_length",
"+=",
"mean_length",
"else",
":",
"split_list_object",
"=",
"[",
"[",
"element",
"]",
"for",
"element",
"in",
"list_object",
"]",
"return",
"split_list_object"
] |
This function splits a list into a specified number of lists. It returns a
list of lists that correspond to these parts. Negative numbers of parts are
not accepted and numbers of parts greater than the number of elements in the
list result in the maximum possible number of lists being returned.
|
[
"This",
"function",
"splits",
"a",
"list",
"into",
"a",
"specified",
"number",
"of",
"lists",
".",
"It",
"returns",
"a",
"list",
"of",
"lists",
"that",
"correspond",
"to",
"these",
"parts",
".",
"Negative",
"numbers",
"of",
"parts",
"are",
"not",
"accepted",
"and",
"numbers",
"of",
"parts",
"greater",
"than",
"the",
"number",
"of",
"elements",
"in",
"the",
"list",
"result",
"in",
"the",
"maximum",
"possible",
"number",
"of",
"lists",
"being",
"returned",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1017-L1040
|
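A sketch for the `split_list` record above, splitting ten items into three roughly equal chunks.

print(split_list(list_object=list(range(10)), granularity=3))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
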
246,935 |
wdbm/shijian
|
shijian.py
|
ustr
|
def ustr(text):
"""
Convert a string to Python 2 unicode or Python 3 string as appropriate to
the version of Python in use.
"""
if text is not None:
if sys.version_info >= (3, 0):
return str(text)
else:
return unicode(text)
else:
return text
|
python
|
def ustr(text):
"""
Convert a string to Python 2 unicode or Python 3 string as appropriate to
the version of Python in use.
"""
if text is not None:
if sys.version_info >= (3, 0):
return str(text)
else:
return unicode(text)
else:
return text
|
[
"def",
"ustr",
"(",
"text",
")",
":",
"if",
"text",
"is",
"not",
"None",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"0",
")",
":",
"return",
"str",
"(",
"text",
")",
"else",
":",
"return",
"unicode",
"(",
"text",
")",
"else",
":",
"return",
"text"
] |
Convert a string to Python 2 unicode or Python 3 string as appropriate to
the version of Python in use.
|
[
"Convert",
"a",
"string",
"to",
"Python",
"2",
"unicode",
"or",
"Python",
"3",
"string",
"as",
"appropriate",
"to",
"the",
"version",
"of",
"Python",
"in",
"use",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1231-L1242
|
246,936 |
wdbm/shijian
|
shijian.py
|
add_time_variables
|
def add_time_variables(df, reindex = True):
"""
Return a DataFrame with variables for weekday index, weekday name, timedelta
through day, fraction through day, hours through day and days through week
added, optionally with the index set to datetime and the variable `datetime`
removed. It is assumed that the variable `datetime` exists.
"""
if not "datetime" in df.columns:
log.error("field datetime not found in DataFrame")
return False
df["datetime"] = pd.to_datetime(df["datetime"])
df["month"] = df["datetime"].dt.month
df["month_name"] = df["datetime"].dt.strftime("%B")
df["weekday"] = df["datetime"].dt.weekday
df["weekday_name"] = df["datetime"].dt.weekday_name
df["time_through_day"] = df["datetime"].map(
lambda x: x - datetime.datetime.combine(
x.date(),
datetime.time()
)
)
df["fraction_through_day"] = df["time_through_day"].map(
lambda x: x / datetime.timedelta(hours = 24)
)
df["hour"] = df["datetime"].dt.hour
df["hours_through_day"] = df["fraction_through_day"] * 24
df["days_through_week"] = df.apply(
lambda row: row["weekday"] + row["fraction_through_day"],
axis = 1
)
df["days_through_year"] = df["datetime"].dt.dayofyear
df.index = df["datetime"]
#del df["datetime"]
return df
|
python
|
def add_time_variables(df, reindex = True):
"""
Return a DataFrame with variables for weekday index, weekday name, timedelta
through day, fraction through day, hours through day and days through week
added, optionally with the index set to datetime and the variable `datetime`
removed. It is assumed that the variable `datetime` exists.
"""
if not "datetime" in df.columns:
log.error("field datetime not found in DataFrame")
return False
df["datetime"] = pd.to_datetime(df["datetime"])
df["month"] = df["datetime"].dt.month
df["month_name"] = df["datetime"].dt.strftime("%B")
df["weekday"] = df["datetime"].dt.weekday
df["weekday_name"] = df["datetime"].dt.weekday_name
df["time_through_day"] = df["datetime"].map(
lambda x: x - datetime.datetime.combine(
x.date(),
datetime.time()
)
)
df["fraction_through_day"] = df["time_through_day"].map(
lambda x: x / datetime.timedelta(hours = 24)
)
df["hour"] = df["datetime"].dt.hour
df["hours_through_day"] = df["fraction_through_day"] * 24
df["days_through_week"] = df.apply(
lambda row: row["weekday"] + row["fraction_through_day"],
axis = 1
)
df["days_through_year"] = df["datetime"].dt.dayofyear
df.index = df["datetime"]
#del df["datetime"]
return df
|
[
"def",
"add_time_variables",
"(",
"df",
",",
"reindex",
"=",
"True",
")",
":",
"if",
"not",
"\"datetime\"",
"in",
"df",
".",
"columns",
":",
"log",
".",
"error",
"(",
"\"field datetime not found in DataFrame\"",
")",
"return",
"False",
"df",
"[",
"\"datetime\"",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"\"datetime\"",
"]",
")",
"df",
"[",
"\"month\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"month",
"df",
"[",
"\"month_name\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"strftime",
"(",
"\"%B\"",
")",
"df",
"[",
"\"weekday\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"weekday",
"df",
"[",
"\"weekday_name\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"weekday_name",
"df",
"[",
"\"time_through_day\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"-",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"x",
".",
"date",
"(",
")",
",",
"datetime",
".",
"time",
"(",
")",
")",
")",
"df",
"[",
"\"fraction_through_day\"",
"]",
"=",
"df",
"[",
"\"time_through_day\"",
"]",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"/",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"24",
")",
")",
"df",
"[",
"\"hour\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"hour",
"df",
"[",
"\"hours_through_day\"",
"]",
"=",
"df",
"[",
"\"fraction_through_day\"",
"]",
"*",
"24",
"df",
"[",
"\"days_through_week\"",
"]",
"=",
"df",
".",
"apply",
"(",
"lambda",
"row",
":",
"row",
"[",
"\"weekday\"",
"]",
"+",
"row",
"[",
"\"fraction_through_day\"",
"]",
",",
"axis",
"=",
"1",
")",
"df",
"[",
"\"days_through_year\"",
"]",
"=",
"df",
"[",
"\"datetime\"",
"]",
".",
"dt",
".",
"dayofyear",
"df",
".",
"index",
"=",
"df",
"[",
"\"datetime\"",
"]",
"#del df[\"datetime\"]",
"return",
"df"
] |
Return a DataFrame with variables for weekday index, weekday name, timedelta
through day, fraction through day, hours through day and days through week
added, optionally with the index set to datetime and the variable `datetime`
removed. It is assumed that the variable `datetime` exists.
|
[
"Return",
"a",
"DataFrame",
"with",
"variables",
"for",
"weekday",
"index",
"weekday",
"name",
"timedelta",
"through",
"day",
"fraction",
"through",
"day",
"hours",
"through",
"day",
"and",
"days",
"through",
"week",
"added",
"optionally",
"with",
"the",
"index",
"set",
"to",
"datetime",
"and",
"the",
"variable",
"datetime",
"removed",
".",
"It",
"is",
"assumed",
"that",
"the",
"variable",
"datetime",
"exists",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1578-L1611
|
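A sketch for the `add_time_variables` record above; it assumes pandas is available and, because the record uses the older `Series.dt.weekday_name` accessor, a pandas version that still provides it.

import pandas as pd

df = pd.DataFrame({
    "datetime": ["2018-01-01 06:00", "2018-01-01 18:00", "2018-01-02 12:00"],
    "value": [1.0, 2.0, 3.0],
})
df = add_time_variables(df)  # also sets the index to the datetime column
print(df[["weekday", "hours_through_day", "days_through_week"]])
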
246,937 |
wdbm/shijian
|
shijian.py
|
daily_plots
|
def daily_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create daily plots of a variable in a DataFrame, optionally renormalized. It
is assumed that the DataFrame index is datetime.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
days = []
for group in df.groupby(df.index.day):
days.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("hours")
plt.ylabel(variable);
for day in days:
if renormalize:
values = scaler.fit_transform(day[[variable]])
else:
values = day[variable]
if plot:
plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(day["hours_through_day"], values, s = s)
|
python
|
def daily_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create daily plots of a variable in a DataFrame, optionally renormalized. It
is assumed that the DataFrame index is datetime.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
days = []
for group in df.groupby(df.index.day):
days.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("hours")
plt.ylabel(variable);
for day in days:
if renormalize:
values = scaler.fit_transform(day[[variable]])
else:
values = day[variable]
if plot:
plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(day["hours_through_day"], values, s = s)
|
[
"def",
"daily_plots",
"(",
"df",
",",
"variable",
",",
"renormalize",
"=",
"True",
",",
"plot",
"=",
"True",
",",
"scatter",
"=",
"False",
",",
"linestyle",
"=",
"\"-\"",
",",
"linewidth",
"=",
"1",
",",
"s",
"=",
"1",
")",
":",
"if",
"not",
"df",
".",
"index",
".",
"dtype",
"in",
"[",
"\"datetime64[ns]\"",
",",
"\"<M8[ns]\"",
",",
"\">M8[ns]\"",
"]",
":",
"log",
".",
"error",
"(",
"\"index is not datetime\"",
")",
"return",
"False",
"days",
"=",
"[",
"]",
"for",
"group",
"in",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"day",
")",
":",
"days",
".",
"append",
"(",
"group",
"[",
"1",
"]",
")",
"scaler",
"=",
"MinMaxScaler",
"(",
")",
"plt",
".",
"xlabel",
"(",
"\"hours\"",
")",
"plt",
".",
"ylabel",
"(",
"variable",
")",
"for",
"day",
"in",
"days",
":",
"if",
"renormalize",
":",
"values",
"=",
"scaler",
".",
"fit_transform",
"(",
"day",
"[",
"[",
"variable",
"]",
"]",
")",
"else",
":",
"values",
"=",
"day",
"[",
"variable",
"]",
"if",
"plot",
":",
"plt",
".",
"plot",
"(",
"day",
"[",
"\"hours_through_day\"",
"]",
",",
"values",
",",
"linestyle",
"=",
"linestyle",
",",
"linewidth",
"=",
"linewidth",
")",
"if",
"scatter",
":",
"plt",
".",
"scatter",
"(",
"day",
"[",
"\"hours_through_day\"",
"]",
",",
"values",
",",
"s",
"=",
"s",
")"
] |
Create daily plots of a variable in a DataFrame, optionally renormalized. It
is assumed that the DataFrame index is datetime.
|
[
"Create",
"daily",
"plots",
"of",
"a",
"variable",
"in",
"a",
"DataFrame",
"optionally",
"renormalized",
".",
"It",
"is",
"assumed",
"that",
"the",
"DataFrame",
"index",
"is",
"datetime",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1613-L1644
|
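A sketch for the `daily_plots` record above, continuing from the `add_time_variables` example further up so the index is already datetime and `hours_through_day` exists.

import matplotlib.pyplot as plt

daily_plots(df, "value", renormalize=False, scatter=True, s=10)
plt.show()  # one trace per calendar day, plotted against hours through the day
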
246,938 |
wdbm/shijian
|
shijian.py
|
weekly_plots
|
def weekly_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create weekly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the variable `days_through_week` exists.
"""
if not "days_through_week" in df.columns:
log.error("field days_through_week not found in DataFrame")
return False
weeks = []
for group in df.groupby(df.index.week):
weeks.append(group[1])
scaler = MinMaxScaler()
plt.ylabel(variable);
for week in weeks:
if renormalize:
values = scaler.fit_transform(week[[variable]])
else:
values = week[variable]
if plot:
plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(week["days_through_week"], values, s = s)
plt.xticks(
[ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
)
|
python
|
def weekly_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create weekly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the variable `days_through_week` exists.
"""
if not "days_through_week" in df.columns:
log.error("field days_through_week not found in DataFrame")
return False
weeks = []
for group in df.groupby(df.index.week):
weeks.append(group[1])
scaler = MinMaxScaler()
plt.ylabel(variable);
for week in weeks:
if renormalize:
values = scaler.fit_transform(week[[variable]])
else:
values = week[variable]
if plot:
plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(week["days_through_week"], values, s = s)
plt.xticks(
[ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
)
|
[
"def",
"weekly_plots",
"(",
"df",
",",
"variable",
",",
"renormalize",
"=",
"True",
",",
"plot",
"=",
"True",
",",
"scatter",
"=",
"False",
",",
"linestyle",
"=",
"\"-\"",
",",
"linewidth",
"=",
"1",
",",
"s",
"=",
"1",
")",
":",
"if",
"not",
"\"days_through_week\"",
"in",
"df",
".",
"columns",
":",
"log",
".",
"error",
"(",
"\"field days_through_week not found in DataFrame\"",
")",
"return",
"False",
"weeks",
"=",
"[",
"]",
"for",
"group",
"in",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"week",
")",
":",
"weeks",
".",
"append",
"(",
"group",
"[",
"1",
"]",
")",
"scaler",
"=",
"MinMaxScaler",
"(",
")",
"plt",
".",
"ylabel",
"(",
"variable",
")",
"for",
"week",
"in",
"weeks",
":",
"if",
"renormalize",
":",
"values",
"=",
"scaler",
".",
"fit_transform",
"(",
"week",
"[",
"[",
"variable",
"]",
"]",
")",
"else",
":",
"values",
"=",
"week",
"[",
"variable",
"]",
"if",
"plot",
":",
"plt",
".",
"plot",
"(",
"week",
"[",
"\"days_through_week\"",
"]",
",",
"values",
",",
"linestyle",
"=",
"linestyle",
",",
"linewidth",
"=",
"linewidth",
")",
"if",
"scatter",
":",
"plt",
".",
"scatter",
"(",
"week",
"[",
"\"days_through_week\"",
"]",
",",
"values",
",",
"s",
"=",
"s",
")",
"plt",
".",
"xticks",
"(",
"[",
"0.5",
",",
"1.5",
",",
"2.5",
",",
"3.5",
",",
"4.5",
",",
"5.5",
",",
"6.5",
"]",
",",
"[",
"\"Monday\"",
",",
"\"Tuesday\"",
",",
"\"Wednesday\"",
",",
"\"Thursday\"",
",",
"\"Friday\"",
",",
"\"Saturday\"",
",",
"\"Sunday\"",
"]",
")"
] |
Create weekly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the variable `days_through_week` exists.
|
[
"Create",
"weekly",
"plots",
"of",
"a",
"variable",
"in",
"a",
"DataFrame",
"optionally",
"renormalized",
".",
"It",
"is",
"assumed",
"that",
"the",
"variable",
"days_through_week",
"exists",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1646-L1680
|
246,939 |
wdbm/shijian
|
shijian.py
|
yearly_plots
|
def yearly_plots(
df,
variable,
renormalize = True,
horizontal_axis_labels_days = False,
horizontal_axis_labels_months = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
years = []
for group in df.groupby(df.index.year):
years.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("days")
plt.ylabel(variable);
for year in years:
if renormalize:
values = scaler.fit_transform(year[[variable]])
else:
values = year[variable]
if plot:
plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
if scatter:
plt.scatter(year["days_through_year"], values, s = s)
if horizontal_axis_labels_months:
plt.xticks(
[ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
)
plt.legend()
|
python
|
def yearly_plots(
df,
variable,
renormalize = True,
horizontal_axis_labels_days = False,
horizontal_axis_labels_months = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
"""
Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
years = []
for group in df.groupby(df.index.year):
years.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("days")
plt.ylabel(variable);
for year in years:
if renormalize:
values = scaler.fit_transform(year[[variable]])
else:
values = year[variable]
if plot:
plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
if scatter:
plt.scatter(year["days_through_year"], values, s = s)
if horizontal_axis_labels_months:
plt.xticks(
[ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
)
plt.legend()
|
[
"def",
"yearly_plots",
"(",
"df",
",",
"variable",
",",
"renormalize",
"=",
"True",
",",
"horizontal_axis_labels_days",
"=",
"False",
",",
"horizontal_axis_labels_months",
"=",
"True",
",",
"plot",
"=",
"True",
",",
"scatter",
"=",
"False",
",",
"linestyle",
"=",
"\"-\"",
",",
"linewidth",
"=",
"1",
",",
"s",
"=",
"1",
")",
":",
"if",
"not",
"df",
".",
"index",
".",
"dtype",
"in",
"[",
"\"datetime64[ns]\"",
",",
"\"<M8[ns]\"",
",",
"\">M8[ns]\"",
"]",
":",
"log",
".",
"error",
"(",
"\"index is not datetime\"",
")",
"return",
"False",
"years",
"=",
"[",
"]",
"for",
"group",
"in",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"year",
")",
":",
"years",
".",
"append",
"(",
"group",
"[",
"1",
"]",
")",
"scaler",
"=",
"MinMaxScaler",
"(",
")",
"plt",
".",
"xlabel",
"(",
"\"days\"",
")",
"plt",
".",
"ylabel",
"(",
"variable",
")",
"for",
"year",
"in",
"years",
":",
"if",
"renormalize",
":",
"values",
"=",
"scaler",
".",
"fit_transform",
"(",
"year",
"[",
"[",
"variable",
"]",
"]",
")",
"else",
":",
"values",
"=",
"year",
"[",
"variable",
"]",
"if",
"plot",
":",
"plt",
".",
"plot",
"(",
"year",
"[",
"\"days_through_year\"",
"]",
",",
"values",
",",
"linestyle",
"=",
"linestyle",
",",
"linewidth",
"=",
"linewidth",
",",
"label",
"=",
"year",
".",
"index",
".",
"year",
".",
"values",
"[",
"0",
"]",
")",
"if",
"scatter",
":",
"plt",
".",
"scatter",
"(",
"year",
"[",
"\"days_through_year\"",
"]",
",",
"values",
",",
"s",
"=",
"s",
")",
"if",
"horizontal_axis_labels_months",
":",
"plt",
".",
"xticks",
"(",
"[",
"15.5",
",",
"45",
",",
"74.5",
",",
"105",
",",
"135.5",
",",
"166",
",",
"196.5",
",",
"227.5",
",",
"258",
",",
"288.5",
",",
"319",
",",
"349.5",
"]",
",",
"[",
"\"January\"",
",",
"\"February\"",
",",
"\"March\"",
",",
"\"April\"",
",",
"\"May\"",
",",
"\"June\"",
",",
"\"July\"",
",",
"\"August\"",
",",
"\"September\"",
",",
"\"October\"",
",",
"\"November\"",
",",
"\"December\"",
"]",
")",
"plt",
".",
"legend",
"(",
")"
] |
Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime.
|
[
"Create",
"yearly",
"plots",
"of",
"a",
"variable",
"in",
"a",
"DataFrame",
"optionally",
"renormalized",
".",
"It",
"is",
"assumed",
"that",
"the",
"DataFrame",
"index",
"is",
"datetime",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1682-L1721
|
246,940 |
wdbm/shijian
|
shijian.py
|
add_rolling_statistics_variables
|
def add_rolling_statistics_variables(
df = None,
variable = None,
window = 20,
upper_factor = 2,
lower_factor = 2
):
"""
Add rolling statistics variables derived from a specified variable in a
DataFrame.
"""
df[variable + "_rolling_mean"] = pd.stats.moments.rolling_mean(df[variable], window)
df[variable + "_rolling_standard_deviation"] = pd.stats.moments.rolling_std(df[variable], window)
df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
return df
|
python
|
def add_rolling_statistics_variables(
df = None,
variable = None,
window = 20,
upper_factor = 2,
lower_factor = 2
):
"""
Add rolling statistics variables derived from a specified variable in a
DataFrame.
"""
df[variable + "_rolling_mean"] = pd.stats.moments.rolling_mean(df[variable], window)
df[variable + "_rolling_standard_deviation"] = pd.stats.moments.rolling_std(df[variable], window)
df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
return df
|
[
"def",
"add_rolling_statistics_variables",
"(",
"df",
"=",
"None",
",",
"variable",
"=",
"None",
",",
"window",
"=",
"20",
",",
"upper_factor",
"=",
"2",
",",
"lower_factor",
"=",
"2",
")",
":",
"df",
"[",
"variable",
"+",
"\"_rolling_mean\"",
"]",
"=",
"pd",
".",
"stats",
".",
"moments",
".",
"rolling_mean",
"(",
"df",
"[",
"variable",
"]",
",",
"window",
")",
"df",
"[",
"variable",
"+",
"\"_rolling_standard_deviation\"",
"]",
"=",
"pd",
".",
"stats",
".",
"moments",
".",
"rolling_std",
"(",
"df",
"[",
"variable",
"]",
",",
"window",
")",
"df",
"[",
"variable",
"+",
"\"_rolling_upper_bound\"",
"]",
"=",
"df",
"[",
"variable",
"+",
"\"_rolling_mean\"",
"]",
"+",
"upper_factor",
"*",
"df",
"[",
"variable",
"+",
"\"_rolling_standard_deviation\"",
"]",
"df",
"[",
"variable",
"+",
"\"_rolling_lower_bound\"",
"]",
"=",
"df",
"[",
"variable",
"+",
"\"_rolling_mean\"",
"]",
"-",
"lower_factor",
"*",
"df",
"[",
"variable",
"+",
"\"_rolling_standard_deviation\"",
"]",
"return",
"df"
] |
Add rolling statistics variables derived from a specified variable in a
DataFrame.
|
[
"Add",
"rolling",
"statistics",
"variables",
"derived",
"from",
"a",
"specified",
"variable",
"in",
"a",
"DataFrame",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1723-L1738
|
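The `add_rolling_statistics_variables` record above relies on `pd.stats.moments`, which has been removed from current pandas; this sketch computes the same four columns with the `DataFrame.rolling` interface (the `value` column is a stand-in).

import numpy as np
import pandas as pd

df = pd.DataFrame({"value": np.random.randn(100)})
window, upper_factor, lower_factor = 20, 2, 2
rolling = df["value"].rolling(window)
df["value_rolling_mean"] = rolling.mean()
df["value_rolling_standard_deviation"] = rolling.std()
df["value_rolling_upper_bound"] = (
    df["value_rolling_mean"] + upper_factor * df["value_rolling_standard_deviation"])
df["value_rolling_lower_bound"] = (
    df["value_rolling_mean"] - lower_factor * df["value_rolling_standard_deviation"])
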
246,941 |
wdbm/shijian
|
shijian.py
|
rescale_variables
|
def rescale_variables(
df,
variables_include = [],
variables_exclude = []
):
"""
Rescale variables in a DataFrame, excluding variables with NaNs and strings,
excluding specified variables, and including specified variables.
"""
variables_not_rescale = variables_exclude
variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
variables_rescale = list(set(df.columns) - set(variables_not_rescale))
variables_rescale.extend(variables_include)
scaler = MinMaxScaler()
df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
return df
|
python
|
def rescale_variables(
df,
variables_include = [],
variables_exclude = []
):
"""
Rescale variables in a DataFrame, excluding variables with NaNs and strings,
excluding specified variables, and including specified variables.
"""
variables_not_rescale = variables_exclude
variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
variables_rescale = list(set(df.columns) - set(variables_not_rescale))
variables_rescale.extend(variables_include)
scaler = MinMaxScaler()
df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
return df
|
[
"def",
"rescale_variables",
"(",
"df",
",",
"variables_include",
"=",
"[",
"]",
",",
"variables_exclude",
"=",
"[",
"]",
")",
":",
"variables_not_rescale",
"=",
"variables_exclude",
"variables_not_rescale",
".",
"extend",
"(",
"df",
".",
"columns",
"[",
"df",
".",
"isna",
"(",
")",
".",
"any",
"(",
")",
"]",
".",
"tolist",
"(",
")",
")",
"# variables with NaNs",
"variables_not_rescale",
".",
"extend",
"(",
"df",
".",
"select_dtypes",
"(",
"include",
"=",
"[",
"\"object\"",
",",
"\"datetime\"",
",",
"\"timedelta\"",
"]",
")",
".",
"columns",
")",
"# variables with strings",
"variables_rescale",
"=",
"list",
"(",
"set",
"(",
"df",
".",
"columns",
")",
"-",
"set",
"(",
"variables_not_rescale",
")",
")",
"variables_rescale",
".",
"extend",
"(",
"variables_include",
")",
"scaler",
"=",
"MinMaxScaler",
"(",
")",
"df",
"[",
"variables_rescale",
"]",
"=",
"scaler",
".",
"fit_transform",
"(",
"df",
"[",
"variables_rescale",
"]",
")",
"return",
"df"
] |
Rescale variables in a DataFrame, excluding variables with NaNs and strings,
excluding specified variables, and including specified variables.
|
[
"Rescale",
"variables",
"in",
"a",
"DataFrame",
"excluding",
"variables",
"with",
"NaNs",
"and",
"strings",
"excluding",
"specified",
"variables",
"and",
"including",
"specified",
"variables",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1740-L1756
|
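A sketch for the `rescale_variables` record above; it assumes scikit-learn's MinMaxScaler is importable where the function lives, and note that the list passed as `variables_exclude` is extended in place by the function.

import pandas as pd

df = pd.DataFrame({
    "temperature": [12.0, 18.5, 25.0],
    "pressure": [990.0, 1005.0, 1020.0],
    "station": ["A", "B", "C"],          # string column, skipped automatically
})
df = rescale_variables(df, variables_exclude=["pressure"])
print(df)  # temperature scaled to [0, 1]; pressure and station left unchanged
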
246,942 |
wdbm/shijian
|
shijian.py
|
histogram_hour_counts
|
def histogram_hour_counts(
df,
variable
):
"""
Create a day-long histogram of counts of the variable for each hour. It is
assumed that the DataFrame index is datetime and that the variable
`hour` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.hour)[variable].count()
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
python
|
def histogram_hour_counts(
df,
variable
):
"""
Create a day-long histogram of counts of the variable for each hour. It is
assumed that the DataFrame index is datetime and that the variable
`hour` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.hour)[variable].count()
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
[
"def",
"histogram_hour_counts",
"(",
"df",
",",
"variable",
")",
":",
"if",
"not",
"df",
".",
"index",
".",
"dtype",
"in",
"[",
"\"datetime64[ns]\"",
",",
"\"<M8[ns]\"",
",",
"\">M8[ns]\"",
"]",
":",
"log",
".",
"error",
"(",
"\"index is not datetime\"",
")",
"return",
"False",
"counts",
"=",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"hour",
")",
"[",
"variable",
"]",
".",
"count",
"(",
")",
"counts",
".",
"plot",
"(",
"kind",
"=",
"\"bar\"",
",",
"width",
"=",
"1",
",",
"rot",
"=",
"0",
",",
"alpha",
"=",
"0.7",
")"
] |
Create a day-long histogram of counts of the variable for each hour. It is
assumed that the DataFrame index is datetime and that the variable
`hour` exists.
|
[
"Create",
"a",
"day",
"-",
"long",
"histogram",
"of",
"counts",
"of",
"the",
"variable",
"for",
"each",
"hour",
".",
"It",
"is",
"assumed",
"that",
"the",
"DataFrame",
"index",
"is",
"datetime",
"and",
"that",
"the",
"variable",
"hour",
"exists",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1758-L1771
|
246,943 |
wdbm/shijian
|
shijian.py
|
histogram_day_counts
|
def histogram_day_counts(
df,
variable
):
"""
Create a week-long histogram of counts of the variable for each day. It is
assumed that the DataFrame index is datetime and that the variable
`weekday_name` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
python
|
def histogram_day_counts(
df,
variable
):
"""
Create a week-long histogram of counts of the variable for each day. It is
assumed that the DataFrame index is datetime and that the variable
`weekday_name` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
[
"def",
"histogram_day_counts",
"(",
"df",
",",
"variable",
")",
":",
"if",
"not",
"df",
".",
"index",
".",
"dtype",
"in",
"[",
"\"datetime64[ns]\"",
",",
"\"<M8[ns]\"",
",",
"\">M8[ns]\"",
"]",
":",
"log",
".",
"error",
"(",
"\"index is not datetime\"",
")",
"return",
"False",
"counts",
"=",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"weekday_name",
")",
"[",
"variable",
"]",
".",
"count",
"(",
")",
".",
"reindex",
"(",
"calendar",
".",
"day_name",
"[",
"0",
":",
"]",
")",
"counts",
".",
"plot",
"(",
"kind",
"=",
"\"bar\"",
",",
"width",
"=",
"1",
",",
"rot",
"=",
"0",
",",
"alpha",
"=",
"0.7",
")"
] |
Create a week-long histogram of counts of the variable for each day. It is
assumed that the DataFrame index is datetime and that the variable
`weekday_name` exists.
|
[
"Create",
"a",
"week",
"-",
"long",
"histogram",
"of",
"counts",
"of",
"the",
"variable",
"for",
"each",
"day",
".",
"It",
"is",
"assumed",
"that",
"the",
"DataFrame",
"index",
"is",
"datetime",
"and",
"that",
"the",
"variable",
"weekday_name",
"exists",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1773-L1786
|
246,944 |
wdbm/shijian
|
shijian.py
|
histogram_month_counts
|
def histogram_month_counts(
df,
variable
):
"""
Create a year-long histogram of counts of the variable for each month. It is
assumed that the DataFrame index is datetime and that the variable
`month_name` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
python
|
def histogram_month_counts(
df,
variable
):
"""
Create a year-long histogram of counts of the variable for each month. It is
assumed that the DataFrame index is datetime and that the variable
`month_name` exists.
"""
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
[
"def",
"histogram_month_counts",
"(",
"df",
",",
"variable",
")",
":",
"if",
"not",
"df",
".",
"index",
".",
"dtype",
"in",
"[",
"\"datetime64[ns]\"",
",",
"\"<M8[ns]\"",
",",
"\">M8[ns]\"",
"]",
":",
"log",
".",
"error",
"(",
"\"index is not datetime\"",
")",
"return",
"False",
"counts",
"=",
"df",
".",
"groupby",
"(",
"df",
".",
"index",
".",
"strftime",
"(",
"\"%B\"",
")",
")",
"[",
"variable",
"]",
".",
"count",
"(",
")",
".",
"reindex",
"(",
"calendar",
".",
"month_name",
"[",
"1",
":",
"]",
")",
"counts",
".",
"plot",
"(",
"kind",
"=",
"\"bar\"",
",",
"width",
"=",
"1",
",",
"rot",
"=",
"0",
",",
"alpha",
"=",
"0.7",
")"
] |
Create a year-long histogram of counts of the variable for each month. It is
assumed that the DataFrame index is datetime and that the variable
`month_name` exists.
|
[
"Create",
"a",
"year",
"-",
"long",
"histogram",
"of",
"counts",
"of",
"the",
"variable",
"for",
"each",
"month",
".",
"It",
"is",
"assumed",
"that",
"the",
"DataFrame",
"index",
"is",
"datetime",
"and",
"that",
"the",
"variable",
"month_name",
"exists",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1788-L1801
|
246,945 |
wdbm/shijian
|
shijian.py
|
setup_Jupyter
|
def setup_Jupyter():
"""
Set up a Jupyter notebook with a few defaults.
"""
sns.set(context = "paper", font = "monospace")
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
plt.rcParams["figure.figsize"] = (17, 10)
|
python
|
def setup_Jupyter():
"""
Set up a Jupyter notebook with a few defaults.
"""
sns.set(context = "paper", font = "monospace")
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
plt.rcParams["figure.figsize"] = (17, 10)
|
[
"def",
"setup_Jupyter",
"(",
")",
":",
"sns",
".",
"set",
"(",
"context",
"=",
"\"paper\"",
",",
"font",
"=",
"\"monospace\"",
")",
"warnings",
".",
"filterwarnings",
"(",
"\"ignore\"",
")",
"pd",
".",
"set_option",
"(",
"\"display.max_rows\"",
",",
"500",
")",
"pd",
".",
"set_option",
"(",
"\"display.max_columns\"",
",",
"500",
")",
"plt",
".",
"rcParams",
"[",
"\"figure.figsize\"",
"]",
"=",
"(",
"17",
",",
"10",
")"
] |
Set up a Jupyter notebook with a few defaults.
|
[
"Set",
"up",
"a",
"Jupyter",
"notebook",
"with",
"a",
"few",
"defaults",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1803-L1811
|
246,946 |
wdbm/shijian
|
shijian.py
|
List_Consensus.ensure_size
|
def ensure_size(
self,
size = None
):
"""
This function removes the least frequent elements until the size
constraint is met.
"""
if size is None:
size = self.size_constraint
while sys.getsizeof(self) > size:
element_frequencies = collections.Counter(self)
infrequent_element = element_frequencies.most_common()[-1:][0][0]
self.remove(infrequent_element)
|
python
|
def ensure_size(
self,
size = None
):
"""
This function removes the least frequent elements until the size
constraint is met.
"""
if size is None:
size = self.size_constraint
while sys.getsizeof(self) > size:
element_frequencies = collections.Counter(self)
infrequent_element = element_frequencies.most_common()[-1:][0][0]
self.remove(infrequent_element)
|
[
"def",
"ensure_size",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"self",
".",
"size_constraint",
"while",
"sys",
".",
"getsizeof",
"(",
"self",
")",
">",
"size",
":",
"element_frequencies",
"=",
"collections",
".",
"Counter",
"(",
"self",
")",
"infrequent_element",
"=",
"element_frequencies",
".",
"most_common",
"(",
")",
"[",
"-",
"1",
":",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"remove",
"(",
"infrequent_element",
")"
] |
This function removes the least frequent elements until the size
constraint is met.
|
[
"This",
"function",
"removes",
"the",
"least",
"frequent",
"elements",
"until",
"the",
"size",
"constraint",
"is",
"met",
"."
] |
ad6aea877e1eb99fe148127ea185f39f1413ed4f
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L919-L932
|
246,947 |
ONSdigital/sdx-common
|
sdx/common/logger_config.py
|
logger_initial_config
|
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
'''Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
'''
if not log_level:
log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
if not logger_format:
logger_format = (
"%(asctime)s.%(msecs)06dZ|"
"%(levelname)s: {}: %(message)s"
).format(service_name)
if not logger_date_format:
logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
logging.basicConfig(level=log_level,
format=logger_format,
datefmt=logger_date_format)
|
python
|
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
'''Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
'''
if not log_level:
log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
if not logger_format:
logger_format = (
"%(asctime)s.%(msecs)06dZ|"
"%(levelname)s: {}: %(message)s"
).format(service_name)
if not logger_date_format:
logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
logging.basicConfig(level=log_level,
format=logger_format,
datefmt=logger_date_format)
|
[
"def",
"logger_initial_config",
"(",
"service_name",
"=",
"None",
",",
"log_level",
"=",
"None",
",",
"logger_format",
"=",
"None",
",",
"logger_date_format",
"=",
"None",
")",
":",
"if",
"not",
"log_level",
":",
"log_level",
"=",
"os",
".",
"getenv",
"(",
"'LOGGING_LEVEL'",
",",
"'DEBUG'",
")",
"if",
"not",
"logger_format",
":",
"logger_format",
"=",
"(",
"\"%(asctime)s.%(msecs)06dZ|\"",
"\"%(levelname)s: {}: %(message)s\"",
")",
".",
"format",
"(",
"service_name",
")",
"if",
"not",
"logger_date_format",
":",
"logger_date_format",
"=",
"os",
".",
"getenv",
"(",
"'LOGGING_DATE_FORMAT'",
",",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"log_level",
",",
"format",
"=",
"logger_format",
",",
"datefmt",
"=",
"logger_date_format",
")"
] |
Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
|
[
"Set",
"initial",
"logging",
"configurations",
"."
] |
815f6a116d41fddae182943d821dc5f582a9af69
|
https://github.com/ONSdigital/sdx-common/blob/815f6a116d41fddae182943d821dc5f582a9af69/sdx/common/logger_config.py#L5-L38
|
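A usage sketch for the `logger_initial_config` record above; the service name is a placeholder, and omitted arguments fall back to the LOGGING_LEVEL / LOGGING_DATE_FORMAT environment variables.

import logging

from sdx.common.logger_config import logger_initial_config

logger_initial_config(service_name="sdx-example", log_level="INFO")
logging.getLogger(__name__).info("service started")
# emits lines shaped like: <asctime>.<usecs>Z|INFO: sdx-example: service started
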
246,948 |
smartmob-project/smartmob-agent
|
smartmob_agent/__init__.py
|
access_log_middleware
|
async def access_log_middleware(app, handler):
"""Log each request in structured event log."""
event_log = app.get('smartmob.event_log') or structlog.get_logger()
clock = app.get('smartmob.clock') or timeit.default_timer
# Keep the request arrival time to ensure we get intuitive logging of
# events.
arrival_time = datetime.utcnow().replace(tzinfo=timezone.utc)
async def access_log(request):
ref = clock()
try:
response = await handler(request)
event_log.info(
'http.access',
path=request.path,
outcome=response.status,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
return response
except web.HTTPException as error:
event_log.info(
'http.access',
path=request.path,
outcome=error.status,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
raise
except Exception:
event_log.info(
'http.access',
path=request.path,
outcome=500,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
raise
return access_log
|
python
|
async def access_log_middleware(app, handler):
"""Log each request in structured event log."""
event_log = app.get('smartmob.event_log') or structlog.get_logger()
clock = app.get('smartmob.clock') or timeit.default_timer
# Keep the request arrival time to ensure we get intuitive logging of
# events.
arrival_time = datetime.utcnow().replace(tzinfo=timezone.utc)
async def access_log(request):
ref = clock()
try:
response = await handler(request)
event_log.info(
'http.access',
path=request.path,
outcome=response.status,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
return response
except web.HTTPException as error:
event_log.info(
'http.access',
path=request.path,
outcome=error.status,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
raise
except Exception:
event_log.info(
'http.access',
path=request.path,
outcome=500,
duration=(clock()-ref),
request=request.get('x-request-id', '?'),
**{'@timestamp': arrival_time}
)
raise
return access_log
|
[
"async",
"def",
"access_log_middleware",
"(",
"app",
",",
"handler",
")",
":",
"event_log",
"=",
"app",
".",
"get",
"(",
"'smartmob.event_log'",
")",
"or",
"structlog",
".",
"get_logger",
"(",
")",
"clock",
"=",
"app",
".",
"get",
"(",
"'smartmob.clock'",
")",
"or",
"timeit",
".",
"default_timer",
"# Keep the request arrival time to ensure we get intuitive logging of",
"# events.",
"arrival_time",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"timezone",
".",
"utc",
")",
"async",
"def",
"access_log",
"(",
"request",
")",
":",
"ref",
"=",
"clock",
"(",
")",
"try",
":",
"response",
"=",
"await",
"handler",
"(",
"request",
")",
"event_log",
".",
"info",
"(",
"'http.access'",
",",
"path",
"=",
"request",
".",
"path",
",",
"outcome",
"=",
"response",
".",
"status",
",",
"duration",
"=",
"(",
"clock",
"(",
")",
"-",
"ref",
")",
",",
"request",
"=",
"request",
".",
"get",
"(",
"'x-request-id'",
",",
"'?'",
")",
",",
"*",
"*",
"{",
"'@timestamp'",
":",
"arrival_time",
"}",
")",
"return",
"response",
"except",
"web",
".",
"HTTPException",
"as",
"error",
":",
"event_log",
".",
"info",
"(",
"'http.access'",
",",
"path",
"=",
"request",
".",
"path",
",",
"outcome",
"=",
"error",
".",
"status",
",",
"duration",
"=",
"(",
"clock",
"(",
")",
"-",
"ref",
")",
",",
"request",
"=",
"request",
".",
"get",
"(",
"'x-request-id'",
",",
"'?'",
")",
",",
"*",
"*",
"{",
"'@timestamp'",
":",
"arrival_time",
"}",
")",
"raise",
"except",
"Exception",
":",
"event_log",
".",
"info",
"(",
"'http.access'",
",",
"path",
"=",
"request",
".",
"path",
",",
"outcome",
"=",
"500",
",",
"duration",
"=",
"(",
"clock",
"(",
")",
"-",
"ref",
")",
",",
"request",
"=",
"request",
".",
"get",
"(",
"'x-request-id'",
",",
"'?'",
")",
",",
"*",
"*",
"{",
"'@timestamp'",
":",
"arrival_time",
"}",
")",
"raise",
"return",
"access_log"
] |
Log each request in structured event log.
|
[
"Log",
"each",
"request",
"in",
"structured",
"event",
"log",
"."
] |
4039f577ab7230d135f00df68c611a51e45ddbc7
|
https://github.com/smartmob-project/smartmob-agent/blob/4039f577ab7230d135f00df68c611a51e45ddbc7/smartmob_agent/__init__.py#L95-L139
|
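A sketch of wiring the `access_log_middleware` record above into an aiohttp application; it assumes the older factory-style middleware signature `(app, handler)` that aiohttp accepted at the time, and the `smartmob.*` app keys are optional overrides.

import structlog
from aiohttp import web

from smartmob_agent import access_log_middleware

app = web.Application(middlewares=[access_log_middleware])
app['smartmob.event_log'] = structlog.get_logger()  # optional; a default logger is used otherwise

async def hello(request):
    return web.Response(text='hi')

app.router.add_get('/', hello)
# web.run_app(app) would then log one 'http.access' event per request.
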
246,949 |
jtpaasch/simplygithub
|
simplygithub/internals/merges.py
|
merge
|
def merge(profile, head, base, commit_message=None):
"""Merge the head of a branch into the base branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
head
The head to merge. It can be a SHA, or a branch name.
base
The name of the branch to merge the specified head into.
commit_message
The message to give for the commit.
Returns:
A dict with data about the merge.
"""
if not commit_message:
commit_message = "Merged " + head + " into " + base + "."
payload = {
"base": base,
"head": head,
"commit_message": commit_message,
}
response = api.post_merge_request(profile, payload)
data = None
if response.status_code == 201:
json_data = response.json()
data = prepare(json_data)
return data
|
python
|
def merge(profile, head, base, commit_message=None):
"""Merge the head of a branch into the base branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
head
The head to merge. It can be a SHA, or a branch name.
base
The name of the branch to merge the specified head into.
commit_message
The message to give for the commit.
Returns:
A dict with data about the merge.
"""
if not commit_message:
commit_message = "Merged " + head + " into " + base + "."
payload = {
"base": base,
"head": head,
"commit_message": commit_message,
}
response = api.post_merge_request(profile, payload)
data = None
if response.status_code == 201:
json_data = response.json()
data = prepare(json_data)
return data
|
[
"def",
"merge",
"(",
"profile",
",",
"head",
",",
"base",
",",
"commit_message",
"=",
"None",
")",
":",
"if",
"not",
"commit_message",
":",
"commit_message",
"=",
"\"Merged \"",
"+",
"head",
"+",
"\" into \"",
"+",
"base",
"+",
"\".\"",
"payload",
"=",
"{",
"\"base\"",
":",
"base",
",",
"\"head\"",
":",
"head",
",",
"\"commit_message\"",
":",
"commit_message",
",",
"}",
"response",
"=",
"api",
".",
"post_merge_request",
"(",
"profile",
",",
"payload",
")",
"data",
"=",
"None",
"if",
"response",
".",
"status_code",
"==",
"201",
":",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"data",
"=",
"prepare",
"(",
"json_data",
")",
"return",
"data"
] |
Merge the head of a branch into the base branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
head
The head to merge. It can be a SHA, or a branch name.
base
The name of the branch to merge the specified head into.
commit_message
The message to give for the commit.
Returns:
A dict with data about the merge.
|
[
"Merge",
"the",
"head",
"of",
"a",
"branch",
"into",
"the",
"base",
"branch",
"."
] |
b77506275ec276ce90879bf1ea9299a79448b903
|
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/merges.py#L18-L53
|
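A hedged usage sketch for the merge helper above. The profile layout (a mapping carrying the repo and the token) follows the docstring's description, but the exact shape, and how simplygithub.authentication.profile builds it, is not shown in this record, so the values below are placeholders.

from simplygithub.internals import merges

# Placeholder profile: the docstring says a profile tells the module which
# repo to connect to and which token to connect with; the real object comes
# from simplygithub.authentication.profile, which is not shown here.
profile = {"repo": "someuser/somerepo", "token": "<personal-access-token>"}

# Merge the "feature" branch into "master". Returns a dict describing the
# merge when GitHub answers 201, otherwise None.
result = merges.merge(profile, head="feature", base="master")
print(result)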
246,950 |
soasme/rio-client
|
rio_client/parser.py
|
parse_netloc
|
def parse_netloc(scheme, netloc):
"""Parse netloc string."""
auth, _netloc = netloc.split('@')
sender, token = auth.split(':')
if ':' in _netloc:
domain, port = _netloc.split(':')
port = int(port)
else:
domain = _netloc
if scheme == 'https':
port = 443
else:
port = 80
return dict(sender=sender, token=token, domain=domain, port=port)
|
python
|
def parse_netloc(scheme, netloc):
"""Parse netloc string."""
auth, _netloc = netloc.split('@')
sender, token = auth.split(':')
if ':' in _netloc:
domain, port = _netloc.split(':')
port = int(port)
else:
domain = _netloc
if scheme == 'https':
port = 443
else:
port = 80
return dict(sender=sender, token=token, domain=domain, port=port)
|
[
"def",
"parse_netloc",
"(",
"scheme",
",",
"netloc",
")",
":",
"auth",
",",
"_netloc",
"=",
"netloc",
".",
"split",
"(",
"'@'",
")",
"sender",
",",
"token",
"=",
"auth",
".",
"split",
"(",
"':'",
")",
"if",
"':'",
"in",
"_netloc",
":",
"domain",
",",
"port",
"=",
"_netloc",
".",
"split",
"(",
"':'",
")",
"port",
"=",
"int",
"(",
"port",
")",
"else",
":",
"domain",
"=",
"_netloc",
"if",
"scheme",
"==",
"'https'",
":",
"port",
"=",
"443",
"else",
":",
"port",
"=",
"80",
"return",
"dict",
"(",
"sender",
"=",
"sender",
",",
"token",
"=",
"token",
",",
"domain",
"=",
"domain",
",",
"port",
"=",
"port",
")"
] |
Parse netloc string.
|
[
"Parse",
"netloc",
"string",
"."
] |
c6d684c6f9deea5b43f2b05bcaf40714c48b5619
|
https://github.com/soasme/rio-client/blob/c6d684c6f9deea5b43f2b05bcaf40714c48b5619/rio_client/parser.py#L9-L22
|
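Since parse_netloc is pure string handling, a short driver shows the expected shape of its output; the credentials and host below are made up.

from rio_client.parser import parse_netloc

# No explicit port: falls back to 443 for https (80 otherwise).
print(parse_netloc('https', 'alice:s3cret@example.com'))
# {'sender': 'alice', 'token': 's3cret', 'domain': 'example.com', 'port': 443}

# Explicit port: parsed and converted to int.
print(parse_netloc('http', 'alice:s3cret@example.com:5000'))
# {'sender': 'alice', 'token': 's3cret', 'domain': 'example.com', 'port': 5000}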
246,951 |
soasme/rio-client
|
rio_client/parser.py
|
parse_path
|
def parse_path(path):
"""Parse path string."""
version, project = path[1:].split('/')
return dict(version=int(version), project=project)
|
python
|
def parse_path(path):
"""Parse path string."""
version, project = path[1:].split('/')
return dict(version=int(version), project=project)
|
[
"def",
"parse_path",
"(",
"path",
")",
":",
"version",
",",
"project",
"=",
"path",
"[",
"1",
":",
"]",
".",
"split",
"(",
"'/'",
")",
"return",
"dict",
"(",
"version",
"=",
"int",
"(",
"version",
")",
",",
"project",
"=",
"project",
")"
] |
Parse path string.
|
[
"Parse",
"path",
"string",
"."
] |
c6d684c6f9deea5b43f2b05bcaf40714c48b5619
|
https://github.com/soasme/rio-client/blob/c6d684c6f9deea5b43f2b05bcaf40714c48b5619/rio_client/parser.py#L24-L27
|
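A usage sketch for parse_path; the '/<version>/<project>' value is a made-up example of the shape it expects.

from rio_client.parser import parse_path

print(parse_path('/1/myproject'))
# {'version': 1, 'project': 'myproject'}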
246,952 |
soasme/rio-client
|
rio_client/parser.py
|
parse_dsn
|
def parse_dsn(dsn):
"""Parse dsn string."""
parsed_dsn = urlparse(dsn)
parsed_path = parse_path(parsed_dsn.path)
return {
'scheme': parsed_dsn.scheme,
'sender': parsed_dsn.username,
'token': parsed_dsn.password,
'domain': parsed_dsn.hostname,
'port': parsed_dsn.port or 80,
'version': parsed_path.get('version'),
'project': parsed_path.get('project'),
}
|
python
|
def parse_dsn(dsn):
"""Parse dsn string."""
parsed_dsn = urlparse(dsn)
parsed_path = parse_path(parsed_dsn.path)
return {
'scheme': parsed_dsn.scheme,
'sender': parsed_dsn.username,
'token': parsed_dsn.password,
'domain': parsed_dsn.hostname,
'port': parsed_dsn.port or 80,
'version': parsed_path.get('version'),
'project': parsed_path.get('project'),
}
|
[
"def",
"parse_dsn",
"(",
"dsn",
")",
":",
"parsed_dsn",
"=",
"urlparse",
"(",
"dsn",
")",
"parsed_path",
"=",
"parse_path",
"(",
"parsed_dsn",
".",
"path",
")",
"return",
"{",
"'scheme'",
":",
"parsed_dsn",
".",
"scheme",
",",
"'sender'",
":",
"parsed_dsn",
".",
"username",
",",
"'token'",
":",
"parsed_dsn",
".",
"password",
",",
"'domain'",
":",
"parsed_dsn",
".",
"hostname",
",",
"'port'",
":",
"parsed_dsn",
".",
"port",
"or",
"80",
",",
"'version'",
":",
"parsed_path",
".",
"get",
"(",
"'version'",
")",
",",
"'project'",
":",
"parsed_path",
".",
"get",
"(",
"'project'",
")",
",",
"}"
] |
Parse dsn string.
|
[
"Parse",
"dsn",
"string",
"."
] |
c6d684c6f9deea5b43f2b05bcaf40714c48b5619
|
https://github.com/soasme/rio-client/blob/c6d684c6f9deea5b43f2b05bcaf40714c48b5619/rio_client/parser.py#L29-L41
|
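A usage sketch for parse_dsn with a made-up DSN. Note that the port fallback here is 80 regardless of scheme, unlike parse_netloc above, which defaults to 443 for https.

from rio_client.parser import parse_dsn

dsn = 'http://alice:s3cret@example.com:5000/1/myproject'
print(parse_dsn(dsn))
# {'scheme': 'http', 'sender': 'alice', 'token': 's3cret',
#  'domain': 'example.com', 'port': 5000, 'version': 1, 'project': 'myproject'}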
246,953 |
jldantas/libmft
|
libmft/api.py
|
MFTHeader.create_from_binary
|
def create_from_binary(cls, ignore_signature_check, binary_view):
'''Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
MFTHeader: New object using the binary stream as source
'''
sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \
usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \
cls._REPR.unpack(binary_view[:cls._REPR.size])
baad = None
if not ignore_signature_check:
if sig == b"FILE":
baad = False
elif sig == b"BAAD":
baad = True
else:
raise HeaderError("Entry has no valid signature.", "MFTHeader")
if fx_offset < MFTHeader._REPR.size: #header[1] is fx_offset
raise HeaderError("Fix up array begins within the header.", "MFTHeader")
if first_attr_offset < cls._REPR.size: #first attribute offset < header size
raise HeaderError("First attribute offset points to inside of the header.", "MFTHeader")
if entry_len > alloc_len: #entry_len > entry_alloc_len
raise HeaderError("Logical size of the MFT is bigger than MFT allocated size.", "MFTHeader")
file_ref, file_seq = get_file_reference(base_record)
nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count,
first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len,
file_ref, file_seq, next_attr_id, record_n))
return nw_obj
|
python
|
def create_from_binary(cls, ignore_signature_check, binary_view):
'''Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
MFTHeader: New object using the binary stream as source
'''
sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \
usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \
cls._REPR.unpack(binary_view[:cls._REPR.size])
baad = None
if not ignore_signature_check:
if sig == b"FILE":
baad = False
elif sig == b"BAAD":
baad = True
else:
raise HeaderError("Entry has no valid signature.", "MFTHeader")
if fx_offset < MFTHeader._REPR.size: #header[1] is fx_offset
raise HeaderError("Fix up array begins within the header.", "MFTHeader")
if first_attr_offset < cls._REPR.size: #first attribute offset < header size
raise HeaderError("First attribute offset points to inside of the header.", "MFTHeader")
if entry_len > alloc_len: #entry_len > entry_alloc_len
raise HeaderError("Logical size of the MFT is bigger than MFT allocated size.", "MFTHeader")
file_ref, file_seq = get_file_reference(base_record)
nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count,
first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len,
file_ref, file_seq, next_attr_id, record_n))
return nw_obj
|
[
"def",
"create_from_binary",
"(",
"cls",
",",
"ignore_signature_check",
",",
"binary_view",
")",
":",
"sig",
",",
"fx_offset",
",",
"fx_count",
",",
"lsn",
",",
"seq_number",
",",
"hard_link_count",
",",
"first_attr_offset",
",",
"usage_flags",
",",
"entry_len",
",",
"alloc_len",
",",
"base_record",
",",
"next_attr_id",
",",
"record_n",
"=",
"cls",
".",
"_REPR",
".",
"unpack",
"(",
"binary_view",
"[",
":",
"cls",
".",
"_REPR",
".",
"size",
"]",
")",
"baad",
"=",
"None",
"if",
"not",
"ignore_signature_check",
":",
"if",
"sig",
"==",
"b\"FILE\"",
":",
"baad",
"=",
"False",
"elif",
"sig",
"==",
"b\"BAAD\"",
":",
"baad",
"=",
"True",
"else",
":",
"raise",
"HeaderError",
"(",
"\"Entry has no valid signature.\"",
",",
"\"MFTHeader\"",
")",
"if",
"fx_offset",
"<",
"MFTHeader",
".",
"_REPR",
".",
"size",
":",
"#header[1] is fx_offset",
"raise",
"HeaderError",
"(",
"\"Fix up array begins within the header.\"",
",",
"\"MFTHeader\"",
")",
"if",
"first_attr_offset",
"<",
"cls",
".",
"_REPR",
".",
"size",
":",
"#first attribute offset < header size",
"raise",
"HeaderError",
"(",
"\"First attribute offset points to inside of the header.\"",
",",
"\"MFTHeader\"",
")",
"if",
"entry_len",
">",
"alloc_len",
":",
"#entry_len > entry_alloc_len",
"raise",
"HeaderError",
"(",
"\"Logical size of the MFT is bigger than MFT allocated size.\"",
",",
"\"MFTHeader\"",
")",
"file_ref",
",",
"file_seq",
"=",
"get_file_reference",
"(",
"base_record",
")",
"nw_obj",
"=",
"cls",
"(",
"(",
"baad",
",",
"fx_offset",
",",
"fx_count",
",",
"lsn",
",",
"seq_number",
",",
"hard_link_count",
",",
"first_attr_offset",
",",
"MftUsageFlags",
"(",
"usage_flags",
")",
",",
"entry_len",
",",
"alloc_len",
",",
"file_ref",
",",
"file_seq",
",",
"next_attr_id",
",",
"record_n",
")",
")",
"return",
"nw_obj"
] |
Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
MFTHeader: New object using the binary stream as source
|
[
"Creates",
"a",
"new",
"object",
"MFTHeader",
"from",
"a",
"binary",
"stream",
".",
"The",
"binary",
"stream",
"can",
"be",
"represented",
"by",
"a",
"byte",
"string",
"bytearray",
"or",
"a",
"memoryview",
"of",
"the",
"bytearray",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L279-L316
|
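A hedged sketch of feeding one raw record to MFTHeader.create_from_binary. It assumes an exported $MFT file on disk, a 1024-byte record size (a common NTFS default, but an assumption here), and that MFTHeader is importable from libmft.api as the record's path suggests; get_representation_size() is the same helper MFTEntry.create_from_binary uses in a later record.

from libmft.api import MFTHeader

ENTRY_SIZE = 1024  # assumed record size; real images may differ

# 'MFT_dump.bin' is a placeholder name for an exported $MFT file.
with open('MFT_dump.bin', 'rb') as f:
    raw_entry = bytearray(f.read(ENTRY_SIZE))

header = MFTHeader.create_from_binary(
    False,  # keep the FILE/BAAD signature check enabled
    memoryview(raw_entry)[:MFTHeader.get_representation_size()])
print(header.mft_record, header.fx_count)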
246,954 |
jldantas/libmft
|
libmft/api.py
|
Datastream._get_dataruns
|
def _get_dataruns(self):
'''Returns a list of dataruns, in order.
'''
if self._data_runs is None:
raise DataStreamError("Resident datastream don't have dataruns")
if not self._data_runs_sorted:
self._data_runs.sort(key=_itemgetter(0))
self._data_runs_sorted = True
return [data[1] for data in self._data_runs]
|
python
|
def _get_dataruns(self):
'''Returns a list of dataruns, in order.
'''
if self._data_runs is None:
raise DataStreamError("Resident datastream don't have dataruns")
if not self._data_runs_sorted:
self._data_runs.sort(key=_itemgetter(0))
self._data_runs_sorted = True
return [data[1] for data in self._data_runs]
|
[
"def",
"_get_dataruns",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data_runs",
"is",
"None",
":",
"raise",
"DataStreamError",
"(",
"\"Resident datastream don't have dataruns\"",
")",
"if",
"not",
"self",
".",
"_data_runs_sorted",
":",
"self",
".",
"_data_runs",
".",
"sort",
"(",
"key",
"=",
"_itemgetter",
"(",
"0",
")",
")",
"self",
".",
"_data_runs_sorted",
"=",
"True",
"return",
"[",
"data",
"[",
"1",
"]",
"for",
"data",
"in",
"self",
".",
"_data_runs",
"]"
] |
Returns a list of dataruns, in order.
|
[
"Returns",
"a",
"list",
"of",
"dataruns",
"in",
"order",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L389-L399
|
246,955 |
jldantas/libmft
|
libmft/api.py
|
Datastream.add_data_attribute
|
def add_data_attribute(self, data_attr):
'''Interprets a DATA attribute and adds it to the datastream.'''
if data_attr.header.attr_type_id is not AttrTypes.DATA:
raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes")
if data_attr.header.attr_name != self.name:
raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream")
if data_attr.header.non_resident:
nonr_header = data_attr.header
if self._data_runs is None:
self._data_runs = []
if nonr_header.end_vcn > self.cluster_count:
self.cluster_count = nonr_header.end_vcn
if not nonr_header.start_vcn: #start_vcn == 0
self.size = nonr_header.curr_sstream
self.alloc_size = nonr_header.alloc_sstream
self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs))
self._data_runs_sorted = False
else: #if it is resident
self.size = self.alloc_size = data_attr.header.content_len
self._pending_processing = None
#respects mft_config["load_data"]
self._content = data_attr.content.content
|
python
|
def add_data_attribute(self, data_attr):
'''Interprets a DATA attribute and adds it to the datastream.'''
if data_attr.header.attr_type_id is not AttrTypes.DATA:
raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes")
if data_attr.header.attr_name != self.name:
raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream")
if data_attr.header.non_resident:
nonr_header = data_attr.header
if self._data_runs is None:
self._data_runs = []
if nonr_header.end_vcn > self.cluster_count:
self.cluster_count = nonr_header.end_vcn
if not nonr_header.start_vcn: #start_vcn == 0
self.size = nonr_header.curr_sstream
self.alloc_size = nonr_header.alloc_sstream
self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs))
self._data_runs_sorted = False
else: #if it is resident
self.size = self.alloc_size = data_attr.header.content_len
self._pending_processing = None
#respects mft_config["load_data"]
self._content = data_attr.content.content
|
[
"def",
"add_data_attribute",
"(",
"self",
",",
"data_attr",
")",
":",
"if",
"data_attr",
".",
"header",
".",
"attr_type_id",
"is",
"not",
"AttrTypes",
".",
"DATA",
":",
"raise",
"DataStreamError",
"(",
"\"Invalid attribute. A Datastream deals only with DATA attributes\"",
")",
"if",
"data_attr",
".",
"header",
".",
"attr_name",
"!=",
"self",
".",
"name",
":",
"raise",
"DataStreamError",
"(",
"f\"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream\"",
")",
"if",
"data_attr",
".",
"header",
".",
"non_resident",
":",
"nonr_header",
"=",
"data_attr",
".",
"header",
"if",
"self",
".",
"_data_runs",
"is",
"None",
":",
"self",
".",
"_data_runs",
"=",
"[",
"]",
"if",
"nonr_header",
".",
"end_vcn",
">",
"self",
".",
"cluster_count",
":",
"self",
".",
"cluster_count",
"=",
"nonr_header",
".",
"end_vcn",
"if",
"not",
"nonr_header",
".",
"start_vcn",
":",
"#start_vcn == 0",
"self",
".",
"size",
"=",
"nonr_header",
".",
"curr_sstream",
"self",
".",
"alloc_size",
"=",
"nonr_header",
".",
"alloc_sstream",
"self",
".",
"_data_runs",
".",
"append",
"(",
"(",
"nonr_header",
".",
"start_vcn",
",",
"nonr_header",
".",
"data_runs",
")",
")",
"self",
".",
"_data_runs_sorted",
"=",
"False",
"else",
":",
"#if it is resident",
"self",
".",
"size",
"=",
"self",
".",
"alloc_size",
"=",
"data_attr",
".",
"header",
".",
"content_len",
"self",
".",
"_pending_processing",
"=",
"None",
"#respects mft_config[\"load_data\"]",
"self",
".",
"_content",
"=",
"data_attr",
".",
"content",
".",
"content"
] |
Interprets a DATA attribute and adds it to the datastream.
|
[
"Interprets",
"a",
"DATA",
"attribute",
"and",
"add",
"it",
"to",
"the",
"datastream",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L405-L427
|
246,956 |
jldantas/libmft
|
libmft/api.py
|
Datastream.add_from_datastream
|
def add_from_datastream(self, source_ds):
'''Add information from another datastream. Verifies if the datastream
added is correct and copies the relevant fields if necessary.'''
if source_ds.name != self.name:
raise DataStreamError("Data from a different stream 'f{source_ds.name}' cannot be add to this stream")
if self._data_runs is None:
raise DataStreamError("Cannot add data to a resident datastream.")
if self.cluster_count < source_ds.cluster_count:
self.cluster_count = source_ds.cluster_count
if self.size == 0 and source_ds.size:
self.size = source_ds.size
self.alloc_size = source_ds.alloc_size
if source_ds._data_runs:
self._data_runs += source_ds._data_runs
self._data_runs_sorted = False
|
python
|
def add_from_datastream(self, source_ds):
'''Add information from another datastream. Verifies if the datastream
added is correct and copies the relevant fields if necessary.'''
if source_ds.name != self.name:
raise DataStreamError("Data from a different stream 'f{source_ds.name}' cannot be add to this stream")
if self._data_runs is None:
raise DataStreamError("Cannot add data to a resident datastream.")
if self.cluster_count < source_ds.cluster_count:
self.cluster_count = source_ds.cluster_count
if self.size == 0 and source_ds.size:
self.size = source_ds.size
self.alloc_size = source_ds.alloc_size
if source_ds._data_runs:
self._data_runs += source_ds._data_runs
self._data_runs_sorted = False
|
[
"def",
"add_from_datastream",
"(",
"self",
",",
"source_ds",
")",
":",
"if",
"source_ds",
".",
"name",
"!=",
"self",
".",
"name",
":",
"raise",
"DataStreamError",
"(",
"\"Data from a different stream 'f{source_ds.name}' cannot be add to this stream\"",
")",
"if",
"self",
".",
"_data_runs",
"is",
"None",
":",
"raise",
"DataStreamError",
"(",
"\"Cannot add data to a resident datastream.\"",
")",
"if",
"self",
".",
"cluster_count",
"<",
"source_ds",
".",
"cluster_count",
":",
"self",
".",
"cluster_count",
"=",
"source_ds",
".",
"cluster_count",
"if",
"self",
".",
"size",
"==",
"0",
"and",
"source_ds",
".",
"size",
":",
"self",
".",
"size",
"=",
"source_ds",
".",
"size",
"self",
".",
"alloc_size",
"=",
"source_ds",
".",
"alloc_size",
"if",
"source_ds",
".",
"_data_runs",
":",
"self",
".",
"_data_runs",
"+=",
"source_ds",
".",
"_data_runs",
"self",
".",
"_data_runs_sorted",
"=",
"False"
] |
Add information from another datastream. Verifies if the datastream
added is correct and copies the relevant fields if necessary.
|
[
"Add",
"information",
"from",
"another",
"datastream",
".",
"Verifies",
"if",
"the",
"datastream",
"added",
"is",
"correct",
"and",
"copy",
"the",
"relevant",
"fields",
"if",
"necessary",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L429-L444
|
246,957 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry.create_from_binary
|
def create_from_binary(cls, mft_config, binary_data, entry_number):
#TODO test carefully how to find the correct index entry, specially with NTFS versions < 3
'''Creates a MFTEntry from a binary stream. It correctly processes
the binary data extracting the MFTHeader, all the attributes and the
slack information from the binary stream.
The binary data WILL be changed to apply the fixup array.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
binary_data (bytearray) - A binary stream with the data to extract.
This has to be a writeable and support the memoryview call
entry_number (int) - The entry number for this entry
Returns:
MFTEntry: If the entry is empty, returns None; otherwise, a new MFTEntry object
'''
bin_view = memoryview(binary_data)
entry = None
#test if the entry is empty
if bin_view[0:4] != b"\x00\x00\x00\x00":
try:
header = MFTHeader.create_from_binary(mft_config.ignore_signature_check,
bin_view[:MFTHeader.get_representation_size()])
except HeaderError as e:
e.update_entry_number(entry_number)
e.update_entry_binary(binary_data)
raise
entry = cls(header, _defaultdict(list))
if header.mft_record != entry_number:
_MOD_LOGGER.warning("The MFT entry number doesn't match. %d != %d", entry_number, header.mft_record)
if len(binary_data) != header.entry_alloc_len:
_MOD_LOGGER.error("Expected MFT size is different than entry size.")
raise EntryError(f"Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).", binary_data, entry_number)
if mft_config.apply_fixup_array:
apply_fixup_array(bin_view, header.fx_offset, header.fx_count, header.entry_alloc_len)
entry._load_attributes(mft_config, bin_view[header.first_attr_offset:])
bin_view.release() #release the underlying buffer
return entry
|
python
|
def create_from_binary(cls, mft_config, binary_data, entry_number):
#TODO test carefully how to find the correct index entry, specially with NTFS versions < 3
'''Creates a MFTEntry from a binary stream. It correctly processes
the binary data extracting the MFTHeader, all the attributes and the
slack information from the binary stream.
The binary data WILL be changed to apply the fixup array.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
binary_data (bytearray) - A binary stream with the data to extract.
This has to be a writeable and support the memoryview call
entry_number (int) - The entry number for this entry
Returns:
MFTEntry: If the entry is empty, returns None; otherwise, a new MFTEntry object
'''
bin_view = memoryview(binary_data)
entry = None
#test if the entry is empty
if bin_view[0:4] != b"\x00\x00\x00\x00":
try:
header = MFTHeader.create_from_binary(mft_config.ignore_signature_check,
bin_view[:MFTHeader.get_representation_size()])
except HeaderError as e:
e.update_entry_number(entry_number)
e.update_entry_binary(binary_data)
raise
entry = cls(header, _defaultdict(list))
if header.mft_record != entry_number:
_MOD_LOGGER.warning("The MFT entry number doesn't match. %d != %d", entry_number, header.mft_record)
if len(binary_data) != header.entry_alloc_len:
_MOD_LOGGER.error("Expected MFT size is different than entry size.")
raise EntryError(f"Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).", binary_data, entry_number)
if mft_config.apply_fixup_array:
apply_fixup_array(bin_view, header.fx_offset, header.fx_count, header.entry_alloc_len)
entry._load_attributes(mft_config, bin_view[header.first_attr_offset:])
bin_view.release() #release the underlying buffer
return entry
|
[
"def",
"create_from_binary",
"(",
"cls",
",",
"mft_config",
",",
"binary_data",
",",
"entry_number",
")",
":",
"#TODO test carefully how to find the correct index entry, specially with NTFS versions < 3",
"bin_view",
"=",
"memoryview",
"(",
"binary_data",
")",
"entry",
"=",
"None",
"#test if the entry is empty",
"if",
"bin_view",
"[",
"0",
":",
"4",
"]",
"!=",
"b\"\\x00\\x00\\x00\\x00\"",
":",
"try",
":",
"header",
"=",
"MFTHeader",
".",
"create_from_binary",
"(",
"mft_config",
".",
"ignore_signature_check",
",",
"bin_view",
"[",
":",
"MFTHeader",
".",
"get_representation_size",
"(",
")",
"]",
")",
"except",
"HeaderError",
"as",
"e",
":",
"e",
".",
"update_entry_number",
"(",
"entry_number",
")",
"e",
".",
"update_entry_binary",
"(",
"binary_data",
")",
"raise",
"entry",
"=",
"cls",
"(",
"header",
",",
"_defaultdict",
"(",
"list",
")",
")",
"if",
"header",
".",
"mft_record",
"!=",
"entry_number",
":",
"_MOD_LOGGER",
".",
"warning",
"(",
"\"The MFT entry number doesn't match. %d != %d\"",
",",
"entry_number",
",",
"header",
".",
"mft_record",
")",
"if",
"len",
"(",
"binary_data",
")",
"!=",
"header",
".",
"entry_alloc_len",
":",
"_MOD_LOGGER",
".",
"error",
"(",
"\"Expected MFT size is different than entry size.\"",
")",
"raise",
"EntryError",
"(",
"f\"Expected MFT size ({len(binary_data)}) is different than entry size ({header.entry_alloc_len}).\"",
",",
"binary_data",
",",
"entry_number",
")",
"if",
"mft_config",
".",
"apply_fixup_array",
":",
"apply_fixup_array",
"(",
"bin_view",
",",
"header",
".",
"fx_offset",
",",
"header",
".",
"fx_count",
",",
"header",
".",
"entry_alloc_len",
")",
"entry",
".",
"_load_attributes",
"(",
"mft_config",
",",
"bin_view",
"[",
"header",
".",
"first_attr_offset",
":",
"]",
")",
"bin_view",
".",
"release",
"(",
")",
"#release the underlying buffer",
"return",
"entry"
] |
Creates a MFTEntry from a binary stream. It correctly processes
the binary data extracting the MFTHeader, all the attributes and the
slack information from the binary stream.
The binary data WILL be changed to apply the fixup array.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
binary_data (bytearray) - A binary stream with the data to extract.
This has to be a writeable and support the memoryview call
entry_number (int) - The entry number for this entry
Returns:
MFTEntry: If the entry is empty, returns None; otherwise, a new MFTEntry object
|
[
"Creates",
"a",
"MFTEntry",
"from",
"a",
"binary",
"stream",
".",
"It",
"correctly",
"process",
"the",
"binary",
"data",
"extracting",
"the",
"MFTHeader",
"all",
"the",
"attributes",
"and",
"the",
"slack",
"information",
"from",
"the",
"binary",
"stream",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L588-L632
|
246,958 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry._find_datastream
|
def _find_datastream(self, name):
"""Find and return if a datastream exists, by name."""
for stream in self.data_streams: #search to see if this is a new datastream or a known one
if stream.name == name:
return stream
return None
|
python
|
def _find_datastream(self, name):
"""Find and return if a datastream exists, by name."""
for stream in self.data_streams: #search to see if this is a new datastream or a known one
if stream.name == name:
return stream
return None
|
[
"def",
"_find_datastream",
"(",
"self",
",",
"name",
")",
":",
"for",
"stream",
"in",
"self",
".",
"data_streams",
":",
"#search to see if this is a new datastream or a known one",
"if",
"stream",
".",
"name",
"==",
"name",
":",
"return",
"stream",
"return",
"None"
] |
Find and return if a datastream exists, by name.
|
[
"Find",
"and",
"return",
"if",
"a",
"datastream",
"exists",
"by",
"name",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L634-L639
|
246,959 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry._add_data_attribute
|
def _add_data_attribute(self, data_attr):
"""Add a data attribute to the datastream structure.
Data attributes require processing before they can be interpreted as
datastream. This function ensures that it is adding the attribute to
the correct datastream or creating a new datastream if necessary.
"""
attr_name = data_attr.header.attr_name
stream = self._find_datastream(attr_name)
if stream is None:
stream = Datastream(attr_name)
self.data_streams.append(stream)
stream.add_data_attribute(data_attr)
|
python
|
def _add_data_attribute(self, data_attr):
"""Add a data attribute to the datastream structure.
Data attributes require processing before they can be interpreted as
datastream. This function ensures that it is adding the attribute to
the correct datastream or creating a new datastream if necessary.
"""
attr_name = data_attr.header.attr_name
stream = self._find_datastream(attr_name)
if stream is None:
stream = Datastream(attr_name)
self.data_streams.append(stream)
stream.add_data_attribute(data_attr)
|
[
"def",
"_add_data_attribute",
"(",
"self",
",",
"data_attr",
")",
":",
"attr_name",
"=",
"data_attr",
".",
"header",
".",
"attr_name",
"stream",
"=",
"self",
".",
"_find_datastream",
"(",
"attr_name",
")",
"if",
"stream",
"is",
"None",
":",
"stream",
"=",
"Datastream",
"(",
"attr_name",
")",
"self",
".",
"data_streams",
".",
"append",
"(",
"stream",
")",
"stream",
".",
"add_data_attribute",
"(",
"data_attr",
")"
] |
Add a data attribute to the datastream structure.
Data attributes require processing before they can be interpreted as
datastream. This function ensures that it is adding the attribute to
the correct datastream or creating a new datastream if necessary.
|
[
"Add",
"a",
"data",
"attribute",
"to",
"the",
"datastream",
"structure",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L641-L654
|
246,960 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry._load_attributes
|
def _load_attributes(self, mft_config, attrs_view):
'''Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded in the
attribute *attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
attrs_view (memoryview(bytearray)) - A binary stream that starts at
the first attribute until the end of the entry
'''
offset = 0
load_attrs = mft_config.attribute_load_list
while (attrs_view[offset:offset+4] != b'\xff\xff\xff\xff'):
attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])
if attr_type in load_attrs:
# pass all the information to the attr, as we don't know how
# much content the attribute has
attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])
if not attr.header.attr_type_id is AttrTypes.DATA:
self.attrs[attr.header.attr_type_id].append(attr) #add an attribute
else:
self._add_data_attribute(attr)
offset += attr_len
|
python
|
def _load_attributes(self, mft_config, attrs_view):
'''Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded in the
attribute *attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
attrs_view (memoryview(bytearray)) - A binary stream that starts at
the first attribute until the end of the entry
'''
offset = 0
load_attrs = mft_config.attribute_load_list
while (attrs_view[offset:offset+4] != b'\xff\xff\xff\xff'):
attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])
if attr_type in load_attrs:
# pass all the information to the attr, as we don't know how
# much content the attribute has
attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])
if not attr.header.attr_type_id is AttrTypes.DATA:
self.attrs[attr.header.attr_type_id].append(attr) #add an attribute
else:
self._add_data_attribute(attr)
offset += attr_len
|
[
"def",
"_load_attributes",
"(",
"self",
",",
"mft_config",
",",
"attrs_view",
")",
":",
"offset",
"=",
"0",
"load_attrs",
"=",
"mft_config",
".",
"attribute_load_list",
"while",
"(",
"attrs_view",
"[",
"offset",
":",
"offset",
"+",
"4",
"]",
"!=",
"b'\\xff\\xff\\xff\\xff'",
")",
":",
"attr_type",
",",
"attr_len",
",",
"non_resident",
"=",
"_get_attr_info",
"(",
"attrs_view",
"[",
"offset",
":",
"]",
")",
"if",
"attr_type",
"in",
"load_attrs",
":",
"# pass all the information to the attr, as we don't know how",
"# much content the attribute has",
"attr",
"=",
"Attribute",
".",
"create_from_binary",
"(",
"non_resident",
",",
"mft_config",
".",
"load_dataruns",
",",
"attrs_view",
"[",
"offset",
":",
"]",
")",
"if",
"not",
"attr",
".",
"header",
".",
"attr_type_id",
"is",
"AttrTypes",
".",
"DATA",
":",
"self",
".",
"attrs",
"[",
"attr",
".",
"header",
".",
"attr_type_id",
"]",
".",
"append",
"(",
"attr",
")",
"#add an attribute",
"else",
":",
"self",
".",
"_add_data_attribute",
"(",
"attr",
")",
"offset",
"+=",
"attr_len"
] |
Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded in the
attribute *attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
attrs_view (memoryview(bytearray)) - A binary stream that starts at
the first attribute until the end of the entry
|
[
"Loads",
"all",
"the",
"attributes",
"of",
"an",
"entry",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L656-L681
|
246,961 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry.merge_entries
|
def merge_entries(self, source_entry):
'''Merge two entries.
Allows merging two MFTEntries, copying the attributes and the datastreams
to the correct place.
Args:
source_entry (:obj:`MFTEntry`) - Source entry where the data will be
copied from
'''
#TODO should we change this to an overloaded iadd?
#TODO I really don't like this. We are spending cycles to load things that are going to be discarted. Check another way.
#copy the attributes
for list_attr in source_entry.attrs.values():
for attr in list_attr:
self.attrs[attr.header.attr_type_id].append(attr) #add an attribute
#copy data_streams
for stream in source_entry.data_streams:
dest_stream = self._find_datastream(stream.name)
if dest_stream is not None:
dest_stream.add_from_datastream(stream)
else:
self.data_streams.append(stream)
|
python
|
def merge_entries(self, source_entry):
'''Merge two entries.
Allows merging two MFTEntries, copying the attributes and the datastreams
to the correct place.
Args:
source_entry (:obj:`MFTEntry`) - Source entry where the data will be
copied from
'''
#TODO should we change this to an overloaded iadd?
#TODO I really don't like this. We are spending cycles to load things that are going to be discarted. Check another way.
#copy the attributes
for list_attr in source_entry.attrs.values():
for attr in list_attr:
self.attrs[attr.header.attr_type_id].append(attr) #add an attribute
#copy data_streams
for stream in source_entry.data_streams:
dest_stream = self._find_datastream(stream.name)
if dest_stream is not None:
dest_stream.add_from_datastream(stream)
else:
self.data_streams.append(stream)
|
[
"def",
"merge_entries",
"(",
"self",
",",
"source_entry",
")",
":",
"#TODO should we change this to an overloaded iadd?",
"#TODO I really don't like this. We are spending cycles to load things that are going to be discarted. Check another way.",
"#copy the attributes",
"for",
"list_attr",
"in",
"source_entry",
".",
"attrs",
".",
"values",
"(",
")",
":",
"for",
"attr",
"in",
"list_attr",
":",
"self",
".",
"attrs",
"[",
"attr",
".",
"header",
".",
"attr_type_id",
"]",
".",
"append",
"(",
"attr",
")",
"#add an attribute",
"#copy data_streams",
"for",
"stream",
"in",
"source_entry",
".",
"data_streams",
":",
"dest_stream",
"=",
"self",
".",
"_find_datastream",
"(",
"stream",
".",
"name",
")",
"if",
"dest_stream",
"is",
"not",
"None",
":",
"dest_stream",
".",
"add_from_datastream",
"(",
"stream",
")",
"else",
":",
"self",
".",
"data_streams",
".",
"append",
"(",
"stream",
")"
] |
Merge two entries.
Allows merging two MFTEntries, copying the attributes and the datastreams
to the correct place.
Args:
source_entry (:obj:`MFTEntry`) - Source entry where the data will be
copied from
|
[
"Merge",
"two",
"entries",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L683-L705
|
246,962 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry.get_datastream_names
|
def get_datastream_names(self):
'''Returns a set with the datastream names. If there is no datastream,
returns None
'''
ads_names = set()
for stream in self.data_streams:
ads_names.add(stream.name)
if len(ads_names):
return ads_names
else:
return None
|
python
|
def get_datastream_names(self):
'''Returns a set with the datastream names. If there is no datastream,
returns None
'''
ads_names = set()
for stream in self.data_streams:
ads_names.add(stream.name)
if len(ads_names):
return ads_names
else:
return None
|
[
"def",
"get_datastream_names",
"(",
"self",
")",
":",
"ads_names",
"=",
"set",
"(",
")",
"for",
"stream",
"in",
"self",
".",
"data_streams",
":",
"ads_names",
".",
"add",
"(",
"stream",
".",
"name",
")",
"if",
"len",
"(",
"ads_names",
")",
":",
"return",
"ads_names",
"else",
":",
"return",
"None"
] |
Returns a set with the datastream names. If there is no datastream,
returns None
|
[
"Returns",
"a",
"set",
"with",
"the",
"datastream",
"names",
".",
"If",
"there",
"is",
"no",
"datastream",
"returns",
"None"
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L743-L755
|
246,963 |
jldantas/libmft
|
libmft/api.py
|
MFTEntry.get_main_filename_attr
|
def get_main_filename_attr(self):
'''Returns the main filename attribute of the entry.
As an entry can have multiple FILENAME attributes, this function allows
to return the main one, i.e., the one with the lowest attribute id and
the "biggest" namespace.
'''
fn_attrs = self.get_attributes(AttrTypes.FILE_NAME)
high_attr_id = 0xFFFFFFFF
main_fn = None
if fn_attrs is not None:
#search for the lowest id, that will give the first FILE_NAME
for fn_attr in fn_attrs:
if fn_attr.header.attr_id < high_attr_id:
main_fn = fn_attr
high_attr_id = fn_attr.header.attr_id
#TODO is this necessary? Maybe the first name is always the one with the biggest namespace
# after we have the lowest, search for same name, but the biggest namespace
for fn_attr in fn_attrs:
if main_fn.content.parent_ref == fn_attr.content.parent_ref and \
main_fn.content.parent_seq == fn_attr.content.parent_seq and \
fn_attr.content.name_type.value < main_fn.content.name_type.value:
main_fn = fn_attr
return main_fn
|
python
|
def get_main_filename_attr(self):
'''Returns the main filename attribute of the entry.
As an entry can have multiple FILENAME attributes, this function allows
to return the main one, i.e., the one with the lowest attribute id and
the "biggest" namespace.
'''
fn_attrs = self.get_attributes(AttrTypes.FILE_NAME)
high_attr_id = 0xFFFFFFFF
main_fn = None
if fn_attrs is not None:
#search for the lowest id, that will give the first FILE_NAME
for fn_attr in fn_attrs:
if fn_attr.header.attr_id < high_attr_id:
main_fn = fn_attr
high_attr_id = fn_attr.header.attr_id
#TODO is this necessary? Maybe the first name is always the one with the biggest namespace
# after we have the lowest, search for same name, but the biggest namespace
for fn_attr in fn_attrs:
if main_fn.content.parent_ref == fn_attr.content.parent_ref and \
main_fn.content.parent_seq == fn_attr.content.parent_seq and \
fn_attr.content.name_type.value < main_fn.content.name_type.value:
main_fn = fn_attr
return main_fn
|
[
"def",
"get_main_filename_attr",
"(",
"self",
")",
":",
"fn_attrs",
"=",
"self",
".",
"get_attributes",
"(",
"AttrTypes",
".",
"FILE_NAME",
")",
"high_attr_id",
"=",
"0xFFFFFFFF",
"main_fn",
"=",
"None",
"if",
"fn_attrs",
"is",
"not",
"None",
":",
"#search for the lowest id, that will give the first FILE_NAME",
"for",
"fn_attr",
"in",
"fn_attrs",
":",
"if",
"fn_attr",
".",
"header",
".",
"attr_id",
"<",
"high_attr_id",
":",
"main_fn",
"=",
"fn_attr",
"high_attr_id",
"=",
"fn_attr",
".",
"header",
".",
"attr_id",
"#TODO is this necessary? Maybe the first name is always the with with the biggest namespace",
"# after we have the lowest, search for same name, but the biggest namespace",
"for",
"fn_attr",
"in",
"fn_attrs",
":",
"if",
"main_fn",
".",
"content",
".",
"parent_ref",
"==",
"fn_attr",
".",
"content",
".",
"parent_ref",
"and",
"main_fn",
".",
"content",
".",
"parent_seq",
"==",
"fn_attr",
".",
"content",
".",
"parent_seq",
"and",
"fn_attr",
".",
"content",
".",
"name_type",
".",
"value",
"<",
"main_fn",
".",
"content",
".",
"name_type",
".",
"value",
":",
"main_fn",
"=",
"fn_attr",
"return",
"main_fn"
] |
Returns the main filename attribute of the entry.
As an entry can have multiple FILENAME attributes, this function allows
to return the main one, i.e., the one with the lowest attribute id and
the "biggest" namespace.
|
[
"Returns",
"the",
"main",
"filename",
"attribute",
"of",
"the",
"entry",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L757-L783
|
246,964 |
jldantas/libmft
|
libmft/api.py
|
MFT._load_relationship_info
|
def _load_relationship_info(self):
"""Maps parent and child entries in the MFT.
Because the library expects the MFT file to be provided, it doesn't
have access to anything non-resident. Because of that, if we want all
the information related to the entry, it is necessary to visit all the
entries and map the relationship between each of them.
Note:
Because the data necessary to do this should always happen before
the first fixup entry, we don't need to apply it.
"""
mft_entry_size = self.mft_entry_size
fp = self.file_pointer
record_n = 0
#define the minimum amount that needs to be read
base_struct = struct.Struct("<Q")
base_struct_offset = 32
seq_struct = struct.Struct("<H")
seq_struct_offset = 16
buffer_base = bytearray(base_struct.size)
buffer_seq = bytearray(seq_struct.size)
#go over the file using the entry size as step
for i in range(0, _get_file_size(self.file_pointer), mft_entry_size):
record_n = int(i/mft_entry_size)
fp.seek(i + base_struct_offset)
fp.readinto(buffer_base)
base_ref, base_seq = get_file_reference(base_struct.unpack(buffer_base)[0])
if base_ref:
#calculate and prepare to read the sequence number
fp.seek((base_ref * mft_entry_size) + seq_struct_offset)
fp.readinto(buffer_seq)
seq, = seq_struct.unpack(buffer_seq)
if seq == base_seq: #entries are related
self._entries_parent_child[base_ref].append(record_n)
self._entries_child_parent[record_n] = base_ref
else:
self._number_valid_entries += 1
else:
self._number_valid_entries += 1
|
python
|
def _load_relationship_info(self):
"""Maps parent and child entries in the MFT.
Because the library expects the MFT file to be provided, it doesn't
have access to anything non-resident. Because of that, if we want all
the information related to the entry, it is necessary to visit all the
entries and map the relationship between each of them.
Note:
Because the data necessary to do this should always happen before
the first fixup entry, we don't need to apply it.
"""
mft_entry_size = self.mft_entry_size
fp = self.file_pointer
record_n = 0
#define the minimum amount that needs to be read
base_struct = struct.Struct("<Q")
base_struct_offset = 32
seq_struct = struct.Struct("<H")
seq_struct_offset = 16
buffer_base = bytearray(base_struct.size)
buffer_seq = bytearray(seq_struct.size)
#go over the file using the entry size as step
for i in range(0, _get_file_size(self.file_pointer), mft_entry_size):
record_n = int(i/mft_entry_size)
fp.seek(i + base_struct_offset)
fp.readinto(buffer_base)
base_ref, base_seq = get_file_reference(base_struct.unpack(buffer_base)[0])
if base_ref:
#calculate and prepare to read the sequence number
fp.seek((base_ref * mft_entry_size) + seq_struct_offset)
fp.readinto(buffer_seq)
seq, = seq_struct.unpack(buffer_seq)
if seq == base_seq: #entries are related
self._entries_parent_child[base_ref].append(record_n)
self._entries_child_parent[record_n] = base_ref
else:
self._number_valid_entries += 1
else:
self._number_valid_entries += 1
|
[
"def",
"_load_relationship_info",
"(",
"self",
")",
":",
"mft_entry_size",
"=",
"self",
".",
"mft_entry_size",
"fp",
"=",
"self",
".",
"file_pointer",
"record_n",
"=",
"0",
"#define the minimum amount that needs to be read",
"base_struct",
"=",
"struct",
".",
"Struct",
"(",
"\"<Q\"",
")",
"base_struct_offset",
"=",
"32",
"seq_struct",
"=",
"struct",
".",
"Struct",
"(",
"\"<H\"",
")",
"seq_struct_offset",
"=",
"16",
"buffer_base",
"=",
"bytearray",
"(",
"base_struct",
".",
"size",
")",
"buffer_seq",
"=",
"bytearray",
"(",
"seq_struct",
".",
"size",
")",
"#go over the file using the entry size as setp",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"_get_file_size",
"(",
"self",
".",
"file_pointer",
")",
",",
"mft_entry_size",
")",
":",
"record_n",
"=",
"int",
"(",
"i",
"/",
"mft_entry_size",
")",
"fp",
".",
"seek",
"(",
"i",
"+",
"base_struct_offset",
")",
"fp",
".",
"readinto",
"(",
"buffer_base",
")",
"base_ref",
",",
"base_seq",
"=",
"get_file_reference",
"(",
"base_struct",
".",
"unpack",
"(",
"buffer_base",
")",
"[",
"0",
"]",
")",
"if",
"base_ref",
":",
"#calculate and prepare to read the sequence number",
"fp",
".",
"seek",
"(",
"(",
"base_ref",
"*",
"mft_entry_size",
")",
"+",
"seq_struct_offset",
")",
"fp",
".",
"readinto",
"(",
"buffer_seq",
")",
"seq",
",",
"=",
"seq_struct",
".",
"unpack",
"(",
"buffer_seq",
")",
"if",
"seq",
"==",
"base_seq",
":",
"#entries are related",
"self",
".",
"_entries_parent_child",
"[",
"base_ref",
"]",
".",
"append",
"(",
"record_n",
")",
"self",
".",
"_entries_child_parent",
"[",
"record_n",
"]",
"=",
"base_ref",
"else",
":",
"self",
".",
"_number_valid_entries",
"+=",
"1",
"else",
":",
"self",
".",
"_number_valid_entries",
"+=",
"1"
] |
Maps parent and child entries in the MFT.
Because the library expects the MFT file to be provided, it doesn't
have access to anything non-resident. Because of that, if we want all
the information related to the entry, it is necessary to visit all the
entries and map the relationship between each of them.
Note:
Because the data necessary to do this should always happen before
the first fixup entry, we don't need to apply it.
|
[
"Maps",
"parent",
"and",
"child",
"entries",
"in",
"the",
"MFT",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L841-L881
|
246,965 |
jldantas/libmft
|
libmft/api.py
|
MFT._compute_full_path
|
def _compute_full_path(self, fn_parent_ref, fn_parent_seq):
'''Based on the parent reference and sequence, computes the full path.
The majority of the files in a filesystem have a very small number of
parent directories. By definition, a filesystem is expected to have a
much smaller number of directories than files. As such, we use a function
with the minimal number of arguments to find a parent; that way we can
cache the results easily and speed up the overall code.
Args:
fn_parent_ref (int): Parent reference number
fn_parent_seq (int): Parent sequence number
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path without the file name
'''
names = []
root_id = 5
index, seq = fn_parent_ref, fn_parent_seq
is_orphan = False
#search until hit the root entry
while index != root_id:
try:
parent_entry = self[index]
#if the sequence number is wrong, something changed = orphan
if seq != parent_entry.header.seq_number:
is_orphan = True
break
else:
parent_fn_attr = parent_entry.get_main_filename_attr()
index, seq = parent_fn_attr.content.parent_ref, parent_fn_attr.content.parent_seq
names.append(parent_fn_attr.content.name)
except ValueError as e:
#if the entry itself no longer exists = orphan
is_orphan = True
break
return (is_orphan, "\\".join(reversed(names)))
|
python
|
def _compute_full_path(self, fn_parent_ref, fn_parent_seq):
'''Based on the parent reference and sequence, computes the full path.
The majority of the files in a filesystem have a very small number of
parent directories. By definition, a filesystem is expected to have a
much smaller number of directories than files. As such, we use a function
with the minimal number of arguments to find a parent; that way we can
cache the results easily and speed up the overall code.
Args:
fn_parent_ref (int): Parent reference number
fn_parent_seq (int): Parent sequence number
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path without the file name
'''
names = []
root_id = 5
index, seq = fn_parent_ref, fn_parent_seq
is_orphan = False
#search until hit the root entry
while index != root_id:
try:
parent_entry = self[index]
#if the sequence number is wrong, something changed = orphan
if seq != parent_entry.header.seq_number:
is_orphan = True
break
else:
parent_fn_attr = parent_entry.get_main_filename_attr()
index, seq = parent_fn_attr.content.parent_ref, parent_fn_attr.content.parent_seq
names.append(parent_fn_attr.content.name)
except ValueError as e:
#if the entry itself no longer exists = orphan
is_orphan = True
break
return (is_orphan, "\\".join(reversed(names)))
|
[
"def",
"_compute_full_path",
"(",
"self",
",",
"fn_parent_ref",
",",
"fn_parent_seq",
")",
":",
"names",
"=",
"[",
"]",
"root_id",
"=",
"5",
"index",
",",
"seq",
"=",
"fn_parent_ref",
",",
"fn_parent_seq",
"is_orphan",
"=",
"False",
"#search until hit the root entry",
"while",
"index",
"!=",
"root_id",
":",
"try",
":",
"parent_entry",
"=",
"self",
"[",
"index",
"]",
"#if the sequence number is wrong, something changed = orphan",
"if",
"seq",
"!=",
"parent_entry",
".",
"header",
".",
"seq_number",
":",
"is_orphan",
"=",
"True",
"break",
"else",
":",
"parent_fn_attr",
"=",
"parent_entry",
".",
"get_main_filename_attr",
"(",
")",
"index",
",",
"seq",
"=",
"parent_fn_attr",
".",
"content",
".",
"parent_ref",
",",
"parent_fn_attr",
".",
"content",
".",
"parent_seq",
"names",
".",
"append",
"(",
"parent_fn_attr",
".",
"content",
".",
"name",
")",
"except",
"ValueError",
"as",
"e",
":",
"#if the entry itself no longer exists = orphan",
"is_orphan",
"=",
"True",
"break",
"return",
"(",
"is_orphan",
",",
"\"\\\\\"",
".",
"join",
"(",
"reversed",
"(",
"names",
")",
")",
")"
] |
Based on the parent reference and sequence, computes the full path.
The majority of the files in a filesystem have a very small number of
parent directories. By definition, a filesystem is expected to have a
much smaller number of directories than files. As such, we use a function
with the minimal number of arguments to find a parent; that way we can
cache the results easily and speed up the overall code.
Args:
fn_parent_ref (int): Parent reference number
fn_parent_seq (int): Parent sequence number
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path without the file name
|
[
"Based",
"on",
"the",
"parent",
"reference",
"and",
"sequence",
"computes",
"the",
"full",
"path",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L903-L943
|
246,966 |
jldantas/libmft
|
libmft/api.py
|
MFT.get_full_path
|
def get_full_path(self, fn_attr):
'''Returns the full path of a FILENAME.
The NTFS filesystem allows for things called hardlinks. Hard links are
saved, internally, as different filename attributes. Because of this,
an entry can, when dealing with full paths, have multiple full paths.
As such, this function receives a fn_attr and uses it to compute
the full path for this particular attribute.
Also, the MFT entry might still exist even though the file has been deleted;
depending on the state of the MFT, the path might not be fully reconstructable.
These entries are called "orphan".
Args:
fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path
'''
if fn_attr.header.attr_type_id is not AttrTypes.FILE_NAME:
raise MFTError("Need a filename attribute to compute full path.")
orphan, path = self._compute_full_path(fn_attr.content.parent_ref, fn_attr.content.parent_seq)
return (orphan, "\\".join([path, fn_attr.content.name]))
|
python
|
def get_full_path(self, fn_attr):
'''Returns the full path of a FILENAME.
The NTFS filesystem allows for things called hardlinks. Hard links are
saved, internally, as different filename attributes. Because of this,
an entry can, when dealing with full paths, have multiple full paths.
As such, this function receives a fn_attr and uses it to compute
the full path for this particular attribute.
Also, the MFT entry might still exist even though the file has been deleted;
depending on the state of the MFT, the path might not be fully reconstructable.
These entries are called "orphan".
Args:
fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path
'''
if fn_attr.header.attr_type_id is not AttrTypes.FILE_NAME:
raise MFTError("Need a filename attribute to compute full path.")
orphan, path = self._compute_full_path(fn_attr.content.parent_ref, fn_attr.content.parent_seq)
return (orphan, "\\".join([path, fn_attr.content.name]))
|
[
"def",
"get_full_path",
"(",
"self",
",",
"fn_attr",
")",
":",
"if",
"fn_attr",
".",
"header",
".",
"attr_type_id",
"is",
"not",
"AttrTypes",
".",
"FILE_NAME",
":",
"raise",
"MFTError",
"(",
"\"Need a filename attribute to compute full path.\"",
")",
"orphan",
",",
"path",
"=",
"self",
".",
"_compute_full_path",
"(",
"fn_attr",
".",
"content",
".",
"parent_ref",
",",
"fn_attr",
".",
"content",
".",
"parent_seq",
")",
"return",
"(",
"orphan",
",",
"\"\\\\\"",
".",
"join",
"(",
"[",
"path",
",",
"fn_attr",
".",
"content",
".",
"name",
"]",
")",
")"
] |
Returns the full path of a FILENAME.
The NTFS filesystem allows for things called hardlinks. Hard links are
saved, internally, as different filename attributes. Because of this,
an entry can, when dealing with full paths, have multiple full paths.
As such, this function receives a fn_attr and uses it to compute
the full path for this particular attribute.
Also, the MFT entry might still exist even though the file has been deleted;
depending on the state of the MFT, the path might not be fully reconstructable.
These entries are called "orphan".
Args:
fn_attr (:obj:`Attribute`): An attribute that has a FILENAME as content
Returns:
tuple(bool, str): A tuple where the first element is a boolean that
is ``True`` if the file is orphan and ``False`` if not. The
second element is a string with the full path
|
[
"Returns",
"the",
"full",
"path",
"of",
"a",
"FILENAME",
"."
] |
65a988605fe7663b788bd81dcb52c0a4eaad1549
|
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L946-L972
|
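A hedged sketch tying the path helpers together. It uses only calls that appear in these records: indexing an MFT object (seen in _compute_full_path), entry.get_attributes(AttrTypes.FILE_NAME) (seen in get_main_filename_attr) and mft.get_full_path(...). The MFT constructor is not shown in these records, so passing an open file object is an assumption based on its internal file pointer; the AttrTypes import path is also assumed, and the file name and entry number are placeholders.

from libmft.api import MFT
from libmft.flagsandtypes import AttrTypes  # assumed module for AttrTypes

with open('MFT_dump.bin', 'rb') as mft_file:   # placeholder file name
    mft = MFT(mft_file)   # assumed constructor: an open, readable $MFT dump
    entry = mft[42]       # MFT supports indexing by record number
    # get_attributes returns None when the entry has no FILENAME attribute.
    fn_attrs = entry.get_attributes(AttrTypes.FILE_NAME) or []
    for fn_attr in fn_attrs:
        is_orphan, full_path = mft.get_full_path(fn_attr)
        print(is_orphan, full_path)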
246,967 |
radjkarl/appBase
|
appbase/mainWindowRessources/menubar.py
|
MenuBar.setFullscreen
|
def setFullscreen(self, fullscreen):
"""toggle between fullscreen and normal window"""
if not fullscreen:
self.ckBox_fullscreen.setChecked(False)
self.parent().showNormal()
else:
self.ckBox_fullscreen.setChecked(True)
self.parent().showFullScreen()
|
python
|
def setFullscreen(self, fullscreen):
"""toggle between fullscreen and normal window"""
if not fullscreen:
self.ckBox_fullscreen.setChecked(False)
self.parent().showNormal()
else:
self.ckBox_fullscreen.setChecked(True)
self.parent().showFullScreen()
|
[
"def",
"setFullscreen",
"(",
"self",
",",
"fullscreen",
")",
":",
"if",
"not",
"fullscreen",
":",
"self",
".",
"ckBox_fullscreen",
".",
"setChecked",
"(",
"False",
")",
"self",
".",
"parent",
"(",
")",
".",
"showNormal",
"(",
")",
"else",
":",
"self",
".",
"ckBox_fullscreen",
".",
"setChecked",
"(",
"True",
")",
"self",
".",
"parent",
"(",
")",
".",
"showFullScreen",
"(",
")"
] |
toggle between fullscreen and normal window
|
[
"toggle",
"between",
"fullscreen",
"and",
"normal",
"window"
] |
72b514e6dee7c083f01a2d0b2cc93d46df55bdcb
|
https://github.com/radjkarl/appBase/blob/72b514e6dee7c083f01a2d0b2cc93d46df55bdcb/appbase/mainWindowRessources/menubar.py#L165-L172
|
246,968 |
dhain/potpy
|
potpy/template.py
|
Template.match
|
def match(self, string):
"""Match a string against the template.
If the string matches the template, return a dict mapping template
parameter names to converted values, otherwise return ``None``.
>>> t = Template('Hello my name is {name}!')
>>> t.match('Hello my name is David!')
{'name': 'David'}
>>> t.match('This string does not match.')
"""
m = self.regex.match(string)
if m:
c = self.type_converters
return dict((k, c[k](v) if k in c else v)
for k, v in m.groupdict().iteritems())
return None
|
python
|
def match(self, string):
"""Match a string against the template.
If the string matches the template, return a dict mapping template
parameter names to converted values, otherwise return ``None``.
>>> t = Template('Hello my name is {name}!')
>>> t.match('Hello my name is David!')
{'name': 'David'}
>>> t.match('This string does not match.')
"""
m = self.regex.match(string)
if m:
c = self.type_converters
return dict((k, c[k](v) if k in c else v)
for k, v in m.groupdict().iteritems())
return None
|
[
"def",
"match",
"(",
"self",
",",
"string",
")",
":",
"m",
"=",
"self",
".",
"regex",
".",
"match",
"(",
"string",
")",
"if",
"m",
":",
"c",
"=",
"self",
".",
"type_converters",
"return",
"dict",
"(",
"(",
"k",
",",
"c",
"[",
"k",
"]",
"(",
"v",
")",
"if",
"k",
"in",
"c",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"m",
".",
"groupdict",
"(",
")",
".",
"iteritems",
"(",
")",
")",
"return",
"None"
] |
Match a string against the template.
If the string matches the template, return a dict mapping template
parameter names to converted values, otherwise return ``None``.
>>> t = Template('Hello my name is {name}!')
>>> t.match('Hello my name is David!')
{'name': 'David'}
>>> t.match('This string does not match.')
|
[
"Match",
"a",
"string",
"against",
"the",
"template",
"."
] |
e39a5a84f763fbf144b07a620afb02a5ff3741c9
|
https://github.com/dhain/potpy/blob/e39a5a84f763fbf144b07a620afb02a5ff3741c9/potpy/template.py#L103-L119
|
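A minimal, standalone re-implementation sketch of the placeholder-matching idea behind Template.match above; match_template and its converters argument are illustrative names, not the potpy API.

import re

def match_template(template, string, converters=None):
    # Turn '{name}' placeholders into named regex groups, then return the
    # captured (optionally converted) values, or None when there is no match.
    converters = converters or {}
    parts = re.split(r'\{(\w+)\}', template)
    pattern = ''.join(
        '(?P<%s>.+?)' % part if i % 2 else re.escape(part)
        for i, part in enumerate(parts)
    ) + r'$'
    m = re.match(pattern, string)
    if not m:
        return None
    return {k: converters[k](v) if k in converters else v
            for k, v in m.groupdict().items()}

print(match_template('Hello my name is {name}!', 'Hello my name is David!'))    # {'name': 'David'}
print(match_template('Hello my name is {name}!', 'This string does not match.'))  # None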
246,969 |
svetlyak40wt/twiggy-goodies
|
twiggy_goodies/django_rq.py
|
job
|
def job(func_or_queue, connection=None, *args, **kwargs):
"""This decorator does all what django_rq's one, plus
it group all logged messages using uuid and sets
job_name field as well."""
decorated_func = _job(func_or_queue, connection=connection, *args, **kwargs)
if callable(func_or_queue):
@wraps(decorated_func)
def wrapper(*args, **kwargs):
with log.fields(uuid=uuid.uuid4(),
job_name=decorated_func.__name__):
return decorated_func(*args, **kwargs)
return wrapper
else:
def decorator(func):
@decorated_func
@wraps(func)
def wrapper(*args, **kwargs):
with log.fields(uuid=uuid.uuid4(),
job_name=func.__name__):
return func(*args, **kwargs)
return wrapper
return decorator
|
python
|
def job(func_or_queue, connection=None, *args, **kwargs):
"""This decorator does all what django_rq's one, plus
it group all logged messages using uuid and sets
job_name field as well."""
decorated_func = _job(func_or_queue, connection=connection, *args, **kwargs)
if callable(func_or_queue):
@wraps(decorated_func)
def wrapper(*args, **kwargs):
with log.fields(uuid=uuid.uuid4(),
job_name=decorated_func.__name__):
return decorated_func(*args, **kwargs)
return wrapper
else:
def decorator(func):
@decorated_func
@wraps(func)
def wrapper(*args, **kwargs):
with log.fields(uuid=uuid.uuid4(),
job_name=func.__name__):
return func(*args, **kwargs)
return wrapper
return decorator
|
[
"def",
"job",
"(",
"func_or_queue",
",",
"connection",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"decorated_func",
"=",
"_job",
"(",
"func_or_queue",
",",
"connection",
"=",
"connection",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"callable",
"(",
"func_or_queue",
")",
":",
"@",
"wraps",
"(",
"decorated_func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"log",
".",
"fields",
"(",
"uuid",
"=",
"uuid",
".",
"uuid4",
"(",
")",
",",
"job_name",
"=",
"decorated_func",
".",
"__name__",
")",
":",
"return",
"decorated_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"else",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"decorated_func",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"log",
".",
"fields",
"(",
"uuid",
"=",
"uuid",
".",
"uuid4",
"(",
")",
",",
"job_name",
"=",
"func",
".",
"__name__",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] |
This decorator does all that django_rq's one does, plus
it groups all logged messages using a uuid and sets the
job_name field as well.
|
[
"This",
"decorator",
"does",
"all",
"what",
"django_rq",
"s",
"one",
"plus",
"it",
"group",
"all",
"logged",
"messages",
"using",
"uuid",
"and",
"sets",
"job_name",
"field",
"as",
"well",
"."
] |
71528d5959fab81eb8d0e4373f20d37a013ac00e
|
https://github.com/svetlyak40wt/twiggy-goodies/blob/71528d5959fab81eb8d0e4373f20d37a013ac00e/twiggy_goodies/django_rq.py#L10-L32
|
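A self-contained sketch of the "decorator usable with or without arguments" dispatch that job() relies on; job_like is an illustrative stand-in and the print call simulates log.fields, so this is not the twiggy or django_rq API.

import functools

def job_like(func_or_name=None):
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print('job_name=%s' % func.__name__)   # stands in for log.fields(...)
            return func(*args, **kwargs)
        return wrapper
    if callable(func_or_name):     # bare form: @job_like
        return decorate(func_or_name)
    return decorate                # argument form: @job_like('queue-name')

@job_like
def ping():
    return 'pong'

@job_like('low')
def pong():
    return 'ping'

print(ping(), pong())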
246,970 |
chrisdrackett/django-support
|
support/templatetags/support_tags.py
|
active
|
def active(parser, token):
""" tag to determine if a link is to the current page, and if it is, sets 'link_active'
to True in the context.
Use:
{% active path view strict[True,False] arg1 arg2 arg3 %}
path: the path to check. Generally this is just 'request.path'
view: the view that you want to check against the path. (uses reverse)
strict: if strict is true, then the two paths need to be exactly the same.
if strict is false, then a path of /some/path/ and a view with the url
/some/ will match.
args: args needed by the given view to get its path using reverse.
Example:
{% active request.path "settings" True %}
<a href="{% url settings %}" class="{% if link_active %} active{% endif %}">
"""
args = token.split_contents()
path = args[1]
view = args[2].replace('"', '').replace("'", '')
strict = args[3].replace('"', '').replace("'", '')
arg1 = None; arg2 = None; arg3 = None
if len(args) > 4:
arg1 = args[4]
if len(args) > 5:
arg2 = args[5]
if len(args) > 6:
arg3 = args[6]
if not view:
raise template.TemplateSyntaxError, "%r tag requires at least one view argument" % token.contents.split()[0]
return ActiveTag(view, path, strict, arg1, arg2, arg3)
|
python
|
def active(parser, token):
""" tag to determine if a link is to the current page, and if it is, sets 'link_active'
to True in the context.
Use:
{% active path view strict[True,False] arg1 arg2 arg3 %}
path: the path to check. Generally this is just 'request.path'
view: the view that you want to check against the path. (uses reverse)
strict: if strict is true, then the two paths need to be exactly the same.
if strict is false, then a path of /some/path/ and a view with the url
/some/ will match.
args: args needed by the given view to get its path using reverse.
Example:
{% active request.path "settings" True %}
<a href="{% url settings %}" class="{% if link_active %} active{% endif %}">
"""
args = token.split_contents()
path = args[1]
view = args[2].replace('"', '').replace("'", '')
strict = args[3].replace('"', '').replace("'", '')
arg1 = None; arg2 = None; arg3 = None
if len(args) > 4:
arg1 = args[4]
if len(args) > 5:
arg2 = args[5]
if len(args) > 6:
arg3 = args[6]
if not view:
raise template.TemplateSyntaxError, "%r tag requires at least one view argument" % token.contents.split()[0]
return ActiveTag(view, path, strict, arg1, arg2, arg3)
|
[
"def",
"active",
"(",
"parser",
",",
"token",
")",
":",
"args",
"=",
"token",
".",
"split_contents",
"(",
")",
"path",
"=",
"args",
"[",
"1",
"]",
"view",
"=",
"args",
"[",
"2",
"]",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"replace",
"(",
"\"'\"",
",",
"''",
")",
"strict",
"=",
"args",
"[",
"3",
"]",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"replace",
"(",
"\"'\"",
",",
"''",
")",
"arg1",
"=",
"None",
"arg2",
"=",
"None",
"arg3",
"=",
"None",
"if",
"len",
"(",
"args",
")",
">",
"4",
":",
"arg1",
"=",
"args",
"[",
"4",
"]",
"if",
"len",
"(",
"args",
")",
">",
"5",
":",
"arg2",
"=",
"args",
"[",
"5",
"]",
"if",
"len",
"(",
"args",
")",
">",
"6",
":",
"arg3",
"=",
"args",
"[",
"6",
"]",
"if",
"not",
"view",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
",",
"\"%r tag requires at least one view argument\"",
"%",
"token",
".",
"contents",
".",
"split",
"(",
")",
"[",
"0",
"]",
"return",
"ActiveTag",
"(",
"view",
",",
"path",
",",
"strict",
",",
"arg1",
",",
"arg2",
",",
"arg3",
")"
] |
tag to determine if a link is to the current page, and if it is, sets 'link_active'
to True in the context.
Use:
{% active path view strict[True,False] arg1 arg2 arg3 %}
path: the path to check. Generally this is just 'request.path'
view: the view that you want to check against the path. (uses reverse)
strict: if strict is true, then the two paths need to be exactly the same.
if strict is false, then a path of /some/path/ and a view with the url
/some/ will match.
args: args needed by the given view to get its path using reverse.
Example:
{% active request.path "settings" True %}
<a href="{% url settings %}" class="{% if link_active %} active{% endif %}">
|
[
"tag",
"to",
"determine",
"if",
"a",
"link",
"is",
"to",
"the",
"current",
"page",
"and",
"if",
"it",
"is",
"sets",
"link_active",
"to",
"True",
"in",
"the",
"context",
"."
] |
a4f29421a31797e0b069637a0afec85328b4f0ca
|
https://github.com/chrisdrackett/django-support/blob/a4f29421a31797e0b069637a0afec85328b4f0ca/support/templatetags/support_tags.py#L22-L55
|
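A small sketch of the strict versus non-strict comparison the rendered ActiveTag presumably performs at render time; path_matches is a hypothetical helper written for illustration, not code from this repository.

def path_matches(path, target, strict):
    # strict: paths must be identical; non-strict: any sub-path of target counts.
    return path == target if strict else path.startswith(target)

print(path_matches('/some/path/', '/some/', strict=True))    # False
print(path_matches('/some/path/', '/some/', strict=False))   # True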
246,971 |
chrisdrackett/django-support
|
support/templatetags/support_tags.py
|
full_width_svg
|
def full_width_svg(url, width, height, alt_text=None):
''' Helper to render an SVG that will size to fill
its element while keeping its dimensions.
'''
return {
'ratio': str((float(height)/float(width))*100)[:2],
'url': url,
'alt_text': alt_text
}
|
python
|
def full_width_svg(url, width, height, alt_text=None):
''' Helper to render an SVG that will size to fill
its element while keeping its dimensions.
'''
return {
'ratio': str((float(height)/float(width))*100)[:2],
'url': url,
'alt_text': alt_text
}
|
[
"def",
"full_width_svg",
"(",
"url",
",",
"width",
",",
"height",
",",
"alt_text",
"=",
"None",
")",
":",
"return",
"{",
"'ratio'",
":",
"str",
"(",
"(",
"float",
"(",
"height",
")",
"/",
"float",
"(",
"width",
")",
")",
"*",
"100",
")",
"[",
":",
"2",
"]",
",",
"'url'",
":",
"url",
",",
"'alt_text'",
":",
"alt_text",
"}"
] |
Helper to render an SVG that will size to fill
its element while keeping its dimensions.
|
[
"Helper",
"to",
"render",
"an",
"SVG",
"that",
"will",
"size",
"to",
"fill",
"its",
"element",
"while",
"keeping",
"its",
"dimentions",
"."
] |
a4f29421a31797e0b069637a0afec85328b4f0ca
|
https://github.com/chrisdrackett/django-support/blob/a4f29421a31797e0b069637a0afec85328b4f0ca/support/templatetags/support_tags.py#L160-L170
|
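A worked example of the ratio expression used above; the assumption that the template emits the value as an intrinsic-ratio (padding-bottom) percentage is inferred, not stated in the record.

width, height = 1920, 1080
ratio = str((float(height) / float(width)) * 100)[:2]
print(ratio)   # '56' -> e.g. padding-bottom: 56% keeps the 16:9 aspect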
246,972 |
mirceaulinic/pypluribus
|
pyPluribus/device.py
|
PluribusDevice.open
|
def open(self):
"""Opens a SSH connection with a Pluribus machine."""
self._connection = paramiko.SSHClient()
self._connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self._connection.connect(hostname=self._hostname,
username=self._username,
password=self._password,
timeout=self._timeout,
port=self._port)
self._connection.get_transport().set_keepalive(self._keepalive)
self.connected = True
self.config = PluribusConfig(self)
except paramiko.ssh_exception.AuthenticationException:
raise pyPluribus.exceptions.ConnectionError("Unable to open connection with {hostname}: \
invalid credentials!".format(hostname=self._hostname))
except socket_error as sockerr:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {skterr}. \
Wrong port?".format(skterr=sockerr.message))
except socket_gaierror as sockgai:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {gaierr}. \
Wrong hostname?".format(gaierr=sockgai.message))
|
python
|
def open(self):
"""Opens a SSH connection with a Pluribus machine."""
self._connection = paramiko.SSHClient()
self._connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self._connection.connect(hostname=self._hostname,
username=self._username,
password=self._password,
timeout=self._timeout,
port=self._port)
self._connection.get_transport().set_keepalive(self._keepalive)
self.connected = True
self.config = PluribusConfig(self)
except paramiko.ssh_exception.AuthenticationException:
raise pyPluribus.exceptions.ConnectionError("Unable to open connection with {hostname}: \
invalid credentials!".format(hostname=self._hostname))
except socket_error as sockerr:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {skterr}. \
Wrong port?".format(skterr=sockerr.message))
except socket_gaierror as sockgai:
raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {gaierr}. \
Wrong hostname?".format(gaierr=sockgai.message))
|
[
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"_connection",
"=",
"paramiko",
".",
"SSHClient",
"(",
")",
"self",
".",
"_connection",
".",
"set_missing_host_key_policy",
"(",
"paramiko",
".",
"AutoAddPolicy",
"(",
")",
")",
"try",
":",
"self",
".",
"_connection",
".",
"connect",
"(",
"hostname",
"=",
"self",
".",
"_hostname",
",",
"username",
"=",
"self",
".",
"_username",
",",
"password",
"=",
"self",
".",
"_password",
",",
"timeout",
"=",
"self",
".",
"_timeout",
",",
"port",
"=",
"self",
".",
"_port",
")",
"self",
".",
"_connection",
".",
"get_transport",
"(",
")",
".",
"set_keepalive",
"(",
"self",
".",
"_keepalive",
")",
"self",
".",
"connected",
"=",
"True",
"self",
".",
"config",
"=",
"PluribusConfig",
"(",
"self",
")",
"except",
"paramiko",
".",
"ssh_exception",
".",
"AuthenticationException",
":",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Unable to open connection with {hostname}: \\\n invalid credentials!\"",
".",
"format",
"(",
"hostname",
"=",
"self",
".",
"_hostname",
")",
")",
"except",
"socket_error",
"as",
"sockerr",
":",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Cannot open connection: {skterr}. \\\n Wrong port?\"",
".",
"format",
"(",
"skterr",
"=",
"sockerr",
".",
"message",
")",
")",
"except",
"socket_gaierror",
"as",
"sockgai",
":",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Cannot open connection: {gaierr}. \\\n Wrong hostname?\"",
".",
"format",
"(",
"gaierr",
"=",
"sockgai",
".",
"message",
")",
")"
] |
Opens an SSH connection with a Pluribus machine.
|
[
"Opens",
"a",
"SSH",
"connection",
"with",
"a",
"Pluribus",
"machine",
"."
] |
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
|
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/device.py#L57-L78
|
246,973 |
mirceaulinic/pypluribus
|
pyPluribus/device.py
|
PluribusDevice.close
|
def close(self):
"""Closes the SSH connection if the connection is UP."""
if not self.connected:
return None
if self.config is not None:
if self.config.changed() and not self.config.committed():
try:
self.config.discard() # if configuration changed and not committed, will rollback
except pyPluribus.exceptions.ConfigurationDiscardError as discarderr: # bad luck.
raise pyPluribus.exceptions.ConnectionError("Could not discard the configuration: \
{err}".format(err=discarderr))
self._connection.close() # close SSH connection
self.config = None # reset config object
self._connection = None #
self.connected = False
|
python
|
def close(self):
"""Closes the SSH connection if the connection is UP."""
if not self.connected:
return None
if self.config is not None:
if self.config.changed() and not self.config.committed():
try:
self.config.discard() # if configuration changed and not committed, will rollback
except pyPluribus.exceptions.ConfigurationDiscardError as discarderr: # bad luck.
raise pyPluribus.exceptions.ConnectionError("Could not discard the configuration: \
{err}".format(err=discarderr))
self._connection.close() # close SSH connection
self.config = None # reset config object
self._connection = None #
self.connected = False
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"return",
"None",
"if",
"self",
".",
"config",
"is",
"not",
"None",
":",
"if",
"self",
".",
"config",
".",
"changed",
"(",
")",
"and",
"not",
"self",
".",
"config",
".",
"committed",
"(",
")",
":",
"try",
":",
"self",
".",
"config",
".",
"discard",
"(",
")",
"# if configuration changed and not committed, will rollback",
"except",
"pyPluribus",
".",
"exceptions",
".",
"ConfigurationDiscardError",
"as",
"discarderr",
":",
"# bad luck.",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Could not discard the configuration: \\\n {err}\"",
".",
"format",
"(",
"err",
"=",
"discarderr",
")",
")",
"self",
".",
"_connection",
".",
"close",
"(",
")",
"# close SSH connection",
"self",
".",
"config",
"=",
"None",
"# reset config object",
"self",
".",
"_connection",
"=",
"None",
"#",
"self",
".",
"connected",
"=",
"False"
] |
Closes the SSH connection if the connection is UP.
|
[
"Closes",
"the",
"SSH",
"connection",
"if",
"the",
"connection",
"is",
"UP",
"."
] |
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
|
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/device.py#L80-L94
|
246,974 |
mirceaulinic/pypluribus
|
pyPluribus/device.py
|
PluribusDevice.cli
|
def cli(self, command):
"""
Executes a command and returns raw output from the CLI.
:param command: Command to be executed on the CLI.
:raise pyPluribus.exceptions.TimeoutError: when execution of the command exceeds the timeout
:raise pyPluribus.exceptions.CommandExecutionError: when not able to retrieve the output
:return: Raw output of the command
CLI Example:
.. code-block:: python
device.cli('switch-poweroff')
"""
if not self.connected:
raise pyPluribus.exceptions.ConnectionError("Not connected to the deivce.")
cli_output = ''
ssh_session = self._connection.get_transport().open_session() # opens a new SSH session
ssh_session.settimeout(self._timeout)
ssh_session.exec_command(command)
ssh_output = ''
err_output = ''
ssh_output_makefile = ssh_session.makefile()
ssh_error_makefile = ssh_session.makefile_stderr()
for byte_output in ssh_output_makefile:
ssh_output += byte_output
for byte_error in ssh_error_makefile:
err_output += byte_error
if not ssh_output:
if err_output:
raise pyPluribus.exceptions.CommandExecutionError(err_output)
cli_output = '\n'.join(ssh_output.split(self._ssh_banner)[-1].splitlines()[1:])
if cli_output == 'Please enter username and password:': # rare cases when connection is lost :(
self.open() # retry to open connection
return self.cli(command)
return cli_output
|
python
|
def cli(self, command):
"""
Executes a command and returns raw output from the CLI.
:param command: Command to be executed on the CLI.
:raise pyPluribus.exceptions.TimeoutError: when execution of the command exceeds the timeout
:raise pyPluribus.exceptions.CommandExecutionError: when not able to retrieve the output
:return: Raw output of the command
CLI Example:
.. code-block:: python
device.cli('switch-poweroff')
"""
if not self.connected:
raise pyPluribus.exceptions.ConnectionError("Not connected to the deivce.")
cli_output = ''
ssh_session = self._connection.get_transport().open_session() # opens a new SSH session
ssh_session.settimeout(self._timeout)
ssh_session.exec_command(command)
ssh_output = ''
err_output = ''
ssh_output_makefile = ssh_session.makefile()
ssh_error_makefile = ssh_session.makefile_stderr()
for byte_output in ssh_output_makefile:
ssh_output += byte_output
for byte_error in ssh_error_makefile:
err_output += byte_error
if not ssh_output:
if err_output:
raise pyPluribus.exceptions.CommandExecutionError(err_output)
cli_output = '\n'.join(ssh_output.split(self._ssh_banner)[-1].splitlines()[1:])
if cli_output == 'Please enter username and password:': # rare cases when connection is lost :(
self.open() # retry to open connection
return self.cli(command)
return cli_output
|
[
"def",
"cli",
"(",
"self",
",",
"command",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"ConnectionError",
"(",
"\"Not connected to the deivce.\"",
")",
"cli_output",
"=",
"''",
"ssh_session",
"=",
"self",
".",
"_connection",
".",
"get_transport",
"(",
")",
".",
"open_session",
"(",
")",
"# opens a new SSH session",
"ssh_session",
".",
"settimeout",
"(",
"self",
".",
"_timeout",
")",
"ssh_session",
".",
"exec_command",
"(",
"command",
")",
"ssh_output",
"=",
"''",
"err_output",
"=",
"''",
"ssh_output_makefile",
"=",
"ssh_session",
".",
"makefile",
"(",
")",
"ssh_error_makefile",
"=",
"ssh_session",
".",
"makefile_stderr",
"(",
")",
"for",
"byte_output",
"in",
"ssh_output_makefile",
":",
"ssh_output",
"+=",
"byte_output",
"for",
"byte_error",
"in",
"ssh_error_makefile",
":",
"err_output",
"+=",
"byte_error",
"if",
"not",
"ssh_output",
":",
"if",
"err_output",
":",
"raise",
"pyPluribus",
".",
"exceptions",
".",
"CommandExecutionError",
"(",
"err_output",
")",
"cli_output",
"=",
"'\\n'",
".",
"join",
"(",
"ssh_output",
".",
"split",
"(",
"self",
".",
"_ssh_banner",
")",
"[",
"-",
"1",
"]",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
")",
"if",
"cli_output",
"==",
"'Please enter username and password:'",
":",
"# rare cases when connection is lost :(",
"self",
".",
"open",
"(",
")",
"# retry to open connection",
"return",
"self",
".",
"cli",
"(",
"command",
")",
"return",
"cli_output"
] |
Executes a command and returns raw output from the CLI.
:param command: Command to be executed on the CLI.
:raise pyPluribus.exceptions.TimeoutError: when execution of the command exceeds the timeout
:raise pyPluribus.exceptions.CommandExecutionError: when not able to retrieve the output
:return: Raw output of the command
CLI Example:
.. code-block:: python
device.cli('switch-poweroff')
|
[
"Executes",
"a",
"command",
"and",
"returns",
"raw",
"output",
"from",
"the",
"CLI",
"."
] |
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
|
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/device.py#L98-L145
|
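A standalone illustration of the banner-stripping line in cli() above; the banner text and command output here are invented purely for the demonstration.

banner = 'Welcome to Pluribus\n'
raw = banner + 'pn> switch-info-show\nname: sw01\nstate: online\n'
# Keep only what follows the last banner occurrence, then drop the first (echoed) line.
cleaned = '\n'.join(raw.split(banner)[-1].splitlines()[1:])
print(cleaned)   # name: sw01
                 # state: online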
246,975 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
get_data_xlsx
|
def get_data_xlsx(file_name, file_contents=None, on_demand=False):
'''
Loads the new excel format files. Old format files will automatically get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
|
python
|
def get_data_xlsx(file_name, file_contents=None, on_demand=False):
'''
Loads the new excel format files. Old format files will automatically get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
|
[
"def",
"get_data_xlsx",
"(",
"file_name",
",",
"file_contents",
"=",
"None",
",",
"on_demand",
"=",
"False",
")",
":",
"return",
"get_data_xls",
"(",
"file_name",
",",
"file_contents",
"=",
"file_contents",
",",
"on_demand",
"=",
"on_demand",
")"
] |
Loads the new excel format files. Old format files will automatically get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
|
[
"Loads",
"the",
"new",
"excel",
"format",
"files",
".",
"Old",
"format",
"files",
"will",
"automatically",
"get",
"loaded",
"as",
"well",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L53-L65
|
246,976 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
get_data_xls
|
def get_data_xls(file_name, file_contents=None, on_demand=False):
'''
Loads the old excel format files. New format files will automatically
get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
def tuple_to_iso_date(tuple_date):
'''
Turns a gregorian (year, month, day, hour, minute, nearest_second) into a
standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's
assumed to be a time; if the time part is all zeros it's assumed to be a date;
if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).
Note that datetimes of midnight will come back as date-only strings. A date
of month=0 and day=0 is meaningless, so that part of the coercion is safe.
For more on the hairy nature of Excel date/times see
http://www.lexicon.net/sjmachin/xlrd.html
'''
(y,m,d, hh,mm,ss) = tuple_date
non_zero = lambda n: n!=0
date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else ''
time = "T%02d:%02d:%02d" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else ''
return date+time
def format_excel_val(book, val_type, value, want_tuple_date):
'''Cleans up the incoming excel data'''
# Data val_type Codes:
# EMPTY 0
# TEXT 1 a Unicode string
# NUMBER 2 float
# DATE 3 float
# BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
# ERROR 5
if val_type == 2: # TEXT
if value == int(value): value = int(value)
elif val_type == 3: # NUMBER
datetuple = xlrd.xldate_as_tuple(value, book.datemode)
value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple)
elif val_type == 5: # ERROR
value = xlrd.error_text_from_code[value]
return value
def xlrd_xsl_to_array(file_name, file_contents=None):
'''
Returns:
A list of 2-D tables holding the converted cells of each sheet
'''
book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)
formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False)
row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r))))
data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]
if not on_demand:
for sheet in data:
sheet.load()
book.release_resources()
return data
return xlrd_xsl_to_array(file_name, file_contents)
|
python
|
def get_data_xls(file_name, file_contents=None, on_demand=False):
'''
Loads the old excel format files. New format files will automatically
get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
def tuple_to_iso_date(tuple_date):
'''
Turns a gregorian (year, month, day, hour, minute, nearest_second) into a
standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's
assumed to be a time; if the time part is all zeros it's assumed to be a date;
if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).
Note that datetimes of midnight will come back as date-only strings. A date
of month=0 and day=0 is meaningless, so that part of the coercion is safe.
For more on the hairy nature of Excel date/times see
http://www.lexicon.net/sjmachin/xlrd.html
'''
(y,m,d, hh,mm,ss) = tuple_date
non_zero = lambda n: n!=0
date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else ''
time = "T%02d:%02d:%02d" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else ''
return date+time
def format_excel_val(book, val_type, value, want_tuple_date):
'''Cleans up the incoming excel data'''
# Data val_type Codes:
# EMPTY 0
# TEXT 1 a Unicode string
# NUMBER 2 float
# DATE 3 float
# BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
# ERROR 5
if val_type == 2: # TEXT
if value == int(value): value = int(value)
elif val_type == 3: # NUMBER
datetuple = xlrd.xldate_as_tuple(value, book.datemode)
value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple)
elif val_type == 5: # ERROR
value = xlrd.error_text_from_code[value]
return value
def xlrd_xsl_to_array(file_name, file_contents=None):
'''
Returns:
A list of 2-D tables holding the converted cells of each sheet
'''
book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)
formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False)
row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r))))
data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]
if not on_demand:
for sheet in data:
sheet.load()
book.release_resources()
return data
return xlrd_xsl_to_array(file_name, file_contents)
|
[
"def",
"get_data_xls",
"(",
"file_name",
",",
"file_contents",
"=",
"None",
",",
"on_demand",
"=",
"False",
")",
":",
"def",
"tuple_to_iso_date",
"(",
"tuple_date",
")",
":",
"'''\n Turns a gregorian (year, month, day, hour, minute, nearest_second) into a\n standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's\n assumed to be a time; if the time part is all zeros it's assumed to be a date;\n if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).\n\n Note that datetimes of midnight will come back as date-only strings. A date\n of month=0 and day=0 is meaningless, so that part of the coercion is safe.\n For more on the hairy nature of Excel date/times see\n http://www.lexicon.net/sjmachin/xlrd.html\n '''",
"(",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
")",
"=",
"tuple_date",
"non_zero",
"=",
"lambda",
"n",
":",
"n",
"!=",
"0",
"date",
"=",
"\"%04d-%02d-%02d\"",
"%",
"(",
"y",
",",
"m",
",",
"d",
")",
"if",
"list",
"(",
"filter",
"(",
"non_zero",
",",
"(",
"y",
",",
"m",
",",
"d",
")",
")",
")",
"else",
"''",
"time",
"=",
"\"T%02d:%02d:%02d\"",
"%",
"(",
"hh",
",",
"mm",
",",
"ss",
")",
"if",
"list",
"(",
"filter",
"(",
"non_zero",
",",
"(",
"hh",
",",
"mm",
",",
"ss",
")",
")",
")",
"or",
"not",
"date",
"else",
"''",
"return",
"date",
"+",
"time",
"def",
"format_excel_val",
"(",
"book",
",",
"val_type",
",",
"value",
",",
"want_tuple_date",
")",
":",
"'''Cleans up the incoming excel data'''",
"# Data val_type Codes:",
"# EMPTY 0",
"# TEXT 1 a Unicode string",
"# NUMBER 2 float",
"# DATE 3 float",
"# BOOLEAN 4 int; 1 means TRUE, 0 means FALSE",
"# ERROR 5",
"if",
"val_type",
"==",
"2",
":",
"# TEXT",
"if",
"value",
"==",
"int",
"(",
"value",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"elif",
"val_type",
"==",
"3",
":",
"# NUMBER",
"datetuple",
"=",
"xlrd",
".",
"xldate_as_tuple",
"(",
"value",
",",
"book",
".",
"datemode",
")",
"value",
"=",
"datetuple",
"if",
"want_tuple_date",
"else",
"tuple_to_iso_date",
"(",
"datetuple",
")",
"elif",
"val_type",
"==",
"5",
":",
"# ERROR",
"value",
"=",
"xlrd",
".",
"error_text_from_code",
"[",
"value",
"]",
"return",
"value",
"def",
"xlrd_xsl_to_array",
"(",
"file_name",
",",
"file_contents",
"=",
"None",
")",
":",
"'''\n Returns:\n A list of 2-D tables holding the converted cells of each sheet\n '''",
"book",
"=",
"xlrd",
".",
"open_workbook",
"(",
"file_name",
",",
"file_contents",
"=",
"file_contents",
",",
"on_demand",
"=",
"on_demand",
")",
"formatter",
"=",
"lambda",
"t_v",
":",
"format_excel_val",
"(",
"book",
",",
"t_v",
"[",
"0",
"]",
",",
"t_v",
"[",
"1",
"]",
",",
"False",
")",
"row_builder",
"=",
"lambda",
"s",
",",
"r",
":",
"list",
"(",
"map",
"(",
"formatter",
",",
"zip",
"(",
"s",
".",
"row_types",
"(",
"r",
")",
",",
"s",
".",
"row_values",
"(",
"r",
")",
")",
")",
")",
"data",
"=",
"[",
"SheetYielder",
"(",
"book",
",",
"index",
",",
"row_builder",
")",
"for",
"index",
"in",
"range",
"(",
"book",
".",
"nsheets",
")",
"]",
"if",
"not",
"on_demand",
":",
"for",
"sheet",
"in",
"data",
":",
"sheet",
".",
"load",
"(",
")",
"book",
".",
"release_resources",
"(",
")",
"return",
"data",
"return",
"xlrd_xsl_to_array",
"(",
"file_name",
",",
"file_contents",
")"
] |
Loads the old excel format files. New format files will automatically
get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
|
[
"Loads",
"the",
"old",
"excel",
"format",
"files",
".",
"New",
"format",
"files",
"will",
"automatically",
"get",
"loaded",
"as",
"well",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L145-L210
|
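Worked examples of the date/time coercion performed by the nested tuple_to_iso_date helper above, repeated standalone so the calls actually run.

def tuple_to_iso_date(tuple_date):
    # Same coercion as the nested helper in get_data_xls.
    (y, m, d, hh, mm, ss) = tuple_date
    non_zero = lambda n: n != 0
    date = "%04d-%02d-%02d" % (y, m, d) if list(filter(non_zero, (y, m, d))) else ''
    time = "T%02d:%02d:%02d" % (hh, mm, ss) if list(filter(non_zero, (hh, mm, ss))) or not date else ''
    return date + time

print(tuple_to_iso_date((2020, 5, 17, 0, 0, 0)))   # '2020-05-17'  (midnight collapses to a date)
print(tuple_to_iso_date((0, 0, 0, 9, 30, 0)))      # 'T09:30:00'   (zero date part -> time only)
print(tuple_to_iso_date((0, 0, 0, 0, 0, 0)))       # 'T00:00:00'   (all zeros -> midnight time)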
246,977 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
get_data_excel_xml
|
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
'''
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
'''
# NOTE this method is inefficient and uses code that's not of the highest quality
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
row_builder = lambda s, r: list(s.row_values(r))
return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
|
python
|
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
'''
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
'''
# NOTE this method is inefficient and uses code that's not of the highest quality
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
row_builder = lambda s, r: list(s.row_values(r))
return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
|
[
"def",
"get_data_excel_xml",
"(",
"file_name",
",",
"file_contents",
"=",
"None",
",",
"on_demand",
"=",
"False",
")",
":",
"# NOTE this method is inefficient and uses code that's not of the highest quality",
"if",
"file_contents",
":",
"xml_file",
"=",
"BytesIO",
"(",
"file_contents",
")",
"else",
":",
"xml_file",
"=",
"file_name",
"book",
"=",
"xmlparse",
".",
"ParseExcelXMLFile",
"(",
"xml_file",
")",
"row_builder",
"=",
"lambda",
"s",
",",
"r",
":",
"list",
"(",
"s",
".",
"row_values",
"(",
"r",
")",
")",
"return",
"[",
"XMLSheetYielder",
"(",
"book",
",",
"index",
",",
"row_builder",
")",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"book",
")",
")",
"]"
] |
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy (will be ignored).
|
[
"Loads",
"xml",
"excel",
"format",
"files",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L239-L258
|
246,978 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
get_data_csv
|
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
'''
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
def yield_csv(csv_contents, csv_file):
try:
for line in csv_contents:
yield line
finally:
try:
csv_file.close()
except:
pass
def process_csv(csv_contents, csv_file):
return [line for line in yield_csv(csv_contents, csv_file)]
if file_contents:
csv_file = BytesIO(file_contents)
else:
# Don't use 'open as' format, as on_demand loads shouldn't close the file early
csv_file = open(file_name, 'rb')
reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
if on_demand:
table = yield_csv(reader, csv_file)
else:
table = process_csv(reader, csv_file)
return [table]
|
python
|
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
'''
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
'''
def yield_csv(csv_contents, csv_file):
try:
for line in csv_contents:
yield line
finally:
try:
csv_file.close()
except:
pass
def process_csv(csv_contents, csv_file):
return [line for line in yield_csv(csv_contents, csv_file)]
if file_contents:
csv_file = BytesIO(file_contents)
else:
# Don't use 'open as' format, as on_demand loads shouldn't close the file early
csv_file = open(file_name, 'rb')
reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
if on_demand:
table = yield_csv(reader, csv_file)
else:
table = process_csv(reader, csv_file)
return [table]
|
[
"def",
"get_data_csv",
"(",
"file_name",
",",
"encoding",
"=",
"'utf-8'",
",",
"file_contents",
"=",
"None",
",",
"on_demand",
"=",
"False",
")",
":",
"def",
"yield_csv",
"(",
"csv_contents",
",",
"csv_file",
")",
":",
"try",
":",
"for",
"line",
"in",
"csv_contents",
":",
"yield",
"line",
"finally",
":",
"try",
":",
"csv_file",
".",
"close",
"(",
")",
"except",
":",
"pass",
"def",
"process_csv",
"(",
"csv_contents",
",",
"csv_file",
")",
":",
"return",
"[",
"line",
"for",
"line",
"in",
"yield_csv",
"(",
"csv_contents",
",",
"csv_file",
")",
"]",
"if",
"file_contents",
":",
"csv_file",
"=",
"BytesIO",
"(",
"file_contents",
")",
"else",
":",
"# Don't use 'open as' format, as on_demand loads shouldn't close the file early",
"csv_file",
"=",
"open",
"(",
"file_name",
",",
"'rb'",
")",
"reader",
"=",
"csv",
".",
"reader",
"(",
"csv_file",
",",
"dialect",
"=",
"csv",
".",
"excel",
",",
"encoding",
"=",
"encoding",
")",
"if",
"on_demand",
":",
"table",
"=",
"yield_csv",
"(",
"reader",
",",
"csv_file",
")",
"else",
":",
"table",
"=",
"process_csv",
"(",
"reader",
",",
"csv_file",
")",
"return",
"[",
"table",
"]"
] |
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
|
[
"Gets",
"good",
"old",
"csv",
"data",
"from",
"a",
"file",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L260-L298
|
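A standalone sketch of the yield-then-close pattern used by yield_csv above, with an in-memory file so it runs without a csv on disk.

from io import StringIO

def yield_lines(fileobj):
    try:
        for line in fileobj:
            yield line.rstrip('\n')
    finally:
        fileobj.close()   # runs once the generator is exhausted or discarded

rows = yield_lines(StringIO('a,1\nb,2\n'))
print(list(rows))   # ['a,1', 'b,2']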
246,979 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
write
|
def write(data, file_name, worksheet_names=None):
'''
Writes 2D tables to file.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file (determines type).
worksheet_names: A list of worksheet names (optional).
'''
if re.search(XML_EXT_REGEX, file_name):
return write_xml(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLSX_EXT_REGEX, file_name):
return write_xlsx(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLS_EXT_REGEX, file_name):
return write_xls(data, file_name, worksheet_names=worksheet_names)
elif re.search(CSV_EXT_REGEX, file_name):
return write_csv(data, file_name)
else:
return write_csv(data, file_name)
|
python
|
def write(data, file_name, worksheet_names=None):
'''
Writes 2D tables to file.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file (determines type).
worksheet_names: A list of worksheet names (optional).
'''
if re.search(XML_EXT_REGEX, file_name):
return write_xml(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLSX_EXT_REGEX, file_name):
return write_xlsx(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLS_EXT_REGEX, file_name):
return write_xls(data, file_name, worksheet_names=worksheet_names)
elif re.search(CSV_EXT_REGEX, file_name):
return write_csv(data, file_name)
else:
return write_csv(data, file_name)
|
[
"def",
"write",
"(",
"data",
",",
"file_name",
",",
"worksheet_names",
"=",
"None",
")",
":",
"if",
"re",
".",
"search",
"(",
"XML_EXT_REGEX",
",",
"file_name",
")",
":",
"return",
"write_xml",
"(",
"data",
",",
"file_name",
",",
"worksheet_names",
"=",
"worksheet_names",
")",
"elif",
"re",
".",
"search",
"(",
"XLSX_EXT_REGEX",
",",
"file_name",
")",
":",
"return",
"write_xlsx",
"(",
"data",
",",
"file_name",
",",
"worksheet_names",
"=",
"worksheet_names",
")",
"elif",
"re",
".",
"search",
"(",
"XLS_EXT_REGEX",
",",
"file_name",
")",
":",
"return",
"write_xls",
"(",
"data",
",",
"file_name",
",",
"worksheet_names",
"=",
"worksheet_names",
")",
"elif",
"re",
".",
"search",
"(",
"CSV_EXT_REGEX",
",",
"file_name",
")",
":",
"return",
"write_csv",
"(",
"data",
",",
"file_name",
")",
"else",
":",
"return",
"write_csv",
"(",
"data",
",",
"file_name",
")"
] |
Writes 2D tables to file.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file (determines type).
worksheet_names: A list of worksheet names (optional).
|
[
"Writes",
"2D",
"tables",
"to",
"file",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L300-L318
|
246,980 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
write_xls
|
def write_xls(data, file_name, worksheet_names=None):
'''
Writes out to old excel format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
worksheet_names: A list of worksheet names (optional).
'''
workbook = xlwt.Workbook()
for sheet_index, sheet_data in enumerate(data):
if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]:
name = worksheet_names[sheet_index]
else:
name = 'Worksheet {}'.format(sheet_index)
sheet = workbook.add_sheet(name)
for row_index, row in enumerate(sheet_data):
for col_index, value in enumerate(row):
sheet.write(row_index, col_index, value)
workbook.save(file_name)
|
python
|
def write_xls(data, file_name, worksheet_names=None):
'''
Writes out to old excel format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
worksheet_names: A list of worksheet names (optional).
'''
workbook = xlwt.Workbook()
for sheet_index, sheet_data in enumerate(data):
if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]:
name = worksheet_names[sheet_index]
else:
name = 'Worksheet {}'.format(sheet_index)
sheet = workbook.add_sheet(name)
for row_index, row in enumerate(sheet_data):
for col_index, value in enumerate(row):
sheet.write(row_index, col_index, value)
workbook.save(file_name)
|
[
"def",
"write_xls",
"(",
"data",
",",
"file_name",
",",
"worksheet_names",
"=",
"None",
")",
":",
"workbook",
"=",
"xlwt",
".",
"Workbook",
"(",
")",
"for",
"sheet_index",
",",
"sheet_data",
"in",
"enumerate",
"(",
"data",
")",
":",
"if",
"worksheet_names",
"and",
"sheet_index",
"<",
"len",
"(",
"worksheet_names",
")",
"and",
"worksheet_names",
"[",
"sheet_index",
"]",
":",
"name",
"=",
"worksheet_names",
"[",
"sheet_index",
"]",
"else",
":",
"name",
"=",
"'Worksheet {}'",
".",
"format",
"(",
"sheet_index",
")",
"sheet",
"=",
"workbook",
".",
"add_sheet",
"(",
"name",
")",
"for",
"row_index",
",",
"row",
"in",
"enumerate",
"(",
"sheet_data",
")",
":",
"for",
"col_index",
",",
"value",
"in",
"enumerate",
"(",
"row",
")",
":",
"sheet",
".",
"write",
"(",
"row_index",
",",
"col_index",
",",
"value",
")",
"workbook",
".",
"save",
"(",
"file_name",
")"
] |
Writes out to old excel format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
worksheet_names: A list of worksheet names (optional).
|
[
"Writes",
"out",
"to",
"old",
"excel",
"format",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L342-L361
|
246,981 |
OpenGov/python_data_wrap
|
datawrap/tableloader.py
|
write_csv
|
def write_csv(data, file_name, encoding='utf-8'):
'''
Writes out to csv format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
'''
name_extension = len(data) > 1
root, ext = os.path.splitext(file_name)
for i, sheet in enumerate(data):
fname = file_name if not name_extension else root+"_"+str(i)+ext
with open(fname, 'wb') as date_file:
csv_file = csv.writer(date_file, encoding=encoding)
for line in sheet:
csv_file.writerow(line)
|
python
|
def write_csv(data, file_name, encoding='utf-8'):
'''
Writes out to csv format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
'''
name_extension = len(data) > 1
root, ext = os.path.splitext(file_name)
for i, sheet in enumerate(data):
fname = file_name if not name_extension else root+"_"+str(i)+ext
with open(fname, 'wb') as date_file:
csv_file = csv.writer(date_file, encoding=encoding)
for line in sheet:
csv_file.writerow(line)
|
[
"def",
"write_csv",
"(",
"data",
",",
"file_name",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"name_extension",
"=",
"len",
"(",
"data",
")",
">",
"1",
"root",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"for",
"i",
",",
"sheet",
"in",
"enumerate",
"(",
"data",
")",
":",
"fname",
"=",
"file_name",
"if",
"not",
"name_extension",
"else",
"root",
"+",
"\"_\"",
"+",
"str",
"(",
"i",
")",
"+",
"ext",
"with",
"open",
"(",
"fname",
",",
"'wb'",
")",
"as",
"date_file",
":",
"csv_file",
"=",
"csv",
".",
"writer",
"(",
"date_file",
",",
"encoding",
"=",
"encoding",
")",
"for",
"line",
"in",
"sheet",
":",
"csv_file",
".",
"writerow",
"(",
"line",
")"
] |
Writes out to csv format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
|
[
"Writes",
"out",
"to",
"csv",
"format",
"."
] |
7de38bb30d7a500adc336a4a7999528d753e5600
|
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/tableloader.py#L363-L379
|
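A small sketch of the output-file naming scheme write_csv applies when more than one sheet is supplied; sheet_file_names is an illustrative helper, not part of the module.

import os

def sheet_file_names(file_name, n_sheets):
    root, ext = os.path.splitext(file_name)
    if n_sheets <= 1:
        return [file_name]
    return [root + "_" + str(i) + ext for i in range(n_sheets)]

print(sheet_file_names('out.csv', 1))   # ['out.csv']
print(sheet_file_names('out.csv', 2))   # ['out_0.csv', 'out_1.csv']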
246,982 |
scdoshi/django-bits
|
bits/general.py
|
get_group
|
def get_group(name):
"""
Return group with given name, if it exists. Check cache first.
"""
group = cache.get('bits.general.group_%s' % name)
if not group:
group = Group.objects.get(name=name)
cache.set('bits.general.group_%s' % name, group, 365 * 24 * 60 * 60)
return group
|
python
|
def get_group(name):
"""
Return group with given name, if it exists. Check cache first.
"""
group = cache.get('bits.general.group_%s' % name)
if not group:
group = Group.objects.get(name=name)
cache.set('bits.general.group_%s' % name, group, 365 * 24 * 60 * 60)
return group
|
[
"def",
"get_group",
"(",
"name",
")",
":",
"group",
"=",
"cache",
".",
"get",
"(",
"'bits.general.group_%s'",
"%",
"name",
")",
"if",
"not",
"group",
":",
"group",
"=",
"Group",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"name",
")",
"cache",
".",
"set",
"(",
"'bits.general.group_%s'",
"%",
"name",
",",
"group",
",",
"365",
"*",
"24",
"*",
"60",
"*",
"60",
")",
"return",
"group"
] |
Return group with given name, if it exists. Check cache first.
|
[
"Return",
"group",
"with",
"given",
"name",
"if",
"it",
"exists",
".",
"Check",
"cache",
"first",
"."
] |
0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f
|
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/general.py#L57-L65
|
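A generic cache-aside sketch of the pattern get_group uses; the dict and the loader lambda stand in for Django's cache backend and Group.objects.get, and are not the real APIs.

_cache = {}

def cached_lookup(name, loader):
    key = 'bits.general.group_%s' % name
    value = _cache.get(key)
    if not value:
        value = loader(name)   # e.g. Group.objects.get(name=name)
        _cache[key] = value    # the real code also passes a one-year timeout
    return value

print(cached_lookup('editors', lambda n: {'name': n}))   # loaded, then cached
print(cached_lookup('editors', lambda n: {'name': n}))   # served from the cache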
246,983 |
abe-winter/pg13-py
|
pg13/sqex.py
|
uniqify
|
def uniqify(list_):
"inefficient on long lists; short lists only. preserves order."
a=[]
for x in list_:
if x not in a: a.append(x)
return a
|
python
|
def uniqify(list_):
"inefficient on long lists; short lists only. preserves order."
a=[]
for x in list_:
if x not in a: a.append(x)
return a
|
[
"def",
"uniqify",
"(",
"list_",
")",
":",
"a",
"=",
"[",
"]",
"for",
"x",
"in",
"list_",
":",
"if",
"x",
"not",
"in",
"a",
":",
"a",
".",
"append",
"(",
"x",
")",
"return",
"a"
] |
inefficient on long lists; short lists only. preserves order.
|
[
"inefficient",
"on",
"long",
"lists",
";",
"short",
"lists",
"only",
".",
"preserves",
"order",
"."
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L39-L44
|
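A quick usage example of uniqify, repeated standalone so the call runs: duplicates are dropped while first-seen order is preserved.

def uniqify(list_):
    # Copied from the record above.
    a = []
    for x in list_:
        if x not in a:
            a.append(x)
    return a

print(uniqify([3, 1, 3, 2, 1]))   # [3, 1, 2]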
246,984 |
abe-winter/pg13-py
|
pg13/sqex.py
|
eliminate_sequential_children
|
def eliminate_sequential_children(paths):
"helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path"
return [p for i,p in enumerate(paths) if not ((i>0 and paths[i-1]==p[:-1]) or (i>1 and paths[i-2]==p[:-1]))]
|
python
|
def eliminate_sequential_children(paths):
"helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path"
return [p for i,p in enumerate(paths) if not ((i>0 and paths[i-1]==p[:-1]) or (i>1 and paths[i-2]==p[:-1]))]
|
[
"def",
"eliminate_sequential_children",
"(",
"paths",
")",
":",
"return",
"[",
"p",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"paths",
")",
"if",
"not",
"(",
"(",
"i",
">",
"0",
"and",
"paths",
"[",
"i",
"-",
"1",
"]",
"==",
"p",
"[",
":",
"-",
"1",
"]",
")",
"or",
"(",
"i",
">",
"1",
"and",
"paths",
"[",
"i",
"-",
"2",
"]",
"==",
"p",
"[",
":",
"-",
"1",
"]",
")",
")",
"]"
] |
helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path
|
[
"helper",
"for",
"infer_columns",
".",
"removes",
"paths",
"that",
"are",
"direct",
"children",
"of",
"the",
"n",
"-",
"1",
"or",
"n",
"-",
"2",
"path"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L46-L48
|
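A worked example of eliminate_sequential_children, repeated standalone so the call runs: paths that directly follow their parent (one or two positions earlier) are removed.

def eliminate_sequential_children(paths):
    # Copied from the record above.
    return [p for i, p in enumerate(paths)
            if not ((i > 0 and paths[i - 1] == p[:-1]) or (i > 1 and paths[i - 2] == p[:-1]))]

paths = [('a',), ('a', 'b'), ('a', 'b', 'c')]
# ('a', 'b') follows its parent ('a',), and ('a', 'b', 'c') follows its parent
# ('a', 'b') two positions back, so both are dropped.
print(eliminate_sequential_children(paths))   # [('a',)]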
246,985 |
abe-winter/pg13-py
|
pg13/sqex.py
|
collapse_group_expr
|
def collapse_group_expr(groupx,cols,ret_row):
"collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this."
for i,col in enumerate(cols.children):
if col==groupx: ret_row[i]=ret_row[i][0]
return ret_row
|
python
|
def collapse_group_expr(groupx,cols,ret_row):
"collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this."
for i,col in enumerate(cols.children):
if col==groupx: ret_row[i]=ret_row[i][0]
return ret_row
|
[
"def",
"collapse_group_expr",
"(",
"groupx",
",",
"cols",
",",
"ret_row",
")",
":",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"cols",
".",
"children",
")",
":",
"if",
"col",
"==",
"groupx",
":",
"ret_row",
"[",
"i",
"]",
"=",
"ret_row",
"[",
"i",
"]",
"[",
"0",
"]",
"return",
"ret_row"
] |
collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this.
|
[
"collapses",
"columns",
"matching",
"the",
"group",
"expression",
".",
"I",
"m",
"sure",
"this",
"is",
"buggy",
";",
"look",
"at",
"a",
"real",
"DB",
"s",
"imp",
"of",
"this",
"."
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L209-L213
|
246,986 |
abe-winter/pg13-py
|
pg13/sqex.py
|
NameIndexer.update_aliases
|
def update_aliases(aliases,aonly,x):
"helper for ctor. takes AliasX or string as second arg"
if isinstance(x,basestring): aliases[x]=x
elif isinstance(x,sqparse2.AliasX):
if not isinstance(x.alias,basestring): raise TypeError('alias not string',type(x.alias))
if isinstance(x.name,sqparse2.NameX): aliases.update({x.alias:x.name.name,x.name.name:x.name.name})
elif isinstance(x.name,sqparse2.SelectX):
aliases.update({x.alias:x.alias})
aonly[x.alias]=x.name
else: raise TypeError('aliasx_unk_thing',type(x.name)) # pragma: no cover
else: raise TypeError(type(x)) # pragma: no cover
|
python
|
def update_aliases(aliases,aonly,x):
"helper for ctor. takes AliasX or string as second arg"
if isinstance(x,basestring): aliases[x]=x
elif isinstance(x,sqparse2.AliasX):
if not isinstance(x.alias,basestring): raise TypeError('alias not string',type(x.alias))
if isinstance(x.name,sqparse2.NameX): aliases.update({x.alias:x.name.name,x.name.name:x.name.name})
elif isinstance(x.name,sqparse2.SelectX):
aliases.update({x.alias:x.alias})
aonly[x.alias]=x.name
else: raise TypeError('aliasx_unk_thing',type(x.name)) # pragma: no cover
else: raise TypeError(type(x)) # pragma: no cover
|
[
"def",
"update_aliases",
"(",
"aliases",
",",
"aonly",
",",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
":",
"aliases",
"[",
"x",
"]",
"=",
"x",
"elif",
"isinstance",
"(",
"x",
",",
"sqparse2",
".",
"AliasX",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
".",
"alias",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"'alias not string'",
",",
"type",
"(",
"x",
".",
"alias",
")",
")",
"if",
"isinstance",
"(",
"x",
".",
"name",
",",
"sqparse2",
".",
"NameX",
")",
":",
"aliases",
".",
"update",
"(",
"{",
"x",
".",
"alias",
":",
"x",
".",
"name",
".",
"name",
",",
"x",
".",
"name",
".",
"name",
":",
"x",
".",
"name",
".",
"name",
"}",
")",
"elif",
"isinstance",
"(",
"x",
".",
"name",
",",
"sqparse2",
".",
"SelectX",
")",
":",
"aliases",
".",
"update",
"(",
"{",
"x",
".",
"alias",
":",
"x",
".",
"alias",
"}",
")",
"aonly",
"[",
"x",
".",
"alias",
"]",
"=",
"x",
".",
"name",
"else",
":",
"raise",
"TypeError",
"(",
"'aliasx_unk_thing'",
",",
"type",
"(",
"x",
".",
"name",
")",
")",
"# pragma: no cover",
"else",
":",
"raise",
"TypeError",
"(",
"type",
"(",
"x",
")",
")",
"# pragma: no cover"
] |
helper for ctor. takes AliasX or string as second arg
|
[
"helper",
"for",
"ctor",
".",
"takes",
"AliasX",
"or",
"string",
"as",
"second",
"arg"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L104-L114
|
246,987 |
abe-winter/pg13-py
|
pg13/sqex.py
|
NameIndexer.resolve_aonly
|
def resolve_aonly(self,tables_dict,table_ctor):
"circular depends on pgmock.Table. refactor."
for alias,selectx in self.aonly.items():
table = table_ctor(alias,infer_columns(selectx,tables_dict),None)
table.rows = run_select(selectx,tables_dict,table_ctor)
self.aonly[alias] = table
self.aonly_resolved = True
|
python
|
def resolve_aonly(self,tables_dict,table_ctor):
"circular depends on pgmock.Table. refactor."
for alias,selectx in self.aonly.items():
table = table_ctor(alias,infer_columns(selectx,tables_dict),None)
table.rows = run_select(selectx,tables_dict,table_ctor)
self.aonly[alias] = table
self.aonly_resolved = True
|
[
"def",
"resolve_aonly",
"(",
"self",
",",
"tables_dict",
",",
"table_ctor",
")",
":",
"for",
"alias",
",",
"selectx",
"in",
"self",
".",
"aonly",
".",
"items",
"(",
")",
":",
"table",
"=",
"table_ctor",
"(",
"alias",
",",
"infer_columns",
"(",
"selectx",
",",
"tables_dict",
")",
",",
"None",
")",
"table",
".",
"rows",
"=",
"run_select",
"(",
"selectx",
",",
"tables_dict",
",",
"table_ctor",
")",
"self",
".",
"aonly",
"[",
"alias",
"]",
"=",
"table",
"self",
".",
"aonly_resolved",
"=",
"True"
] |
circular depends on pgmock.Table. refactor.
|
[
"circular",
"depends",
"on",
"pgmock",
".",
"Table",
".",
"refactor",
"."
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L134-L140
|
246,988 |
abe-winter/pg13-py
|
pg13/sqex.py
|
NameIndexer.rowget
|
def rowget(self,tables_dict,row_list,index):
"row_list in self.row_order"
tmp=row_list
for i in self.index_tuple(tables_dict,index,False): tmp=tmp[i]
return tmp
|
python
|
def rowget(self,tables_dict,row_list,index):
"row_list in self.row_order"
tmp=row_list
for i in self.index_tuple(tables_dict,index,False): tmp=tmp[i]
return tmp
|
[
"def",
"rowget",
"(",
"self",
",",
"tables_dict",
",",
"row_list",
",",
"index",
")",
":",
"tmp",
"=",
"row_list",
"for",
"i",
"in",
"self",
".",
"index_tuple",
"(",
"tables_dict",
",",
"index",
",",
"False",
")",
":",
"tmp",
"=",
"tmp",
"[",
"i",
"]",
"return",
"tmp"
] |
row_list in self.row_order
|
[
"row_list",
"in",
"self",
".",
"row_order"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L162-L166
|
246,989 |
abe-winter/pg13-py
|
pg13/sqex.py
|
Evaluator.eval_agg_call
|
def eval_agg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume multiple rows"
if not isinstance(self.c_row,list): raise TypeError('aggregate function expected a list of rows')
if len(exp.args.children)!=1: raise ValueError('aggregate function expected a single value',exp.args)
arg,=exp.args.children # intentional: error if len!=1
vals=[Evaluator(c_r,self.nix,self.tables).eval(arg) for c_r in self.c_row]
if not vals: return None
if exp.f=='min': return min(vals)
elif exp.f=='max': return max(vals)
elif exp.f=='count': return len(vals)
else: raise NotImplementedError('unk_func',exp.f)
|
python
|
def eval_agg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume multiple rows"
if not isinstance(self.c_row,list): raise TypeError('aggregate function expected a list of rows')
if len(exp.args.children)!=1: raise ValueError('aggregate function expected a single value',exp.args)
arg,=exp.args.children # intentional: error if len!=1
vals=[Evaluator(c_r,self.nix,self.tables).eval(arg) for c_r in self.c_row]
if not vals: return None
if exp.f=='min': return min(vals)
elif exp.f=='max': return max(vals)
elif exp.f=='count': return len(vals)
else: raise NotImplementedError('unk_func',exp.f)
|
[
"def",
"eval_agg_call",
"(",
"self",
",",
"exp",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"c_row",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'aggregate function expected a list of rows'",
")",
"if",
"len",
"(",
"exp",
".",
"args",
".",
"children",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'aggregate function expected a single value'",
",",
"exp",
".",
"args",
")",
"arg",
",",
"=",
"exp",
".",
"args",
".",
"children",
"# intentional: error if len!=1",
"vals",
"=",
"[",
"Evaluator",
"(",
"c_r",
",",
"self",
".",
"nix",
",",
"self",
".",
"tables",
")",
".",
"eval",
"(",
"arg",
")",
"for",
"c_r",
"in",
"self",
".",
"c_row",
"]",
"if",
"not",
"vals",
":",
"return",
"None",
"if",
"exp",
".",
"f",
"==",
"'min'",
":",
"return",
"min",
"(",
"vals",
")",
"elif",
"exp",
".",
"f",
"==",
"'max'",
":",
"return",
"max",
"(",
"vals",
")",
"elif",
"exp",
".",
"f",
"==",
"'count'",
":",
"return",
"len",
"(",
"vals",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unk_func'",
",",
"exp",
".",
"f",
")"
] |
helper for eval_callx; evaluator for CallX that consume multiple rows
|
[
"helper",
"for",
"eval_callx",
";",
"evaluator",
"for",
"CallX",
"that",
"consume",
"multiple",
"rows"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L261-L271
|
246,990 |
abe-winter/pg13-py
|
pg13/sqex.py
|
Evaluator.eval_nonagg_call
|
def eval_nonagg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume a single value"
# todo: get more concrete about argument counts
args=self.eval(exp.args)
if exp.f=='coalesce':
a,b=args # todo: does coalesce take more than 2 args?
return b if a is None else a
elif exp.f=='unnest': return self.eval(exp.args)[0] # note: run_select does some work in this case too
elif exp.f in ('to_tsquery','to_tsvector'): return set(self.eval(exp.args.children[0]).split())
else: raise NotImplementedError('unk_function',exp.f)
|
python
|
def eval_nonagg_call(self, exp):
"helper for eval_callx; evaluator for CallX that consume a single value"
# todo: get more concrete about argument counts
args=self.eval(exp.args)
if exp.f=='coalesce':
a,b=args # todo: does coalesce take more than 2 args?
return b if a is None else a
elif exp.f=='unnest': return self.eval(exp.args)[0] # note: run_select does some work in this case too
elif exp.f in ('to_tsquery','to_tsvector'): return set(self.eval(exp.args.children[0]).split())
else: raise NotImplementedError('unk_function',exp.f)
|
[
"def",
"eval_nonagg_call",
"(",
"self",
",",
"exp",
")",
":",
"# todo: get more concrete about argument counts",
"args",
"=",
"self",
".",
"eval",
"(",
"exp",
".",
"args",
")",
"if",
"exp",
".",
"f",
"==",
"'coalesce'",
":",
"a",
",",
"b",
"=",
"args",
"# todo: does coalesce take more than 2 args?",
"return",
"b",
"if",
"a",
"is",
"None",
"else",
"a",
"elif",
"exp",
".",
"f",
"==",
"'unnest'",
":",
"return",
"self",
".",
"eval",
"(",
"exp",
".",
"args",
")",
"[",
"0",
"]",
"# note: run_select does some work in this case too",
"elif",
"exp",
".",
"f",
"in",
"(",
"'to_tsquery'",
",",
"'to_tsvector'",
")",
":",
"return",
"set",
"(",
"self",
".",
"eval",
"(",
"exp",
".",
"args",
".",
"children",
"[",
"0",
"]",
")",
".",
"split",
"(",
")",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unk_function'",
",",
"exp",
".",
"f",
")"
] |
helper for eval_callx; evaluator for CallX that consume a single value
|
[
"helper",
"for",
"eval_callx",
";",
"evaluator",
"for",
"CallX",
"that",
"consume",
"a",
"single",
"value"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L273-L282
|
246,991 |
abe-winter/pg13-py
|
pg13/sqex.py
|
Evaluator.eval_callx
|
def eval_callx(self, exp):
"dispatch for CallX"
# below: this isn't contains(exp,consumes_row) -- it's just checking the current expression
return (self.eval_agg_call if consumes_rows(exp) else self.eval_nonagg_call)(exp)
|
python
|
def eval_callx(self, exp):
"dispatch for CallX"
# below: this isn't contains(exp,consumes_row) -- it's just checking the current expression
return (self.eval_agg_call if consumes_rows(exp) else self.eval_nonagg_call)(exp)
|
[
"def",
"eval_callx",
"(",
"self",
",",
"exp",
")",
":",
"# below: this isn't contains(exp,consumes_row) -- it's just checking the current expression",
"return",
"(",
"self",
".",
"eval_agg_call",
"if",
"consumes_rows",
"(",
"exp",
")",
"else",
"self",
".",
"eval_nonagg_call",
")",
"(",
"exp",
")"
] |
dispatch for CallX
|
[
"dispatch",
"for",
"CallX"
] |
c78806f99f35541a8756987e86edca3438aa97f5
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqex.py#L284-L287
|
246,992 |
alfredodeza/pecan-notario
|
pecan_notario/decorator.py
|
redirect_to_handler
|
def redirect_to_handler(error, location):
"""
Cause a requset with an error to internally redirect to a URI path.
This is generally for internal use, but can be called from within a Pecan
controller to trigger a validation failure from *within* the controller
itself, e.g.::
@expose()
@validate(some_schema, '/some/handler')
def some_controller(self, **kw):
if some_bad_condition():
error_exception = ...
redirect_to_handler(error_exception, '/some/handler')
"""
if callable(location):
location = location()
request.environ['REQUEST_METHOD'] = 'GET'
redirect(location, internal=True)
|
python
|
def redirect_to_handler(error, location):
"""
Cause a requset with an error to internally redirect to a URI path.
This is generally for internal use, but can be called from within a Pecan
controller to trigger a validation failure from *within* the controller
itself, e.g.::
@expose()
@validate(some_schema, '/some/handler')
def some_controller(self, **kw):
if some_bad_condition():
error_exception = ...
redirect_to_handler(error_exception, '/some/handler')
"""
if callable(location):
location = location()
request.environ['REQUEST_METHOD'] = 'GET'
redirect(location, internal=True)
|
[
"def",
"redirect_to_handler",
"(",
"error",
",",
"location",
")",
":",
"if",
"callable",
"(",
"location",
")",
":",
"location",
"=",
"location",
"(",
")",
"request",
".",
"environ",
"[",
"'REQUEST_METHOD'",
"]",
"=",
"'GET'",
"redirect",
"(",
"location",
",",
"internal",
"=",
"True",
")"
] |
Cause a requset with an error to internally redirect to a URI path.
This is generally for internal use, but can be called from within a Pecan
controller to trigger a validation failure from *within* the controller
itself, e.g.::
@expose()
@validate(some_schema, '/some/handler')
def some_controller(self, **kw):
if some_bad_condition():
error_exception = ...
redirect_to_handler(error_exception, '/some/handler')
|
[
"Cause",
"a",
"requset",
"with",
"an",
"error",
"to",
"internally",
"redirect",
"to",
"a",
"URI",
"path",
"."
] |
824e593e0ef16f276b5636315ba88f5c63b54816
|
https://github.com/alfredodeza/pecan-notario/blob/824e593e0ef16f276b5636315ba88f5c63b54816/pecan_notario/decorator.py#L63-L81
|
246,993 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._transfer_str
|
def _transfer_str(self, conn, tmp, name, data):
''' transfer string to remote file '''
if type(data) == dict:
data = utils.jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
afo.write(data.encode('utf8'))
except:
raise errors.AnsibleError("failure encoding into utf-8")
afo.flush()
afo.close()
remote = os.path.join(tmp, name)
try:
conn.put_file(afile, remote)
finally:
os.unlink(afile)
return remote
|
python
|
def _transfer_str(self, conn, tmp, name, data):
''' transfer string to remote file '''
if type(data) == dict:
data = utils.jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
afo.write(data.encode('utf8'))
except:
raise errors.AnsibleError("failure encoding into utf-8")
afo.flush()
afo.close()
remote = os.path.join(tmp, name)
try:
conn.put_file(afile, remote)
finally:
os.unlink(afile)
return remote
|
[
"def",
"_transfer_str",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"name",
",",
"data",
")",
":",
"if",
"type",
"(",
"data",
")",
"==",
"dict",
":",
"data",
"=",
"utils",
".",
"jsonify",
"(",
"data",
")",
"afd",
",",
"afile",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"afo",
"=",
"os",
".",
"fdopen",
"(",
"afd",
",",
"'w'",
")",
"try",
":",
"afo",
".",
"write",
"(",
"data",
".",
"encode",
"(",
"'utf8'",
")",
")",
"except",
":",
"raise",
"errors",
".",
"AnsibleError",
"(",
"\"failure encoding into utf-8\"",
")",
"afo",
".",
"flush",
"(",
")",
"afo",
".",
"close",
"(",
")",
"remote",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp",
",",
"name",
")",
"try",
":",
"conn",
".",
"put_file",
"(",
"afile",
",",
"remote",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"afile",
")",
"return",
"remote"
] |
transfer string to remote file
|
[
"transfer",
"string",
"to",
"remote",
"file"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L168-L188
|
246,994 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._execute_module
|
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None):
''' runs a module that has already been transferred '''
# hack to support fireball mode
if module_name == 'fireball':
args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
if 'port' not in args:
args += " port=%s" % C.ZEROMQ_PORT
(remote_module_path, is_new_style, shebang) = self._copy_module(conn, tmp, module_name, args, inject)
cmd_mod = ""
if self.sudo and self.sudo_user != 'root':
# deal with possible umask issues once sudo'ed to other user
cmd_chmod = "chmod a+r %s" % remote_module_path
self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False)
cmd = ""
if not is_new_style:
args = utils.template(self.basedir, args, inject)
argsfile = self._transfer_str(conn, tmp, 'arguments', args)
if async_jid is None:
cmd = "%s %s" % (remote_module_path, argsfile)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
else:
if async_jid is None:
cmd = "%s" % (remote_module_path)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
if not shebang:
raise errors.AnsibleError("module is missing interpreter line")
cmd = shebang.replace("#!","") + " " + cmd
if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':
cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp
res = self._low_level_exec_command(conn, cmd, tmp, sudoable=True)
return ReturnData(conn=conn, result=res['stdout'])
|
python
|
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None):
''' runs a module that has already been transferred '''
# hack to support fireball mode
if module_name == 'fireball':
args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
if 'port' not in args:
args += " port=%s" % C.ZEROMQ_PORT
(remote_module_path, is_new_style, shebang) = self._copy_module(conn, tmp, module_name, args, inject)
cmd_mod = ""
if self.sudo and self.sudo_user != 'root':
# deal with possible umask issues once sudo'ed to other user
cmd_chmod = "chmod a+r %s" % remote_module_path
self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False)
cmd = ""
if not is_new_style:
args = utils.template(self.basedir, args, inject)
argsfile = self._transfer_str(conn, tmp, 'arguments', args)
if async_jid is None:
cmd = "%s %s" % (remote_module_path, argsfile)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
else:
if async_jid is None:
cmd = "%s" % (remote_module_path)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
if not shebang:
raise errors.AnsibleError("module is missing interpreter line")
cmd = shebang.replace("#!","") + " " + cmd
if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':
cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp
res = self._low_level_exec_command(conn, cmd, tmp, sudoable=True)
return ReturnData(conn=conn, result=res['stdout'])
|
[
"def",
"_execute_module",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"module_name",
",",
"args",
",",
"async_jid",
"=",
"None",
",",
"async_module",
"=",
"None",
",",
"async_limit",
"=",
"None",
",",
"inject",
"=",
"None",
")",
":",
"# hack to support fireball mode",
"if",
"module_name",
"==",
"'fireball'",
":",
"args",
"=",
"\"%s password=%s\"",
"%",
"(",
"args",
",",
"base64",
".",
"b64encode",
"(",
"str",
"(",
"utils",
".",
"key_for_hostname",
"(",
"conn",
".",
"host",
")",
")",
")",
")",
"if",
"'port'",
"not",
"in",
"args",
":",
"args",
"+=",
"\" port=%s\"",
"%",
"C",
".",
"ZEROMQ_PORT",
"(",
"remote_module_path",
",",
"is_new_style",
",",
"shebang",
")",
"=",
"self",
".",
"_copy_module",
"(",
"conn",
",",
"tmp",
",",
"module_name",
",",
"args",
",",
"inject",
")",
"cmd_mod",
"=",
"\"\"",
"if",
"self",
".",
"sudo",
"and",
"self",
".",
"sudo_user",
"!=",
"'root'",
":",
"# deal with possible umask issues once sudo'ed to other user",
"cmd_chmod",
"=",
"\"chmod a+r %s\"",
"%",
"remote_module_path",
"self",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"cmd_chmod",
",",
"tmp",
",",
"sudoable",
"=",
"False",
")",
"cmd",
"=",
"\"\"",
"if",
"not",
"is_new_style",
":",
"args",
"=",
"utils",
".",
"template",
"(",
"self",
".",
"basedir",
",",
"args",
",",
"inject",
")",
"argsfile",
"=",
"self",
".",
"_transfer_str",
"(",
"conn",
",",
"tmp",
",",
"'arguments'",
",",
"args",
")",
"if",
"async_jid",
"is",
"None",
":",
"cmd",
"=",
"\"%s %s\"",
"%",
"(",
"remote_module_path",
",",
"argsfile",
")",
"else",
":",
"cmd",
"=",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"remote_module_path",
",",
"async_jid",
",",
"async_limit",
",",
"async_module",
",",
"argsfile",
"]",
"]",
")",
"else",
":",
"if",
"async_jid",
"is",
"None",
":",
"cmd",
"=",
"\"%s\"",
"%",
"(",
"remote_module_path",
")",
"else",
":",
"cmd",
"=",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"remote_module_path",
",",
"async_jid",
",",
"async_limit",
",",
"async_module",
"]",
"]",
")",
"if",
"not",
"shebang",
":",
"raise",
"errors",
".",
"AnsibleError",
"(",
"\"module is missing interpreter line\"",
")",
"cmd",
"=",
"shebang",
".",
"replace",
"(",
"\"#!\"",
",",
"\"\"",
")",
"+",
"\" \"",
"+",
"cmd",
"if",
"tmp",
".",
"find",
"(",
"\"tmp\"",
")",
"!=",
"-",
"1",
"and",
"C",
".",
"DEFAULT_KEEP_REMOTE_FILES",
"!=",
"'1'",
":",
"cmd",
"=",
"cmd",
"+",
"\"; rm -rf %s >/dev/null 2>&1\"",
"%",
"tmp",
"res",
"=",
"self",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"cmd",
",",
"tmp",
",",
"sudoable",
"=",
"True",
")",
"return",
"ReturnData",
"(",
"conn",
"=",
"conn",
",",
"result",
"=",
"res",
"[",
"'stdout'",
"]",
")"
] |
runs a module that has already been transferred
|
[
"runs",
"a",
"module",
"that",
"has",
"already",
"been",
"transferred"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L192-L232
|
246,995 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._executor
|
def _executor(self, host):
''' handler for multiprocessing library '''
try:
exec_rc = self._executor_internal(host)
#if type(exec_rc) != ReturnData and type(exec_rc) != ansible.runner.return_data.ReturnData:
# raise Exception("unexpected return type: %s" % type(exec_rc))
# redundant, right?
if not exec_rc.comm_ok:
self.callbacks.on_unreachable(host, exec_rc.result)
return exec_rc
except errors.AnsibleError, ae:
msg = str(ae)
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
except Exception:
msg = traceback.format_exc()
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
|
python
|
def _executor(self, host):
''' handler for multiprocessing library '''
try:
exec_rc = self._executor_internal(host)
#if type(exec_rc) != ReturnData and type(exec_rc) != ansible.runner.return_data.ReturnData:
# raise Exception("unexpected return type: %s" % type(exec_rc))
# redundant, right?
if not exec_rc.comm_ok:
self.callbacks.on_unreachable(host, exec_rc.result)
return exec_rc
except errors.AnsibleError, ae:
msg = str(ae)
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
except Exception:
msg = traceback.format_exc()
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
|
[
"def",
"_executor",
"(",
"self",
",",
"host",
")",
":",
"try",
":",
"exec_rc",
"=",
"self",
".",
"_executor_internal",
"(",
"host",
")",
"#if type(exec_rc) != ReturnData and type(exec_rc) != ansible.runner.return_data.ReturnData:",
"# raise Exception(\"unexpected return type: %s\" % type(exec_rc))",
"# redundant, right?",
"if",
"not",
"exec_rc",
".",
"comm_ok",
":",
"self",
".",
"callbacks",
".",
"on_unreachable",
"(",
"host",
",",
"exec_rc",
".",
"result",
")",
"return",
"exec_rc",
"except",
"errors",
".",
"AnsibleError",
",",
"ae",
":",
"msg",
"=",
"str",
"(",
"ae",
")",
"self",
".",
"callbacks",
".",
"on_unreachable",
"(",
"host",
",",
"msg",
")",
"return",
"ReturnData",
"(",
"host",
"=",
"host",
",",
"comm_ok",
"=",
"False",
",",
"result",
"=",
"dict",
"(",
"failed",
"=",
"True",
",",
"msg",
"=",
"msg",
")",
")",
"except",
"Exception",
":",
"msg",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"callbacks",
".",
"on_unreachable",
"(",
"host",
",",
"msg",
")",
"return",
"ReturnData",
"(",
"host",
"=",
"host",
",",
"comm_ok",
"=",
"False",
",",
"result",
"=",
"dict",
"(",
"failed",
"=",
"True",
",",
"msg",
"=",
"msg",
")",
")"
] |
handler for multiprocessing library
|
[
"handler",
"for",
"multiprocessing",
"library"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L240-L258
|
246,996 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._executor_internal
|
def _executor_internal(self, host):
''' executes any module one or more times '''
host_variables = self.inventory.get_variables(host)
if self.transport in [ 'paramiko', 'ssh' ]:
port = host_variables.get('ansible_ssh_port', self.remote_port)
if port is None:
port = C.DEFAULT_REMOTE_PORT
else:
# fireball, local, etc
port = self.remote_port
inject = {}
inject.update(host_variables)
inject.update(self.module_vars)
inject.update(self.setup_cache[host])
inject['hostvars'] = HostVars(self.setup_cache, self.inventory)
inject['group_names'] = host_variables.get('group_names', [])
inject['groups'] = self.inventory.groups_list()
# allow with_foo to work in playbooks...
items = None
items_plugin = self.module_vars.get('items_lookup_plugin', None)
if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
items_terms = self.module_vars.get('items_lookup_terms', '')
items_terms = utils.template_ds(self.basedir, items_terms, inject)
items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=self.basedir).run(items_terms, inject=inject)
if type(items) != list:
raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
if len(items) and self.module_name in [ 'apt', 'yum' ]:
# hack for apt and soon yum, with_items maps back into a single module call
inject['item'] = ",".join(items)
items = None
# logic to decide how to run things depends on whether with_items is used
if items is None:
return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
elif len(items) > 0:
# executing using with_items, so make multiple calls
# TODO: refactor
aggregrate = {}
all_comm_ok = True
all_changed = False
all_failed = False
results = []
for x in items:
inject['item'] = x
result = self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
results.append(result.result)
if result.comm_ok == False:
all_comm_ok = False
all_failed = True
break
for x in results:
if x.get('changed') == True:
all_changed = True
if (x.get('failed') == True) or (('rc' in x) and (x['rc'] != 0)):
all_failed = True
break
msg = 'All items completed'
if all_failed:
msg = "One or more items failed."
rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
if not all_failed:
del rd_result['failed']
return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
else:
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, comm_ok=True, result=dict(skipped=True))
|
python
|
def _executor_internal(self, host):
''' executes any module one or more times '''
host_variables = self.inventory.get_variables(host)
if self.transport in [ 'paramiko', 'ssh' ]:
port = host_variables.get('ansible_ssh_port', self.remote_port)
if port is None:
port = C.DEFAULT_REMOTE_PORT
else:
# fireball, local, etc
port = self.remote_port
inject = {}
inject.update(host_variables)
inject.update(self.module_vars)
inject.update(self.setup_cache[host])
inject['hostvars'] = HostVars(self.setup_cache, self.inventory)
inject['group_names'] = host_variables.get('group_names', [])
inject['groups'] = self.inventory.groups_list()
# allow with_foo to work in playbooks...
items = None
items_plugin = self.module_vars.get('items_lookup_plugin', None)
if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
items_terms = self.module_vars.get('items_lookup_terms', '')
items_terms = utils.template_ds(self.basedir, items_terms, inject)
items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=self.basedir).run(items_terms, inject=inject)
if type(items) != list:
raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
if len(items) and self.module_name in [ 'apt', 'yum' ]:
# hack for apt and soon yum, with_items maps back into a single module call
inject['item'] = ",".join(items)
items = None
# logic to decide how to run things depends on whether with_items is used
if items is None:
return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
elif len(items) > 0:
# executing using with_items, so make multiple calls
# TODO: refactor
aggregrate = {}
all_comm_ok = True
all_changed = False
all_failed = False
results = []
for x in items:
inject['item'] = x
result = self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
results.append(result.result)
if result.comm_ok == False:
all_comm_ok = False
all_failed = True
break
for x in results:
if x.get('changed') == True:
all_changed = True
if (x.get('failed') == True) or (('rc' in x) and (x['rc'] != 0)):
all_failed = True
break
msg = 'All items completed'
if all_failed:
msg = "One or more items failed."
rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
if not all_failed:
del rd_result['failed']
return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
else:
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, comm_ok=True, result=dict(skipped=True))
|
[
"def",
"_executor_internal",
"(",
"self",
",",
"host",
")",
":",
"host_variables",
"=",
"self",
".",
"inventory",
".",
"get_variables",
"(",
"host",
")",
"if",
"self",
".",
"transport",
"in",
"[",
"'paramiko'",
",",
"'ssh'",
"]",
":",
"port",
"=",
"host_variables",
".",
"get",
"(",
"'ansible_ssh_port'",
",",
"self",
".",
"remote_port",
")",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"C",
".",
"DEFAULT_REMOTE_PORT",
"else",
":",
"# fireball, local, etc",
"port",
"=",
"self",
".",
"remote_port",
"inject",
"=",
"{",
"}",
"inject",
".",
"update",
"(",
"host_variables",
")",
"inject",
".",
"update",
"(",
"self",
".",
"module_vars",
")",
"inject",
".",
"update",
"(",
"self",
".",
"setup_cache",
"[",
"host",
"]",
")",
"inject",
"[",
"'hostvars'",
"]",
"=",
"HostVars",
"(",
"self",
".",
"setup_cache",
",",
"self",
".",
"inventory",
")",
"inject",
"[",
"'group_names'",
"]",
"=",
"host_variables",
".",
"get",
"(",
"'group_names'",
",",
"[",
"]",
")",
"inject",
"[",
"'groups'",
"]",
"=",
"self",
".",
"inventory",
".",
"groups_list",
"(",
")",
"# allow with_foo to work in playbooks...",
"items",
"=",
"None",
"items_plugin",
"=",
"self",
".",
"module_vars",
".",
"get",
"(",
"'items_lookup_plugin'",
",",
"None",
")",
"if",
"items_plugin",
"is",
"not",
"None",
"and",
"items_plugin",
"in",
"utils",
".",
"plugins",
".",
"lookup_loader",
":",
"items_terms",
"=",
"self",
".",
"module_vars",
".",
"get",
"(",
"'items_lookup_terms'",
",",
"''",
")",
"items_terms",
"=",
"utils",
".",
"template_ds",
"(",
"self",
".",
"basedir",
",",
"items_terms",
",",
"inject",
")",
"items",
"=",
"utils",
".",
"plugins",
".",
"lookup_loader",
".",
"get",
"(",
"items_plugin",
",",
"runner",
"=",
"self",
",",
"basedir",
"=",
"self",
".",
"basedir",
")",
".",
"run",
"(",
"items_terms",
",",
"inject",
"=",
"inject",
")",
"if",
"type",
"(",
"items",
")",
"!=",
"list",
":",
"raise",
"errors",
".",
"AnsibleError",
"(",
"\"lookup plugins have to return a list: %r\"",
"%",
"items",
")",
"if",
"len",
"(",
"items",
")",
"and",
"self",
".",
"module_name",
"in",
"[",
"'apt'",
",",
"'yum'",
"]",
":",
"# hack for apt and soon yum, with_items maps back into a single module call",
"inject",
"[",
"'item'",
"]",
"=",
"\",\"",
".",
"join",
"(",
"items",
")",
"items",
"=",
"None",
"# logic to decide how to run things depends on whether with_items is used",
"if",
"items",
"is",
"None",
":",
"return",
"self",
".",
"_executor_internal_inner",
"(",
"host",
",",
"self",
".",
"module_name",
",",
"self",
".",
"module_args",
",",
"inject",
",",
"port",
")",
"elif",
"len",
"(",
"items",
")",
">",
"0",
":",
"# executing using with_items, so make multiple calls",
"# TODO: refactor",
"aggregrate",
"=",
"{",
"}",
"all_comm_ok",
"=",
"True",
"all_changed",
"=",
"False",
"all_failed",
"=",
"False",
"results",
"=",
"[",
"]",
"for",
"x",
"in",
"items",
":",
"inject",
"[",
"'item'",
"]",
"=",
"x",
"result",
"=",
"self",
".",
"_executor_internal_inner",
"(",
"host",
",",
"self",
".",
"module_name",
",",
"self",
".",
"module_args",
",",
"inject",
",",
"port",
")",
"results",
".",
"append",
"(",
"result",
".",
"result",
")",
"if",
"result",
".",
"comm_ok",
"==",
"False",
":",
"all_comm_ok",
"=",
"False",
"all_failed",
"=",
"True",
"break",
"for",
"x",
"in",
"results",
":",
"if",
"x",
".",
"get",
"(",
"'changed'",
")",
"==",
"True",
":",
"all_changed",
"=",
"True",
"if",
"(",
"x",
".",
"get",
"(",
"'failed'",
")",
"==",
"True",
")",
"or",
"(",
"(",
"'rc'",
"in",
"x",
")",
"and",
"(",
"x",
"[",
"'rc'",
"]",
"!=",
"0",
")",
")",
":",
"all_failed",
"=",
"True",
"break",
"msg",
"=",
"'All items completed'",
"if",
"all_failed",
":",
"msg",
"=",
"\"One or more items failed.\"",
"rd_result",
"=",
"dict",
"(",
"failed",
"=",
"all_failed",
",",
"changed",
"=",
"all_changed",
",",
"results",
"=",
"results",
",",
"msg",
"=",
"msg",
")",
"if",
"not",
"all_failed",
":",
"del",
"rd_result",
"[",
"'failed'",
"]",
"return",
"ReturnData",
"(",
"host",
"=",
"host",
",",
"comm_ok",
"=",
"all_comm_ok",
",",
"result",
"=",
"rd_result",
")",
"else",
":",
"self",
".",
"callbacks",
".",
"on_skipped",
"(",
"host",
",",
"None",
")",
"return",
"ReturnData",
"(",
"host",
"=",
"host",
",",
"comm_ok",
"=",
"True",
",",
"result",
"=",
"dict",
"(",
"skipped",
"=",
"True",
")",
")"
] |
executes any module one or more times
|
[
"executes",
"any",
"module",
"one",
"or",
"more",
"times"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L262-L332
|
246,997 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._low_level_exec_command
|
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None):
''' execute a command string over SSH, return the output '''
if executable is None:
executable = '/bin/sh'
sudo_user = self.sudo_user
rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable)
if type(stdout) not in [ str, unicode ]:
out = ''.join(stdout.readlines())
else:
out = stdout
if type(stderr) not in [ str, unicode ]:
err = ''.join(stderr.readlines())
else:
err = stderr
if rc != None:
return dict(rc=rc, stdout=out, stderr=err )
else:
return dict(stdout=out, stderr=err )
|
python
|
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None):
''' execute a command string over SSH, return the output '''
if executable is None:
executable = '/bin/sh'
sudo_user = self.sudo_user
rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable)
if type(stdout) not in [ str, unicode ]:
out = ''.join(stdout.readlines())
else:
out = stdout
if type(stderr) not in [ str, unicode ]:
err = ''.join(stderr.readlines())
else:
err = stderr
if rc != None:
return dict(rc=rc, stdout=out, stderr=err )
else:
return dict(stdout=out, stderr=err )
|
[
"def",
"_low_level_exec_command",
"(",
"self",
",",
"conn",
",",
"cmd",
",",
"tmp",
",",
"sudoable",
"=",
"False",
",",
"executable",
"=",
"None",
")",
":",
"if",
"executable",
"is",
"None",
":",
"executable",
"=",
"'/bin/sh'",
"sudo_user",
"=",
"self",
".",
"sudo_user",
"rc",
",",
"stdin",
",",
"stdout",
",",
"stderr",
"=",
"conn",
".",
"exec_command",
"(",
"cmd",
",",
"tmp",
",",
"sudo_user",
",",
"sudoable",
"=",
"sudoable",
",",
"executable",
"=",
"executable",
")",
"if",
"type",
"(",
"stdout",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"out",
"=",
"''",
".",
"join",
"(",
"stdout",
".",
"readlines",
"(",
")",
")",
"else",
":",
"out",
"=",
"stdout",
"if",
"type",
"(",
"stderr",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"err",
"=",
"''",
".",
"join",
"(",
"stderr",
".",
"readlines",
"(",
")",
")",
"else",
":",
"err",
"=",
"stderr",
"if",
"rc",
"!=",
"None",
":",
"return",
"dict",
"(",
"rc",
"=",
"rc",
",",
"stdout",
"=",
"out",
",",
"stderr",
"=",
"err",
")",
"else",
":",
"return",
"dict",
"(",
"stdout",
"=",
"out",
",",
"stderr",
"=",
"err",
")"
] |
execute a command string over SSH, return the output
|
[
"execute",
"a",
"command",
"string",
"over",
"SSH",
"return",
"the",
"output"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L448-L470
|
246,998 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._remote_md5
|
def _remote_md5(self, conn, tmp, path):
''' takes a remote md5sum without requiring python, and returns 0 if no file '''
test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1" % (path,path)
md5s = [
"(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux
"(/sbin/md5sum -q %s 2>/dev/null)" % path, # ?
"(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+
"(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd
"(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd
"(/bin/md5 -q %s 2>/dev/null)" % path # Openbsd
]
cmd = " || ".join(md5s)
cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False)
data2 = utils.last_non_blank_line(data['stdout'])
try:
return data2.split()[0]
except IndexError:
sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n")
sys.stderr.write("command: %s\n" % md5s)
sys.stderr.write("----\n")
sys.stderr.write("output: %s\n" % data)
sys.stderr.write("----\n")
# this will signal that it changed and allow things to keep going
return "INVALIDMD5SUM"
|
python
|
def _remote_md5(self, conn, tmp, path):
''' takes a remote md5sum without requiring python, and returns 0 if no file '''
test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1" % (path,path)
md5s = [
"(/usr/bin/md5sum %s 2>/dev/null)" % path, # Linux
"(/sbin/md5sum -q %s 2>/dev/null)" % path, # ?
"(/usr/bin/digest -a md5 %s 2>/dev/null)" % path, # Solaris 10+
"(/sbin/md5 -q %s 2>/dev/null)" % path, # Freebsd
"(/usr/bin/md5 -n %s 2>/dev/null)" % path, # Netbsd
"(/bin/md5 -q %s 2>/dev/null)" % path # Openbsd
]
cmd = " || ".join(md5s)
cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False)
data2 = utils.last_non_blank_line(data['stdout'])
try:
return data2.split()[0]
except IndexError:
sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n")
sys.stderr.write("command: %s\n" % md5s)
sys.stderr.write("----\n")
sys.stderr.write("output: %s\n" % data)
sys.stderr.write("----\n")
# this will signal that it changed and allow things to keep going
return "INVALIDMD5SUM"
|
[
"def",
"_remote_md5",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"path",
")",
":",
"test",
"=",
"\"rc=0; [ -r \\\"%s\\\" ] || rc=2; [ -f \\\"%s\\\" ] || rc=1\"",
"%",
"(",
"path",
",",
"path",
")",
"md5s",
"=",
"[",
"\"(/usr/bin/md5sum %s 2>/dev/null)\"",
"%",
"path",
",",
"# Linux",
"\"(/sbin/md5sum -q %s 2>/dev/null)\"",
"%",
"path",
",",
"# ?",
"\"(/usr/bin/digest -a md5 %s 2>/dev/null)\"",
"%",
"path",
",",
"# Solaris 10+",
"\"(/sbin/md5 -q %s 2>/dev/null)\"",
"%",
"path",
",",
"# Freebsd",
"\"(/usr/bin/md5 -n %s 2>/dev/null)\"",
"%",
"path",
",",
"# Netbsd",
"\"(/bin/md5 -q %s 2>/dev/null)\"",
"%",
"path",
"# Openbsd",
"]",
"cmd",
"=",
"\" || \"",
".",
"join",
"(",
"md5s",
")",
"cmd",
"=",
"\"%s; %s || (echo \\\"${rc} %s\\\")\"",
"%",
"(",
"test",
",",
"cmd",
",",
"path",
")",
"data",
"=",
"self",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"cmd",
",",
"tmp",
",",
"sudoable",
"=",
"False",
")",
"data2",
"=",
"utils",
".",
"last_non_blank_line",
"(",
"data",
"[",
"'stdout'",
"]",
")",
"try",
":",
"return",
"data2",
".",
"split",
"(",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"warning: md5sum command failed unusually, please report this to the list so it can be fixed\\n\"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"command: %s\\n\"",
"%",
"md5s",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"----\\n\"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"output: %s\\n\"",
"%",
"data",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"----\\n\"",
")",
"# this will signal that it changed and allow things to keep going",
"return",
"\"INVALIDMD5SUM\""
] |
takes a remote md5sum without requiring python, and returns 0 if no file
|
[
"takes",
"a",
"remote",
"md5sum",
"without",
"requiring",
"python",
"and",
"returns",
"0",
"if",
"no",
"file"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L474-L500
|
246,999 |
cirruscluster/cirruscluster
|
cirruscluster/ext/ansible/runner/__init__.py
|
Runner._make_tmp_path
|
def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile)
if self.sudo and self.sudo_user != 'root':
basetmp = os.path.join('/tmp', basefile)
cmd = 'mkdir -p %s' % basetmp
if self.remote_user != 'root':
cmd += ' && chmod a+rx %s' % basetmp
cmd += ' && echo %s' % basetmp
result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
rc = utils.last_non_blank_line(result['stdout']).strip() + '/'
return rc
|
python
|
def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile)
if self.sudo and self.sudo_user != 'root':
basetmp = os.path.join('/tmp', basefile)
cmd = 'mkdir -p %s' % basetmp
if self.remote_user != 'root':
cmd += ' && chmod a+rx %s' % basetmp
cmd += ' && echo %s' % basetmp
result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
rc = utils.last_non_blank_line(result['stdout']).strip() + '/'
return rc
|
[
"def",
"_make_tmp_path",
"(",
"self",
",",
"conn",
")",
":",
"basefile",
"=",
"'ansible-%s-%s'",
"%",
"(",
"time",
".",
"time",
"(",
")",
",",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"48",
")",
")",
"basetmp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"C",
".",
"DEFAULT_REMOTE_TMP",
",",
"basefile",
")",
"if",
"self",
".",
"sudo",
"and",
"self",
".",
"sudo_user",
"!=",
"'root'",
":",
"basetmp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'/tmp'",
",",
"basefile",
")",
"cmd",
"=",
"'mkdir -p %s'",
"%",
"basetmp",
"if",
"self",
".",
"remote_user",
"!=",
"'root'",
":",
"cmd",
"+=",
"' && chmod a+rx %s'",
"%",
"basetmp",
"cmd",
"+=",
"' && echo %s'",
"%",
"basetmp",
"result",
"=",
"self",
".",
"_low_level_exec_command",
"(",
"conn",
",",
"cmd",
",",
"None",
",",
"sudoable",
"=",
"False",
")",
"rc",
"=",
"utils",
".",
"last_non_blank_line",
"(",
"result",
"[",
"'stdout'",
"]",
")",
".",
"strip",
"(",
")",
"+",
"'/'",
"return",
"rc"
] |
make and return a temporary path on a remote box
|
[
"make",
"and",
"return",
"a",
"temporary",
"path",
"on",
"a",
"remote",
"box"
] |
977409929dd81322d886425cdced10608117d5d7
|
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L504-L519
|