repo | path | func_name | code | language | sha | url | partition
---|---|---|---|---|---|---|---
msoulier/tftpy | tftpy/TftpStates.py | TftpState.sendOACK | def sendOACK(self):
    """This method sends an OACK packet with the options from the current
    context."""
    log.debug("In sendOACK with options %s", self.context.options)
    pkt = TftpPacketOACK()
    pkt.options = self.context.options
    self.context.sock.sendto(pkt.encode().buffer,
                             (self.context.host,
                              self.context.tidport))
    self.context.last_pkt = pkt | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L142-L151 | train |
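For reference, RFC 2347 defines the OACK frame that `pkt.encode()` builds above: a two-byte opcode (6) followed by NUL-terminated option/value string pairs. A minimal sketch of that wire layout, with a made-up option value:

```python
import struct

def encode_oack(options):
    # Opcode 6 identifies an OACK; each option name and value is sent as a
    # NUL-terminated ASCII string, per RFC 2347.
    payload = b''.join(
        k.encode('ascii') + b'\x00' + str(v).encode('ascii') + b'\x00'
        for k, v in options.items())
    return struct.pack('!H', 6) + payload

print(encode_oack({'blksize': 1428}))  # b'\x00\x06blksize\x001428\x00'
```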
msoulier/tftpy | tftpy/TftpStates.py | TftpState.resendLast | def resendLast(self):
    "Resend the last sent packet due to a timeout."
    log.warning("Resending packet %s on sessions %s"
                % (self.context.last_pkt, self))
    self.context.metrics.resent_bytes += len(self.context.last_pkt.buffer)
    self.context.metrics.add_dup(self.context.last_pkt)
    sendto_port = self.context.tidport
    if not sendto_port:
        # If the tidport wasn't set, then the remote end hasn't even
        # started talking to us yet. That's not good. Maybe it's not
        # there.
        sendto_port = self.context.port
    self.context.sock.sendto(self.context.last_pkt.encode().buffer,
                             (self.context.host, sendto_port))
    if self.context.packethook:
        self.context.packethook(self.context.last_pkt) | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L153-L168 | train |
msoulier/tftpy | tftpy/TftpStates.py | TftpState.handleDat | def handleDat(self, pkt):
    """This method handles a DAT packet during a client download, or a
    server upload."""
    log.info("Handling DAT packet - block %d" % pkt.blocknumber)
    log.debug("Expecting block %s", self.context.next_block)
    if pkt.blocknumber == self.context.next_block:
        log.debug("Good, received block %d in sequence", pkt.blocknumber)
        self.sendACK()
        self.context.next_block += 1
        log.debug("Writing %d bytes to output file", len(pkt.data))
        self.context.fileobj.write(pkt.data)
        self.context.metrics.bytes += len(pkt.data)
        # Check for end-of-file, any less than full data packet.
        if len(pkt.data) < self.context.getBlocksize():
            log.info("End of file detected")
            return None
    elif pkt.blocknumber < self.context.next_block:
        if pkt.blocknumber == 0:
            log.warning("There is no block zero!")
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException("There is no block zero!")
        log.warning("Dropping duplicate block %d" % pkt.blocknumber)
        self.context.metrics.add_dup(pkt)
        log.debug("ACKing block %d again, just in case", pkt.blocknumber)
        self.sendACK(pkt.blocknumber)
    else:
        # FIXME: should we be more tolerant and just discard instead?
        msg = "Whoa! Received future block %d but expected %d" \
            % (pkt.blocknumber, self.context.next_block)
        log.error(msg)
        raise TftpException(msg)
    # Default is to ack
    return TftpStateExpectDAT(self.context) | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L170-L207 | train |
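The end-of-transfer test in `handleDat` above relies on the TFTP convention that the final DAT packet carries fewer data bytes than the negotiated block size; a file whose length is an exact multiple of the block size is therefore terminated by an extra zero-length DAT. A tiny illustration of the rule, with an arbitrary block size:

```python
blksize = 512
for payload_len in (512, 200, 0):
    # A full payload means more data is coming; anything shorter ends the transfer.
    print(payload_len, "last block" if payload_len < blksize else "more to come")
```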
msoulier/tftpy | tftpy/TftpStates.py | TftpServerState.serverInitial | def serverInitial(self, pkt, raddress, rport):
    """This method performs initial setup for a server context transfer,
    put here to refactor code out of the TftpStateServerRecvRRQ and
    TftpStateServerRecvWRQ classes, since their initial setup is
    identical. The method returns a boolean, sendoack, to indicate whether
    it is required to send an OACK to the client."""
    options = pkt.options
    sendoack = False
    if not self.context.tidport:
        self.context.tidport = rport
        log.info("Setting tidport to %s" % rport)
    log.debug("Setting default options, blksize")
    self.context.options = { 'blksize': DEF_BLKSIZE }
    if options:
        log.debug("Options requested: %s", options)
        supported_options = self.returnSupportedOptions(options)
        self.context.options.update(supported_options)
        sendoack = True
    # FIXME - only octet mode is supported at this time.
    if pkt.mode != 'octet':
        #self.sendError(TftpErrors.IllegalTftpOp)
        #raise TftpException("Only octet transfers are supported at this time.")
        log.warning("Received non-octet mode request. I'll reply with binary data.")
    # test host/port of client end
    if self.context.host != raddress or self.context.port != rport:
        self.sendError(TftpErrors.UnknownTID)
        log.error("Expected traffic from %s:%s but received it "
                  "from %s:%s instead."
                  % (self.context.host,
                     self.context.port,
                     raddress,
                     rport))
        # FIXME: increment an error count?
        # Return same state, we're still waiting for valid traffic.
        return self
    log.debug("Requested filename is %s", pkt.filename)
    # Build the filename on this server and ensure it is contained
    # in the specified root directory.
    #
    # Filenames that begin with the server root are accepted. It's
    # assumed the client and server are tightly connected and this
    # provides backwards compatibility.
    #
    # Filenames otherwise are relative to the server root. If they
    # begin with a '/' strip it off, as otherwise os.path.join will
    # treat it as absolute (regardless of whether it is the ntpath or
    # posixpath module).
    if pkt.filename.startswith(self.context.root):
        full_path = pkt.filename
    else:
        full_path = os.path.join(self.context.root, pkt.filename.lstrip('/'))
    # Use abspath to eliminate any remaining relative elements
    # (e.g. '..') and ensure that it is still within the server's
    # root directory.
    self.full_path = os.path.abspath(full_path)
    log.debug("full_path is %s", full_path)
    if self.full_path.startswith(self.context.root):
        log.info("requested file is in the server root - good")
    else:
        log.warning("requested file is not within the server root - bad")
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("bad file path")
    self.context.file_to_transfer = pkt.filename
    return sendoack | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L219-L291 | train |
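The containment check in `serverInitial` above is a standard directory-traversal guard: join the request against the root, collapse any `..` components with `abspath`, then verify the result still lies under the root. A standalone sketch of the same idea (the paths are hypothetical):

```python
import os

def is_within_root(root, requested):
    # Mirror the check above: strip a leading '/', join against the root,
    # normalize away '..' with abspath, then test containment.
    full = os.path.abspath(os.path.join(root, requested.lstrip('/')))
    return full.startswith(root)

print(is_within_root('/srv/tftp', 'boot/pxelinux.0'))   # True
print(is_within_root('/srv/tftp', '../../etc/passwd'))  # False
```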
msoulier/tftpy | tftpy/TftpStates.py | TftpStateServerRecvRRQ.handle | def handle(self, pkt, raddress, rport):
    "Handle an initial RRQ packet as a server."
    log.debug("In TftpStateServerRecvRRQ.handle")
    sendoack = self.serverInitial(pkt, raddress, rport)
    path = self.full_path
    log.info("Opening file %s for reading" % path)
    if os.path.exists(path):
        # Note: Open in binary mode for win32 portability, since win32
        # blows.
        self.context.fileobj = open(path, "rb")
    elif self.context.dyn_file_func:
        log.debug("No such file %s but using dyn_file_func", path)
        self.context.fileobj = \
            self.context.dyn_file_func(self.context.file_to_transfer, raddress=raddress, rport=rport)
        if self.context.fileobj is None:
            log.debug("dyn_file_func returned 'None', treating as "
                      "FileNotFound")
            self.sendError(TftpErrors.FileNotFound)
            raise TftpException("File not found: %s" % path)
    else:
        log.warning("File not found: %s", path)
        self.sendError(TftpErrors.FileNotFound)
        raise TftpException("File not found: {}".format(path))
    # Options negotiation.
    if sendoack and 'tsize' in self.context.options:
        # getting the file size for the tsize option. As we handle
        # file-like objects and not only real files, we use this seeking
        # method instead of asking the OS
        self.context.fileobj.seek(0, os.SEEK_END)
        tsize = str(self.context.fileobj.tell())
        self.context.fileobj.seek(0, 0)
        self.context.options['tsize'] = tsize
    if sendoack:
        # Note, next_block is 0 here since that's the proper
        # acknowledgement to an OACK.
        # FIXME: perhaps we do need a TftpStateExpectOACK class...
        self.sendOACK()
        # Note, self.context.next_block is already 0.
    else:
        self.context.next_block = 1
        log.debug("No requested options, starting send...")
        self.context.pending_complete = self.sendDAT()
    # Note, we expect an ack regardless of whether we sent a DAT or an
    # OACK.
    return TftpStateExpectACK(self.context) | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L297-L344 | train |
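The `dyn_file_func` branch above lets a server hand out generated content: the callable receives the requested filename plus the client's address and port, and returns a binary file-like object, or `None` to have the server answer with a FileNotFound error. A sketch of such a callable (the filename and payload are invented):

```python
from io import BytesIO

def my_dyn_file_func(fname, raddress=None, rport=None):
    # Serve a generated per-client config; returning None for anything
    # else makes the server reply with a FileNotFound error, as above.
    if fname == 'client.cfg':
        return BytesIO(("# generated for %s:%s\n" % (raddress, rport)).encode())
    return None
```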
msoulier/tftpy | tftpy/TftpStates.py | TftpStateServerRecvWRQ.make_subdirs | def make_subdirs(self):
    """The purpose of this method is to, if necessary, create all of the
    subdirectories leading up to the file to be written."""
    # Pull off everything below the root.
    subpath = self.full_path[len(self.context.root):]
    log.debug("make_subdirs: subpath is %s", subpath)
    # Split on directory separators, but drop the last one, as it should
    # be the filename.
    dirs = subpath.split(os.sep)[:-1]
    log.debug("dirs is %s", dirs)
    current = self.context.root
    for dir in dirs:
        if dir:
            current = os.path.join(current, dir)
            if os.path.isdir(current):
                log.debug("%s is already an existing directory", current)
            else:
                os.mkdir(current, 0o700) | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L352-L369 | train |
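On Python 3 the loop in `make_subdirs` can be collapsed into a single `os.makedirs` call; a sketch of the equivalent is below. One subtlety: since Python 3.7, `mode` applies only to the final directory, whereas the explicit loop above applies `0o700` to every directory it creates.

```python
import os

def make_subdirs(full_path):
    # Create every missing directory leading up to the file, skipping any
    # that already exist (exist_ok requires Python 3.2+); assumes full_path
    # contains at least one directory component.
    os.makedirs(os.path.dirname(full_path), mode=0o700, exist_ok=True)
```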
msoulier/tftpy | tftpy/TftpStates.py | TftpStateServerRecvWRQ.handle | def handle(self, pkt, raddress, rport):
    "Handle an initial WRQ packet as a server."
    log.debug("In TftpStateServerRecvWRQ.handle")
    sendoack = self.serverInitial(pkt, raddress, rport)
    path = self.full_path
    if self.context.upload_open:
        f = self.context.upload_open(path, self.context)
        if f is None:
            self.sendError(TftpErrors.AccessViolation)
            raise TftpException("Dynamic path %s not permitted" % path)
        else:
            self.context.fileobj = f
    else:
        log.info("Opening file %s for writing" % path)
        if os.path.exists(path):
            # FIXME: correct behavior?
            log.warning("File %s exists already, overwriting..." % (
                self.context.file_to_transfer))
        # FIXME: I think we should upload to a temp file and not overwrite
        # the existing file until the file is successfully uploaded.
        self.make_subdirs()
        self.context.fileobj = open(path, "wb")
    # Options negotiation.
    if sendoack:
        log.debug("Sending OACK to client")
        self.sendOACK()
    else:
        log.debug("No requested options, expecting transfer to begin...")
        self.sendACK()
    # Whether we're sending an oack or not, we're expecting a DAT for
    # block 1
    self.context.next_block = 1
    # We may have sent an OACK, but we're expecting a DAT as the response
    # to either the OACK or an ACK, so lets unconditionally use the
    # TftpStateExpectDAT state.
    return TftpStateExpectDAT(self.context) | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L371-L407 | train |
msoulier/tftpy | tftpy/TftpStates.py | TftpStateExpectACK.handle | def handle(self, pkt, raddress, rport):
    "Handle a packet, hopefully an ACK since we just sent a DAT."
    if isinstance(pkt, TftpPacketACK):
        log.debug("Received ACK for packet %d" % pkt.blocknumber)
        # Is this an ack to the one we just sent?
        if self.context.next_block == pkt.blocknumber:
            if self.context.pending_complete:
                log.info("Received ACK to final DAT, we're done.")
                return None
            else:
                log.debug("Good ACK, sending next DAT")
                self.context.next_block += 1
                log.debug("Incremented next_block to %d",
                          self.context.next_block)
                self.context.pending_complete = self.sendDAT()
        elif pkt.blocknumber < self.context.next_block:
            log.warning("Received duplicate ACK for block %d"
                        % pkt.blocknumber)
            self.context.metrics.add_dup(pkt)
        else:
            log.warning("Oooh, time warp. Received ACK to packet we "
                        "didn't send yet. Discarding.")
            self.context.metrics.errors += 1
        return self
    elif isinstance(pkt, TftpPacketERR):
        log.error("Received ERR packet from peer: %s" % str(pkt))
        raise TftpException("Received ERR packet from peer: %s" % str(pkt))
    else:
        log.warning("Discarding unsupported packet: %s" % str(pkt))
    return self | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L438-L469 | train |
msoulier/tftpy | tftpy/TftpStates.py | TftpStateExpectDAT.handle | def handle(self, pkt, raddress, rport):
    """Handle the packet in response to an ACK, which should be a DAT."""
    if isinstance(pkt, TftpPacketDAT):
        return self.handleDat(pkt)
    # Every other packet type is a problem.
    elif isinstance(pkt, TftpPacketACK):
        # Umm, we ACK, you don't.
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received ACK from peer when expecting DAT")
    elif isinstance(pkt, TftpPacketWRQ):
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received WRQ from peer when expecting DAT")
    elif isinstance(pkt, TftpPacketERR):
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received ERR from peer: " + str(pkt))
    else:
        self.sendError(TftpErrors.IllegalTftpOp)
raise TftpException("Received unknown packet type from peer: " + str(pkt)) | python | def handle(self, pkt, raddress, rport):
"""Handle the packet in response to an ACK, which should be a DAT."""
if isinstance(pkt, TftpPacketDAT):
return self.handleDat(pkt)
# Every other packet type is a problem.
elif isinstance(pkt, TftpPacketACK):
# Umm, we ACK, you don't.
self.sendError(TftpErrors.IllegalTftpOp)
raise TftpException("Received ACK from peer when expecting DAT")
elif isinstance(pkt, TftpPacketWRQ):
self.sendError(TftpErrors.IllegalTftpOp)
raise TftpException("Received WRQ from peer when expecting DAT")
elif isinstance(pkt, TftpPacketERR):
self.sendError(TftpErrors.IllegalTftpOp)
raise TftpException("Received ERR from peer: " + str(pkt))
else:
self.sendError(TftpErrors.IllegalTftpOp)
raise TftpException("Received unknown packet type from peer: " + str(pkt)) | [
"def",
"handle",
"(",
"self",
",",
"pkt",
",",
"raddress",
",",
"rport",
")",
":",
"if",
"isinstance",
"(",
"pkt",
",",
"TftpPacketDAT",
")",
":",
"return",
"self",
".",
"handleDat",
"(",
"pkt",
")",
"# Every other packet type is a problem.",
"elif",
"isinstance",
"(",
"pkt",
",",
"TftpPacketACK",
")",
":",
"# Umm, we ACK, you don't.",
"self",
".",
"sendError",
"(",
"TftpErrors",
".",
"IllegalTftpOp",
")",
"raise",
"TftpException",
"(",
"\"Received ACK from peer when expecting DAT\"",
")",
"elif",
"isinstance",
"(",
"pkt",
",",
"TftpPacketWRQ",
")",
":",
"self",
".",
"sendError",
"(",
"TftpErrors",
".",
"IllegalTftpOp",
")",
"raise",
"TftpException",
"(",
"\"Received WRQ from peer when expecting DAT\"",
")",
"elif",
"isinstance",
"(",
"pkt",
",",
"TftpPacketERR",
")",
":",
"self",
".",
"sendError",
"(",
"TftpErrors",
".",
"IllegalTftpOp",
")",
"raise",
"TftpException",
"(",
"\"Received ERR from peer: \"",
"+",
"str",
"(",
"pkt",
")",
")",
"else",
":",
"self",
".",
"sendError",
"(",
"TftpErrors",
".",
"IllegalTftpOp",
")",
"raise",
"TftpException",
"(",
"\"Received unknown packet type from peer: \"",
"+",
"str",
"(",
"pkt",
")",
")"
] | Handle the packet in response to an ACK, which should be a DAT. | [
"Handle",
"the",
"packet",
"in",
"response",
"to",
"an",
"ACK",
"which",
"should",
"be",
"a",
"DAT",
"."
] | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L473-L494 | train |
msoulier/tftpy | tftpy/TftpStates.py | TftpStateSentRRQ.handle | def handle(self, pkt, raddress, rport):
    """Handle the packet in response to an RRQ to the server."""
    if not self.context.tidport:
        self.context.tidport = rport
        log.info("Set remote port for session to %s" % rport)
    # Now check the packet type and dispatch it properly.
    if isinstance(pkt, TftpPacketOACK):
        log.info("Received OACK from server")
        try:
            self.handleOACK(pkt)
        except TftpException as err:
            log.error("Failed to negotiate options: %s" % str(err))
            self.sendError(TftpErrors.FailedNegotiation)
            raise
        else:
            log.debug("Sending ACK to OACK")
            self.sendACK(blocknumber=0)
            log.debug("Changing state to TftpStateExpectDAT")
            return TftpStateExpectDAT(self.context)
    elif isinstance(pkt, TftpPacketDAT):
        # If there are any options set, then the server didn't honour any
        # of them.
        log.info("Received DAT from server")
        if self.context.options:
            log.info("Server ignored options, falling back to defaults")
            self.context.options = { 'blksize': DEF_BLKSIZE }
        return self.handleDat(pkt)
    # Every other packet type is a problem.
    elif isinstance(pkt, TftpPacketACK):
        # Umm, we ACK, the server doesn't.
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received ACK from server while in download")
    elif isinstance(pkt, TftpPacketWRQ):
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received WRQ from server while in download")
    elif isinstance(pkt, TftpPacketERR):
        self.sendError(TftpErrors.IllegalTftpOp)
        log.debug("Received ERR packet: %s", pkt)
        if pkt.errorcode == TftpErrors.FileNotFound:
            raise TftpFileNotFoundError("File not found")
        else:
            raise TftpException("Received ERR from server: {}".format(pkt))
    else:
        self.sendError(TftpErrors.IllegalTftpOp)
        raise TftpException("Received unknown packet type from server: %s" % pkt)
    # By default, no state change.
    return self | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L556-L611 | train |
msoulier/tftpy | tftpy/TftpServer.py | TftpServer.stop | def stop(self, now=False):
    """Stop the server gracefully. Do not take any new transfers,
    but complete the existing ones. If now is True, drop everything
    and stop immediately. Note that an immediate stop will not
    interrupt the select loop; it takes effect when the server
    returns on ready data or on a timeout, ie. SOCK_TIMEOUT."""
    if now:
        self.shutdown_immediately = True
    else:
        self.shutdown_gracefully = True | python | af2f2fe89a3bf45748b78703820efb0986a8207a | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpServer.py#L249-L258 | train |
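Since `listen` blocks in the server's select loop, `stop` is normally invoked from another thread. A minimal lifecycle sketch (root directory, address, and port are placeholders):

```python
import threading
import tftpy

server = tftpy.TftpServer('/srv/tftp')
t = threading.Thread(target=server.listen, args=('0.0.0.0', 69))
t.start()
# ... later: let active transfers finish, then leave the loop on the
# next ready-data event or SOCK_TIMEOUT expiry.
server.stop(now=False)
t.join()
```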
avelkoski/FRB | fred/helpers/__init__.py | _fetch | def _fetch(url, ssl_verify=True):
    """
    Helper function to fetch content from a given url.
    """
    req = Request(url)
    if ssl_verify:
        page = urlopen(req)
    else:
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        page = urlopen(req, context=ctx)
    content = page.read().decode('utf-8')
    page.close()
    return content | python | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L25-L40 | train |
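Note that the `ssl_verify=False` branch above disables certificate verification entirely, so it should only be used against hosts you trust. A usage sketch (the URL shape follows the public FRED API, and the key is a placeholder):

```python
content = _fetch(
    'https://api.stlouisfed.org/fred/category'
    '?category_id=125&api_key=YOUR_KEY&file_type=json')
print(content[:200])  # raw JSON text; see _dict/_data_frame below for parsing
```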
avelkoski/FRB | fred/helpers/__init__.py | _dict | def _dict(content):
    """
    Helper function that converts a text-based get response
    to a python dictionary for additional manipulation.
    """
    if _has_pandas:
        data = _data_frame(content).to_dict(orient='records')
    else:
        response = loads(content)
        key = [x for x in response.keys() if x in c.response_data][0]
        data = response[key]
    return data | python | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L61-L72 | train |
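Both branches of `_dict` return a list of records. A sketch of the non-pandas path on a hand-written response; `c.response_data` is the module's list of keys FRED may return (e.g. `categories`, `seriess`, `observations` — treat the exact contents as an assumption here):

```python
from json import loads

content = '{"categories": [{"id": 125, "name": "Trade Balance", "parent_id": 13}]}'
response = loads(content)
# Pick whichever known response key is present in this payload.
key = [x for x in response.keys()
       if x in ('categories', 'seriess', 'observations')][0]
print(response[key])  # [{'id': 125, 'name': 'Trade Balance', 'parent_id': 13}]
```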
avelkoski/FRB | fred/helpers/__init__.py | _data_frame | def _data_frame(content):
    """
    Helper function that converts a text-based get response
    to a pandas dataframe for additional manipulation.
    """
    response = loads(content)
    key = [x for x in response.keys() if x in c.response_data][0]
    frame = DataFrame(response[key])
    final_frame = _convert(frame)
    return final_frame | python | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L74-L83 | train |
avelkoski/FRB | fred/helpers/__init__.py | _tab | def _tab(content):
    """
    Helper function that converts a text-based get response
    to tab separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False, sep='\t')
    return response | python | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L93-L99 | train |
avelkoski/FRB | fred/helpers/__init__.py | _pipe | def _pipe(content):
    """
    Helper function that converts a text-based get response
    to pipe separated values for additional manipulation.
    """
    response = _data_frame(content).to_csv(index=False, sep='|')
return response | python | def _pipe(content):
"""
Helper funcation that converts text-based get response
to pipe separated values for additional manipulation.
"""
response = _data_frame(content).to_csv(index=False,sep='|')
return response | [
"def",
"_pipe",
"(",
"content",
")",
":",
"response",
"=",
"_data_frame",
"(",
"content",
")",
".",
"to_csv",
"(",
"index",
"=",
"False",
",",
"sep",
"=",
"'|'",
")",
"return",
"response"
] | Helper function that converts text-based get response
to pipe separated values for additional manipulation. | [
"Helper",
"funcation",
"that",
"converts",
"text",
"-",
"based",
"get",
"response",
"to",
"pipe",
"separated",
"values",
"for",
"additional",
"manipulation",
"."
] | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L101-L107 | train |
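
Both separator variants (tab and pipe) reduce to pandas' to_csv with a custom sep; for example:

from pandas import DataFrame

df = DataFrame({'a': [1, 2], 'b': [3, 4]})
print(df.to_csv(index=False, sep='|'))
# a|b
# 1|3
# 2|4
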
avelkoski/FRB | fred/helpers/__init__.py | _get_request | def _get_request(url_root,api_key,path,response_type,params, ssl_verify):
"""
Helper function that requests a get response from FRED.
"""
url = _url_builder(url_root,api_key,path,params)
content = _fetch(url, ssl_verify)
response = _dispatch(response_type)(content)
return response | python | def _get_request(url_root,api_key,path,response_type,params, ssl_verify):
"""
Helper function that requests a get response from FRED.
"""
url = _url_builder(url_root,api_key,path,params)
content = _fetch(url, ssl_verify)
response = _dispatch(response_type)(content)
return response | [
"def",
"_get_request",
"(",
"url_root",
",",
"api_key",
",",
"path",
",",
"response_type",
",",
"params",
",",
"ssl_verify",
")",
":",
"url",
"=",
"_url_builder",
"(",
"url_root",
",",
"api_key",
",",
"path",
",",
"params",
")",
"content",
"=",
"_fetch",
"(",
"url",
",",
"ssl_verify",
")",
"response",
"=",
"_dispatch",
"(",
"response_type",
")",
"(",
"content",
")",
"return",
"response"
] | Helper function that requests a get response from FRED. | [
"Helper",
"funcation",
"that",
"requests",
"a",
"get",
"response",
"from",
"FRED",
"."
] | 692bcf576e17bd1a81db2b7644f4f61aeb39e5c7 | https://github.com/avelkoski/FRB/blob/692bcf576e17bd1a81db2b7644f4f61aeb39e5c7/fred/helpers/__init__.py#L141-L148 | train |
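
The response_type argument is resolved by _dispatch into one of the converter helpers above. A stand-alone sketch of that dispatch pattern (the converter names here are illustrative, not the library's):

converters = {
    'json': lambda content: content,
    'pipe': lambda content: content.replace(',', '|'),
}

def dispatch(response_type):
    # Look up the converter for the requested response type.
    return converters[response_type]

print(dispatch('pipe')('a,b,c'))  # a|b|c
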
NicolasLM/atoma | atoma/atom.py | parse_atom_file | def parse_atom_file(filename: str) -> AtomFeed:
"""Parse an Atom feed from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_atom(root) | python | def parse_atom_file(filename: str) -> AtomFeed:
"""Parse an Atom feed from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_atom(root) | [
"def",
"parse_atom_file",
"(",
"filename",
":",
"str",
")",
"->",
"AtomFeed",
":",
"root",
"=",
"parse_xml",
"(",
"filename",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_atom",
"(",
"root",
")"
] | Parse an Atom feed from a local XML file. | [
"Parse",
"an",
"Atom",
"feed",
"from",
"a",
"local",
"XML",
"file",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/atom.py#L275-L278 | train |
NicolasLM/atoma | atoma/atom.py | parse_atom_bytes | def parse_atom_bytes(data: bytes) -> AtomFeed:
"""Parse an Atom feed from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_atom(root) | python | def parse_atom_bytes(data: bytes) -> AtomFeed:
"""Parse an Atom feed from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_atom(root) | [
"def",
"parse_atom_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"AtomFeed",
":",
"root",
"=",
"parse_xml",
"(",
"BytesIO",
"(",
"data",
")",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_atom",
"(",
"root",
")"
] | Parse an Atom feed from a byte-string containing XML data. | [
"Parse",
"an",
"Atom",
"feed",
"from",
"a",
"byte",
"-",
"string",
"containing",
"XML",
"data",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/atom.py#L281-L284 | train |
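
Typical use, assuming the atoma package is installed; the bytes below form a minimal Atom document with the required id/title/updated elements:

from atoma import parse_atom_bytes

xml = b"""<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <id>urn:example:feed</id>
  <title>Example feed</title>
  <updated>2020-01-01T00:00:00Z</updated>
</feed>"""

feed = parse_atom_bytes(xml)
print(feed.title.value)  # Example feed
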
NicolasLM/atoma | atoma/rss.py | _get_link | def _get_link(element: Element) -> Optional[str]:
"""Attempt to retrieve item link.
Use the GUID as a fallback if it is a permalink.
"""
link = get_text(element, 'link')
if link is not None:
return link
guid = get_child(element, 'guid')
if guid is not None and guid.attrib.get('isPermaLink') == 'true':
return get_text(element, 'guid')
return None | python | def _get_link(element: Element) -> Optional[str]:
"""Attempt to retrieve item link.
Use the GUID as a fallback if it is a permalink.
"""
link = get_text(element, 'link')
if link is not None:
return link
guid = get_child(element, 'guid')
if guid is not None and guid.attrib.get('isPermaLink') == 'true':
return get_text(element, 'guid')
return None | [
"def",
"_get_link",
"(",
"element",
":",
"Element",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"link",
"=",
"get_text",
"(",
"element",
",",
"'link'",
")",
"if",
"link",
"is",
"not",
"None",
":",
"return",
"link",
"guid",
"=",
"get_child",
"(",
"element",
",",
"'guid'",
")",
"if",
"guid",
"is",
"not",
"None",
"and",
"guid",
".",
"attrib",
".",
"get",
"(",
"'isPermaLink'",
")",
"==",
"'true'",
":",
"return",
"get_text",
"(",
"element",
",",
"'guid'",
")",
"return",
"None"
] | Attempt to retrieve item link.
Use the GUID as a fallback if it is a permalink. | [
"Attempt",
"to",
"retrieve",
"item",
"link",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L118-L131 | train |
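
The permalink fallback in isolation, written against the standard library only (no atoma internals):

from xml.etree.ElementTree import fromstring

item = fromstring(
    '<item><guid isPermaLink="true">http://example.com/post/1</guid></item>'
)
guid = item.find('guid')
if guid is not None and guid.attrib.get('isPermaLink') == 'true':
    print(guid.text)  # used as the link when <link> is missing
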
NicolasLM/atoma | atoma/rss.py | parse_rss_file | def parse_rss_file(filename: str) -> RSSChannel:
"""Parse an RSS feed from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_rss(root) | python | def parse_rss_file(filename: str) -> RSSChannel:
"""Parse an RSS feed from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_rss(root) | [
"def",
"parse_rss_file",
"(",
"filename",
":",
"str",
")",
"->",
"RSSChannel",
":",
"root",
"=",
"parse_xml",
"(",
"filename",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_rss",
"(",
"root",
")"
] | Parse an RSS feed from a local XML file. | [
"Parse",
"an",
"RSS",
"feed",
"from",
"a",
"local",
"XML",
"file",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L212-L215 | train |
NicolasLM/atoma | atoma/rss.py | parse_rss_bytes | def parse_rss_bytes(data: bytes) -> RSSChannel:
"""Parse an RSS feed from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_rss(root) | python | def parse_rss_bytes(data: bytes) -> RSSChannel:
"""Parse an RSS feed from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_rss(root) | [
"def",
"parse_rss_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"RSSChannel",
":",
"root",
"=",
"parse_xml",
"(",
"BytesIO",
"(",
"data",
")",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_rss",
"(",
"root",
")"
] | Parse an RSS feed from a byte-string containing XML data. | [
"Parse",
"an",
"RSS",
"feed",
"from",
"a",
"byte",
"-",
"string",
"containing",
"XML",
"data",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/rss.py#L218-L221 | train |
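
And the RSS counterpart, again assuming atoma is installed:

from atoma import parse_rss_bytes

xml = b"""<?xml version="1.0"?>
<rss version="2.0"><channel>
<title>Example channel</title>
<link>http://example.com</link>
<description>Demo</description>
</channel></rss>"""

channel = parse_rss_bytes(xml)
print(channel.title)  # Example channel
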
NicolasLM/atoma | atoma/json_feed.py | parse_json_feed_file | def parse_json_feed_file(filename: str) -> JSONFeed:
"""Parse a JSON feed from a local json file."""
with open(filename) as f:
try:
root = json.load(f)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root) | python | def parse_json_feed_file(filename: str) -> JSONFeed:
"""Parse a JSON feed from a local json file."""
with open(filename) as f:
try:
root = json.load(f)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root) | [
"def",
"parse_json_feed_file",
"(",
"filename",
":",
"str",
")",
"->",
"JSONFeed",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"try",
":",
"root",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"raise",
"FeedJSONError",
"(",
"'Not a valid JSON document'",
")",
"return",
"parse_json_feed",
"(",
"root",
")"
] | Parse a JSON feed from a local json file. | [
"Parse",
"a",
"JSON",
"feed",
"from",
"a",
"local",
"json",
"file",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L205-L213 | train |
NicolasLM/atoma | atoma/json_feed.py | parse_json_feed_bytes | def parse_json_feed_bytes(data: bytes) -> JSONFeed:
"""Parse a JSON feed from a byte-string containing JSON data."""
try:
root = json.loads(data)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root) | python | def parse_json_feed_bytes(data: bytes) -> JSONFeed:
"""Parse a JSON feed from a byte-string containing JSON data."""
try:
root = json.loads(data)
except json.decoder.JSONDecodeError:
raise FeedJSONError('Not a valid JSON document')
return parse_json_feed(root) | [
"def",
"parse_json_feed_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"JSONFeed",
":",
"try",
":",
"root",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"raise",
"FeedJSONError",
"(",
"'Not a valid JSON document'",
")",
"return",
"parse_json_feed",
"(",
"root",
")"
] | Parse a JSON feed from a byte-string containing JSON data. | [
"Parse",
"a",
"JSON",
"feed",
"from",
"a",
"byte",
"-",
"string",
"containing",
"JSON",
"data",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/json_feed.py#L216-L223 | train |
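
Invalid input surfaces as FeedJSONError rather than a bare json exception; the top-level import path for the exception is assumed here:

from atoma import parse_json_feed_bytes, FeedJSONError

try:
    parse_json_feed_bytes(b'not json at all')
except FeedJSONError as exc:
    print('rejected:', exc)  # rejected: Not a valid JSON document
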
NicolasLM/atoma | atoma/opml.py | parse_opml_file | def parse_opml_file(filename: str) -> OPML:
"""Parse an OPML document from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_opml(root) | python | def parse_opml_file(filename: str) -> OPML:
"""Parse an OPML document from a local XML file."""
root = parse_xml(filename).getroot()
return _parse_opml(root) | [
"def",
"parse_opml_file",
"(",
"filename",
":",
"str",
")",
"->",
"OPML",
":",
"root",
"=",
"parse_xml",
"(",
"filename",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_opml",
"(",
"root",
")"
] | Parse an OPML document from a local XML file. | [
"Parse",
"an",
"OPML",
"document",
"from",
"a",
"local",
"XML",
"file",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L82-L85 | train |
NicolasLM/atoma | atoma/opml.py | parse_opml_bytes | def parse_opml_bytes(data: bytes) -> OPML:
"""Parse an OPML document from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_opml(root) | python | def parse_opml_bytes(data: bytes) -> OPML:
"""Parse an OPML document from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_opml(root) | [
"def",
"parse_opml_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"OPML",
":",
"root",
"=",
"parse_xml",
"(",
"BytesIO",
"(",
"data",
")",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_opml",
"(",
"root",
")"
] | Parse an OPML document from a byte-string containing XML data. | [
"Parse",
"an",
"OPML",
"document",
"from",
"a",
"byte",
"-",
"string",
"containing",
"XML",
"data",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L88-L91 | train |
NicolasLM/atoma | atoma/opml.py | get_feed_list | def get_feed_list(opml_obj: OPML) -> List[str]:
"""Walk an OPML document to extract the list of feed it contains."""
rv = list()
def collect(obj):
for outline in obj.outlines:
if outline.type == 'rss' and outline.xml_url:
rv.append(outline.xml_url)
if outline.outlines:
collect(outline)
collect(opml_obj)
return rv | python | def get_feed_list(opml_obj: OPML) -> List[str]:
"""Walk an OPML document to extract the list of feed it contains."""
rv = list()
def collect(obj):
for outline in obj.outlines:
if outline.type == 'rss' and outline.xml_url:
rv.append(outline.xml_url)
if outline.outlines:
collect(outline)
collect(opml_obj)
return rv | [
"def",
"get_feed_list",
"(",
"opml_obj",
":",
"OPML",
")",
"->",
"List",
"[",
"str",
"]",
":",
"rv",
"=",
"list",
"(",
")",
"def",
"collect",
"(",
"obj",
")",
":",
"for",
"outline",
"in",
"obj",
".",
"outlines",
":",
"if",
"outline",
".",
"type",
"==",
"'rss'",
"and",
"outline",
".",
"xml_url",
":",
"rv",
".",
"append",
"(",
"outline",
".",
"xml_url",
")",
"if",
"outline",
".",
"outlines",
":",
"collect",
"(",
"outline",
")",
"collect",
"(",
"opml_obj",
")",
"return",
"rv"
] | Walk an OPML document to extract the list of feeds it contains. | [
"Walk",
"an",
"OPML",
"document",
"to",
"extract",
"the",
"list",
"of",
"feed",
"it",
"contains",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/opml.py#L94-L107 | train |
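
Walking a small document end to end, assuming atoma.opml is importable and that outlines carry the usual type/xmlUrl attributes:

from atoma.opml import parse_opml_bytes, get_feed_list

xml = b"""<opml version="2.0"><head/><body>
<outline text="News">
  <outline type="rss" text="A" xmlUrl="http://example.com/a.xml"/>
</outline>
</body></opml>"""

print(get_feed_list(parse_opml_bytes(xml)))  # ['http://example.com/a.xml']
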
NicolasLM/atoma | atoma/simple.py | simple_parse_file | def simple_parse_file(filename: str) -> Feed:
"""Parse an Atom, RSS or JSON feed from a local file."""
pairs = (
(rss.parse_rss_file, _adapt_rss_channel),
(atom.parse_atom_file, _adapt_atom_feed),
(json_feed.parse_json_feed_file, _adapt_json_feed)
)
return _simple_parse(pairs, filename) | python | def simple_parse_file(filename: str) -> Feed:
"""Parse an Atom, RSS or JSON feed from a local file."""
pairs = (
(rss.parse_rss_file, _adapt_rss_channel),
(atom.parse_atom_file, _adapt_atom_feed),
(json_feed.parse_json_feed_file, _adapt_json_feed)
)
return _simple_parse(pairs, filename) | [
"def",
"simple_parse_file",
"(",
"filename",
":",
"str",
")",
"->",
"Feed",
":",
"pairs",
"=",
"(",
"(",
"rss",
".",
"parse_rss_file",
",",
"_adapt_rss_channel",
")",
",",
"(",
"atom",
".",
"parse_atom_file",
",",
"_adapt_atom_feed",
")",
",",
"(",
"json_feed",
".",
"parse_json_feed_file",
",",
"_adapt_json_feed",
")",
")",
"return",
"_simple_parse",
"(",
"pairs",
",",
"filename",
")"
] | Parse an Atom, RSS or JSON feed from a local file. | [
"Parse",
"an",
"Atom",
"RSS",
"or",
"JSON",
"feed",
"from",
"a",
"local",
"file",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/simple.py#L207-L214 | train |
NicolasLM/atoma | atoma/simple.py | simple_parse_bytes | def simple_parse_bytes(data: bytes) -> Feed:
"""Parse an Atom, RSS or JSON feed from a byte-string containing data."""
pairs = (
(rss.parse_rss_bytes, _adapt_rss_channel),
(atom.parse_atom_bytes, _adapt_atom_feed),
(json_feed.parse_json_feed_bytes, _adapt_json_feed)
)
return _simple_parse(pairs, data) | python | def simple_parse_bytes(data: bytes) -> Feed:
"""Parse an Atom, RSS or JSON feed from a byte-string containing data."""
pairs = (
(rss.parse_rss_bytes, _adapt_rss_channel),
(atom.parse_atom_bytes, _adapt_atom_feed),
(json_feed.parse_json_feed_bytes, _adapt_json_feed)
)
return _simple_parse(pairs, data) | [
"def",
"simple_parse_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"Feed",
":",
"pairs",
"=",
"(",
"(",
"rss",
".",
"parse_rss_bytes",
",",
"_adapt_rss_channel",
")",
",",
"(",
"atom",
".",
"parse_atom_bytes",
",",
"_adapt_atom_feed",
")",
",",
"(",
"json_feed",
".",
"parse_json_feed_bytes",
",",
"_adapt_json_feed",
")",
")",
"return",
"_simple_parse",
"(",
"pairs",
",",
"data",
")"
] | Parse an Atom, RSS or JSON feed from a byte-string containing data. | [
"Parse",
"an",
"Atom",
"RSS",
"or",
"JSON",
"feed",
"from",
"a",
"byte",
"-",
"string",
"containing",
"data",
"."
] | 16c6956112f975eb2ce774b2d5f8e9ddffde569f | https://github.com/NicolasLM/atoma/blob/16c6956112f975eb2ce774b2d5f8e9ddffde569f/atoma/simple.py#L217-L224 | train |
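
Both simple_* helpers walk a list of (parser, adapter) pairs and fall through on failure; the pattern in miniature, with stand-in parsers:

def parse_any(data, parsers):
    # Try each parser in order; the first one that accepts the input wins.
    for parse in parsers:
        try:
            return parse(data)
        except ValueError:
            continue
    raise ValueError('no parser accepted the input')

print(parse_any('3.14', [int, float]))  # 3.14 -- int fails, float succeeds
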
Atomistica/atomistica | src/python/atomistica/deformation.py | get_shear_distance | def get_shear_distance(a):
"""
Returns the distance a volume has moved during simple shear. Considers
either Lees-Edwards boundary conditions or sheared cells.
"""
cx, cy, cz = a.cell
if 'shear_dx' in a.info:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
assert abs(cy[0]) < 1e-12, 'cx[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0])
assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1])
dx, dy, dz = a.info['shear_dx']
else:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
dx, dy, sz = cz
return dx, dy | python | def get_shear_distance(a):
"""
Returns the distance a volume has moved during simple shear. Considers
either Lees-Edwards boundary conditions or sheared cells.
"""
cx, cy, cz = a.cell
if 'shear_dx' in a.info:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
assert abs(cy[0]) < 1e-12, 'cx[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
assert abs(cz[0]) < 1e-12, 'cz[0] = {0}'.format(cz[0])
assert abs(cz[1]) < 1e-12, 'cz[1] = {0}'.format(cz[1])
dx, dy, dz = a.info['shear_dx']
else:
assert abs(cx[1]) < 1e-12, 'cx[1] = {0}'.format(cx[1])
assert abs(cx[2]) < 1e-12, 'cx[2] = {0}'.format(cx[2])
assert abs(cy[0]) < 1e-12, 'cy[0] = {0}'.format(cy[0])
assert abs(cy[2]) < 1e-12, 'cy[2] = {0}'.format(cy[2])
dx, dy, sz = cz
return dx, dy | [
"def",
"get_shear_distance",
"(",
"a",
")",
":",
"cx",
",",
"cy",
",",
"cz",
"=",
"a",
".",
"cell",
"if",
"'shear_dx'",
"in",
"a",
".",
"info",
":",
"assert",
"abs",
"(",
"cx",
"[",
"1",
"]",
")",
"<",
"1e-12",
",",
"'cx[1] = {0}'",
".",
"format",
"(",
"cx",
"[",
"1",
"]",
")",
"assert",
"abs",
"(",
"cx",
"[",
"2",
"]",
")",
"<",
"1e-12",
",",
"'cx[2] = {0}'",
".",
"format",
"(",
"cx",
"[",
"2",
"]",
")",
"assert",
"abs",
"(",
"cy",
"[",
"0",
"]",
")",
"<",
"1e-12",
",",
"'cx[0] = {0}'",
".",
"format",
"(",
"cy",
"[",
"0",
"]",
")",
"assert",
"abs",
"(",
"cy",
"[",
"2",
"]",
")",
"<",
"1e-12",
",",
"'cy[2] = {0}'",
".",
"format",
"(",
"cy",
"[",
"2",
"]",
")",
"assert",
"abs",
"(",
"cz",
"[",
"0",
"]",
")",
"<",
"1e-12",
",",
"'cz[0] = {0}'",
".",
"format",
"(",
"cz",
"[",
"0",
"]",
")",
"assert",
"abs",
"(",
"cz",
"[",
"1",
"]",
")",
"<",
"1e-12",
",",
"'cz[1] = {0}'",
".",
"format",
"(",
"cz",
"[",
"1",
"]",
")",
"dx",
",",
"dy",
",",
"dz",
"=",
"a",
".",
"info",
"[",
"'shear_dx'",
"]",
"else",
":",
"assert",
"abs",
"(",
"cx",
"[",
"1",
"]",
")",
"<",
"1e-12",
",",
"'cx[1] = {0}'",
".",
"format",
"(",
"cx",
"[",
"1",
"]",
")",
"assert",
"abs",
"(",
"cx",
"[",
"2",
"]",
")",
"<",
"1e-12",
",",
"'cx[2] = {0}'",
".",
"format",
"(",
"cx",
"[",
"2",
"]",
")",
"assert",
"abs",
"(",
"cy",
"[",
"0",
"]",
")",
"<",
"1e-12",
",",
"'cy[0] = {0}'",
".",
"format",
"(",
"cy",
"[",
"0",
"]",
")",
"assert",
"abs",
"(",
"cy",
"[",
"2",
"]",
")",
"<",
"1e-12",
",",
"'cy[2] = {0}'",
".",
"format",
"(",
"cy",
"[",
"2",
"]",
")",
"dx",
",",
"dy",
",",
"sz",
"=",
"cz",
"return",
"dx",
",",
"dy"
] | Returns the distance a volume has moved during simple shear. Considers
either Lees-Edwards boundary conditions or sheared cells. | [
"Returns",
"the",
"distance",
"a",
"volume",
"has",
"moved",
"during",
"simple",
"shear",
".",
"Considers",
"either",
"Lees",
"-",
"Edwards",
"boundary",
"conditions",
"or",
"sheared",
"cells",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/deformation.py#L30-L50 | train |
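
For the sheared-cell branch, the shear distance is just the in-plane part of the third cell vector; a pure-NumPy illustration:

import numpy as np

cell = np.array([[10.0, 0.0, 0.0],
                 [0.0, 10.0, 0.0],
                 [1.5, 0.5, 10.0]])  # cz carries the shear
dx, dy, _ = cell[2]
print(dx, dy)  # 1.5 0.5
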
Atomistica/atomistica | src/python/atomistica/atomic_strain.py | array_inverse | def array_inverse(A):
"""
Compute inverse for each matrix in a list of matrices.
This is faster than calling numpy.linalg.inv for each matrix.
"""
A = np.ascontiguousarray(A, dtype=float)
b = np.identity(A.shape[2], dtype=A.dtype)
n_eq = A.shape[1]
n_rhs = A.shape[2]
pivots = np.zeros(n_eq, np.intc)
identity = np.eye(n_eq)
def lapack_inverse(a):
b = np.copy(identity)
pivots = np.zeros(n_eq, np.intc)
results = np.linalg.lapack_lite.dgesv(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise np.linalg.LinAlgError('Singular matrix')
return b
return np.array([lapack_inverse(a) for a in A]) | python | def array_inverse(A):
"""
Compute inverse for each matrix in a list of matrices.
This is faster than calling numpy.linalg.inv for each matrix.
"""
A = np.ascontiguousarray(A, dtype=float)
b = np.identity(A.shape[2], dtype=A.dtype)
n_eq = A.shape[1]
n_rhs = A.shape[2]
pivots = np.zeros(n_eq, np.intc)
identity = np.eye(n_eq)
def lapack_inverse(a):
b = np.copy(identity)
pivots = np.zeros(n_eq, np.intc)
results = np.linalg.lapack_lite.dgesv(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise np.linalg.LinAlgError('Singular matrix')
return b
return np.array([lapack_inverse(a) for a in A]) | [
"def",
"array_inverse",
"(",
"A",
")",
":",
"A",
"=",
"np",
".",
"ascontiguousarray",
"(",
"A",
",",
"dtype",
"=",
"float",
")",
"b",
"=",
"np",
".",
"identity",
"(",
"A",
".",
"shape",
"[",
"2",
"]",
",",
"dtype",
"=",
"A",
".",
"dtype",
")",
"n_eq",
"=",
"A",
".",
"shape",
"[",
"1",
"]",
"n_rhs",
"=",
"A",
".",
"shape",
"[",
"2",
"]",
"pivots",
"=",
"np",
".",
"zeros",
"(",
"n_eq",
",",
"np",
".",
"intc",
")",
"identity",
"=",
"np",
".",
"eye",
"(",
"n_eq",
")",
"def",
"lapack_inverse",
"(",
"a",
")",
":",
"b",
"=",
"np",
".",
"copy",
"(",
"identity",
")",
"pivots",
"=",
"np",
".",
"zeros",
"(",
"n_eq",
",",
"np",
".",
"intc",
")",
"results",
"=",
"np",
".",
"linalg",
".",
"lapack_lite",
".",
"dgesv",
"(",
"n_eq",
",",
"n_rhs",
",",
"a",
",",
"n_eq",
",",
"pivots",
",",
"b",
",",
"n_eq",
",",
"0",
")",
"if",
"results",
"[",
"'info'",
"]",
">",
"0",
":",
"raise",
"np",
".",
"linalg",
".",
"LinAlgError",
"(",
"'Singular matrix'",
")",
"return",
"b",
"return",
"np",
".",
"array",
"(",
"[",
"lapack_inverse",
"(",
"a",
")",
"for",
"a",
"in",
"A",
"]",
")"
] | Compute inverse for each matrix in a list of matrices.
This is faster than calling numpy.linalg.inv for each matrix. | [
"Compute",
"inverse",
"for",
"each",
"matrix",
"in",
"a",
"list",
"of",
"matrices",
".",
"This",
"is",
"faster",
"than",
"calling",
"numpy",
".",
"linalg",
".",
"inv",
"for",
"each",
"matrix",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L66-L86 | train |
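
On NumPy versions where np.linalg.inv accepts stacked matrices, the same result is available directly; a quick equivalence check on random but well-conditioned input:

import numpy as np

A = np.random.rand(5, 3, 3) + 3 * np.eye(3)  # diagonally dominant stack
inv = np.linalg.inv(A)  # modern NumPy inverts a whole stack in one call
assert np.allclose(np.einsum('nij,njk->nik', A, inv), np.eye(3))
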
Atomistica/atomistica | src/python/atomistica/atomic_strain.py | get_delta_plus_epsilon | def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old):
"""
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
"""
XIJ = get_XIJ(nat, i_now, dr_now, dr_old)
YIJ = get_YIJ(nat, i_now, dr_old)
YIJ_invert = array_inverse(YIJ)
# Perform sum_k X_ik Y_jk^-1
epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3)
return epsilon | python | def get_delta_plus_epsilon(nat, i_now, dr_now, dr_old):
"""
Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix
"""
XIJ = get_XIJ(nat, i_now, dr_now, dr_old)
YIJ = get_YIJ(nat, i_now, dr_old)
YIJ_invert = array_inverse(YIJ)
# Perform sum_k X_ik Y_jk^-1
epsilon = np.sum(XIJ.reshape(-1,3,1,3)*YIJ_invert.reshape(-1,1,3,3), axis=3)
return epsilon | [
"def",
"get_delta_plus_epsilon",
"(",
"nat",
",",
"i_now",
",",
"dr_now",
",",
"dr_old",
")",
":",
"XIJ",
"=",
"get_XIJ",
"(",
"nat",
",",
"i_now",
",",
"dr_now",
",",
"dr_old",
")",
"YIJ",
"=",
"get_YIJ",
"(",
"nat",
",",
"i_now",
",",
"dr_old",
")",
"YIJ_invert",
"=",
"array_inverse",
"(",
"YIJ",
")",
"# Perform sum_k X_ik Y_jk^-1",
"epsilon",
"=",
"np",
".",
"sum",
"(",
"XIJ",
".",
"reshape",
"(",
"-",
"1",
",",
"3",
",",
"1",
",",
"3",
")",
"*",
"YIJ_invert",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
",",
"3",
",",
"3",
")",
",",
"axis",
"=",
"3",
")",
"return",
"epsilon"
] | Calculate delta_ij+epsilon_ij, i.e. the deformation gradient matrix | [
"Calculate",
"delta_ij",
"+",
"epsilon_ij",
"i",
".",
"e",
".",
"the",
"deformation",
"gradient",
"matrix"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L89-L101 | train |
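
The underlying least-squares fit for a single atom, with synthetic data: F = X Y^-1 where X = sum_n dr_now dr_old^T and Y = sum_n dr_old dr_old^T (Falk-Langer):

import numpy as np

dr_old = np.random.rand(8, 3)
F_true = np.eye(3) + 0.01 * np.random.rand(3, 3)
dr_now = dr_old @ F_true.T          # perfectly affine neighborhood

X = dr_now.T @ dr_old               # sum of outer products
Y = dr_old.T @ dr_old
F = X @ np.linalg.inv(Y)
assert np.allclose(F, F_true)       # the fit recovers the gradient exactly
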
Atomistica/atomistica | src/python/atomistica/atomic_strain.py | get_D_square_min | def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None):
"""
Calculate the D^2_min norm of Falk and Langer
"""
nat = len(atoms_now)
assert len(atoms_now) == len(atoms_old)
pos_now = atoms_now.positions
pos_old = atoms_old.positions
# Compute current and old distance vectors. Note that current distance
# vectors cannot be taken from the neighbor calculation, because neighbors
# are calculated from the sheared cell while these distances need to come
# from the unsheared cell. Taking the distance from the unsheared cell
# makes periodic boundary conditions (and flipping of cell) a lot easier.
dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell)
dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell)
# Sanity check: Shape needs to be identical!
assert dr_now.shape == dr_old.shape
if delta_plus_epsilon is None:
# Get minimum strain tensor
delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old)
# Spread epsilon out for each neighbor index
delta_plus_epsilon_n = delta_plus_epsilon[i_now]
# Compute D^2_min
d_sq_n = np.sum(
(
dr_now-
np.sum(delta_plus_epsilon_n.reshape(-1,3,3)*dr_old.reshape(-1,1,3),
axis=2)
)**2,
axis=1)
# For each atom, sum over all neighbors
d_sq = np.bincount(i_now, weights=d_sq_n)
return delta_plus_epsilon, d_sq | python | def get_D_square_min(atoms_now, atoms_old, i_now, j_now, delta_plus_epsilon=None):
"""
Calculate the D^2_min norm of Falk and Langer
"""
nat = len(atoms_now)
assert len(atoms_now) == len(atoms_old)
pos_now = atoms_now.positions
pos_old = atoms_old.positions
# Compute current and old distance vectors. Note that current distance
# vectors cannot be taken from the neighbor calculation, because neighbors
# are calculated from the sheared cell while these distances need to come
# from the unsheared cell. Taking the distance from the unsheared cell
# makes periodic boundary conditions (and flipping of cell) a lot easier.
dr_now = mic(pos_now[i_now] - pos_now[j_now], atoms_now.cell)
dr_old = mic(pos_old[i_now] - pos_old[j_now], atoms_old.cell)
# Sanity check: Shape needs to be identical!
assert dr_now.shape == dr_old.shape
if delta_plus_epsilon is None:
# Get minimum strain tensor
delta_plus_epsilon = get_delta_plus_epsilon(nat, i_now, dr_now, dr_old)
# Spread epsilon out for each neighbor index
delta_plus_epsilon_n = delta_plus_epsilon[i_now]
# Compute D^2_min
d_sq_n = np.sum(
(
dr_now-
np.sum(delta_plus_epsilon_n.reshape(-1,3,3)*dr_old.reshape(-1,1,3),
axis=2)
)**2,
axis=1)
# For each atom, sum over all neighbors
d_sq = np.bincount(i_now, weights=d_sq_n)
return delta_plus_epsilon, d_sq | [
"def",
"get_D_square_min",
"(",
"atoms_now",
",",
"atoms_old",
",",
"i_now",
",",
"j_now",
",",
"delta_plus_epsilon",
"=",
"None",
")",
":",
"nat",
"=",
"len",
"(",
"atoms_now",
")",
"assert",
"len",
"(",
"atoms_now",
")",
"==",
"len",
"(",
"atoms_old",
")",
"pos_now",
"=",
"atoms_now",
".",
"positions",
"pos_old",
"=",
"atoms_old",
".",
"positions",
"# Compute current and old distance vectors. Note that current distance",
"# vectors cannot be taken from the neighbor calculation, because neighbors",
"# are calculated from the sheared cell while these distance need to come",
"# from the unsheared cell. Taking the distance from the unsheared cell",
"# make periodic boundary conditions (and flipping of cell) a lot easier.",
"dr_now",
"=",
"mic",
"(",
"pos_now",
"[",
"i_now",
"]",
"-",
"pos_now",
"[",
"j_now",
"]",
",",
"atoms_now",
".",
"cell",
")",
"dr_old",
"=",
"mic",
"(",
"pos_old",
"[",
"i_now",
"]",
"-",
"pos_old",
"[",
"j_now",
"]",
",",
"atoms_old",
".",
"cell",
")",
"# Sanity check: Shape needs to be identical!",
"assert",
"dr_now",
".",
"shape",
"==",
"dr_old",
".",
"shape",
"if",
"delta_plus_epsilon",
"is",
"None",
":",
"# Get minimum strain tensor",
"delta_plus_epsilon",
"=",
"get_delta_plus_epsilon",
"(",
"nat",
",",
"i_now",
",",
"dr_now",
",",
"dr_old",
")",
"# Spread epsilon out for each neighbor index",
"delta_plus_epsilon_n",
"=",
"delta_plus_epsilon",
"[",
"i_now",
"]",
"# Compute D^2_min",
"d_sq_n",
"=",
"np",
".",
"sum",
"(",
"(",
"dr_now",
"-",
"np",
".",
"sum",
"(",
"delta_plus_epsilon_n",
".",
"reshape",
"(",
"-",
"1",
",",
"3",
",",
"3",
")",
"*",
"dr_old",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
",",
"3",
")",
",",
"axis",
"=",
"2",
")",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"# For each atom, sum over all neighbors",
"d_sq",
"=",
"np",
".",
"bincount",
"(",
"i_now",
",",
"weights",
"=",
"d_sq_n",
")",
"return",
"delta_plus_epsilon",
",",
"d_sq"
] | Calculate the D^2_min norm of Falk and Langer | [
"Calculate",
"the",
"D^2_min",
"norm",
"of",
"Falk",
"and",
"Langer"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/atomic_strain.py#L104-L144 | train |
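
Given that fit, D^2_min is the residual sum of squares; continuing the single-atom sketch, one non-affine neighbor makes it strictly positive:

import numpy as np

dr_old = np.random.rand(8, 3)
dr_now = dr_old @ (np.eye(3) + 0.01 * np.random.rand(3, 3)).T
dr_now[0] += 0.05                   # one non-affine displacement

F = (dr_now.T @ dr_old) @ np.linalg.inv(dr_old.T @ dr_old)
d2min = np.sum((dr_now - dr_old @ F.T) ** 2)
print(d2min > 0)  # True
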
Atomistica/atomistica | src/python/atomistica/hardware.py | dhms | def dhms(secs):
"""return days,hours,minutes and seconds"""
dhms = [0, 0, 0, 0]
dhms[0] = int(secs // 86400)
s = secs % 86400
dhms[1] = int(s // 3600)
s = secs % 3600
dhms[2] = int(s // 60)
s = secs % 60
dhms[3] = int(s+.5)
return dhms | python | def dhms(secs):
"""return days,hours,minutes and seconds"""
dhms = [0, 0, 0, 0]
dhms[0] = int(secs // 86400)
s = secs % 86400
dhms[1] = int(s // 3600)
s = secs % 3600
dhms[2] = int(s // 60)
s = secs % 60
dhms[3] = int(s+.5)
return dhms | [
"def",
"dhms",
"(",
"secs",
")",
":",
"dhms",
"=",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
"dhms",
"[",
"0",
"]",
"=",
"int",
"(",
"secs",
"//",
"86400",
")",
"s",
"=",
"secs",
"%",
"86400",
"dhms",
"[",
"1",
"]",
"=",
"int",
"(",
"s",
"//",
"3600",
")",
"s",
"=",
"secs",
"%",
"3600",
"dhms",
"[",
"2",
"]",
"=",
"int",
"(",
"s",
"//",
"60",
")",
"s",
"=",
"secs",
"%",
"60",
"dhms",
"[",
"3",
"]",
"=",
"int",
"(",
"s",
"+",
".5",
")",
"return",
"dhms"
] | return days,hours,minutes and seconds | [
"return",
"days",
"hours",
"minutes",
"and",
"seconds"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/hardware.py#L52-L62 | train |
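
The same split is often written with divmod, which avoids recomputing the remainders:

days, rem = divmod(90061, 86400)
hours, rem = divmod(rem, 3600)
minutes, seconds = divmod(rem, 60)
print(days, hours, minutes, seconds)  # 1 1 1 1
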
Atomistica/atomistica | src/python/atomistica/hardware.py | hms | def hms(secs):
"""return hours,minutes and seconds"""
hms = [0, 0, 0]
hms[0] = int(secs // 3600)
s = secs % 3600
hms[1] = int(s // 60)
s = secs % 60
hms[2] = int(s+.5)
return hms | python | def hms(secs):
"""return hours,minutes and seconds"""
hms = [0, 0, 0]
hms[0] = int(secs // 3600)
s = secs % 3600
hms[1] = int(s // 60)
s = secs % 60
hms[2] = int(s+.5)
return hms | [
"def",
"hms",
"(",
"secs",
")",
":",
"hms",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"hms",
"[",
"0",
"]",
"=",
"int",
"(",
"secs",
"//",
"3600",
")",
"s",
"=",
"secs",
"%",
"3600",
"hms",
"[",
"1",
"]",
"=",
"int",
"(",
"s",
"//",
"60",
")",
"s",
"=",
"secs",
"%",
"60",
"hms",
"[",
"2",
"]",
"=",
"int",
"(",
"s",
"+",
".5",
")",
"return",
"hms"
] | return hours,minutes and seconds | [
"return",
"hours",
"minutes",
"and",
"seconds"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/hardware.py#L65-L73 | train |
Atomistica/atomistica | src/python/atomistica/analysis.py | get_enclosing_orthorhombic_box | def get_enclosing_orthorhombic_box(cell):
"""
Return lower and upper bounds of the orthorhombic box that encloses
the parallelepiped spanned by the three cell vectors of cell.
"""
# Cell vectors
cx, cy, cz = cell
# The cell has eight corners, one is at the origin, three at cx, cy, cz
# and the last ones are...
c1 = cx+cy
c2 = cx+cz
c3 = cy+cz
c4 = cx+cy+cz
corners = np.array([[0,0,0],cx,cy,cz,c1,c2,c3,c4])
lower = np.min(corners, axis=0)
upper = np.max(corners, axis=0)
return lower, upper | python | def get_enclosing_orthorhombic_box(cell):
"""
Return lower and upper bounds of the orthorhombic box that encloses
the parallelepiped spanned by the three cell vectors of cell.
"""
# Cell vectors
cx, cy, cz = cell
# The cell has eight corners, one is at the origin, three at cx, cy, cz
# and the last ones are...
c1 = cx+cy
c2 = cx+cz
c3 = cy+cz
c4 = cx+cy+cz
corners = np.array([[0,0,0],cx,cy,cz,c1,c2,c3,c4])
lower = np.min(corners, axis=0)
upper = np.max(corners, axis=0)
return lower, upper | [
"def",
"get_enclosing_orthorhombic_box",
"(",
"cell",
")",
":",
"# Cell vectors",
"cx",
",",
"cy",
",",
"cz",
"=",
"cell",
"# The cell has eight corners, one is at the origin, three at cx, cy, cz",
"# and the last ones are...",
"c1",
"=",
"cx",
"+",
"cy",
"c2",
"=",
"cx",
"+",
"cz",
"c3",
"=",
"cy",
"+",
"cz",
"c4",
"=",
"cx",
"+",
"cy",
"+",
"cz",
"corners",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"0",
",",
"0",
"]",
",",
"cx",
",",
"cy",
",",
"cz",
",",
"c1",
",",
"c2",
",",
"c3",
",",
"c4",
"]",
")",
"lower",
"=",
"np",
".",
"min",
"(",
"corners",
",",
"axis",
"=",
"0",
")",
"upper",
"=",
"np",
".",
"max",
"(",
"corners",
",",
"axis",
"=",
"0",
")",
"return",
"lower",
",",
"upper"
] | Return lower and upper bounds of the orthorhombic box that encloses
the parallelepiped spanned by the three cell vectors of cell. | [
"Return",
"lower",
"and",
"upper",
"bounds",
"of",
"the",
"orthorhombic",
"box",
"that",
"encloses",
"the",
"parallelepiped",
"spanned",
"by",
"the",
"three",
"cell",
"vectors",
"of",
"cell",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/analysis.py#L38-L58 | train |
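
The eight corners can equivalently be enumerated as all 0/1 combinations of the cell vectors; a compact sketch of the same computation:

import itertools
import numpy as np

cell = np.array([[10.0, 0.0, 0.0],
                 [2.0, 10.0, 0.0],
                 [0.0, -1.0, 10.0]])
corners = np.array([i * cell[0] + j * cell[1] + k * cell[2]
                    for i, j, k in itertools.product((0, 1), repeat=3)])
print(corners.min(axis=0), corners.max(axis=0))
# [ 0. -1.  0.] [12. 10. 10.]
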
Atomistica/atomistica | src/python/atomistica/analysis.py | stress_invariants | def stress_invariants(s):
"""Receives a list of stress tensors and returns the three invariants.
Return hydrostatic pressure, octahedral shear stress and J3
"""
s = np.asarray(s)
if s.shape == (6,):
s = s.reshape(1,-1)
elif s.shape == (3,3):
s = s.reshape(1,3,3)
if len(s.shape) == 3:
s = np.transpose([s[:,0,0],s[:,1,1],s[:,2,2],
(s[:,0,1]+s[:,1,0])/2,
(s[:,1,2]+s[:,2,1])/2,
(s[:,2,0]+s[:,0,2])/2])
I1 = s[:,0]+s[:,1]+s[:,2]
I2 = s[:,0]*s[:,1]+s[:,1]*s[:,2]+s[:,2]*s[:,0]-s[:,3]**2-s[:,4]**2-s[:,5]**2
I3 = s[:,0]*s[:,1]*s[:,2]+2*s[:,3]*s[:,4]*s[:,5]-s[:,3]**2*s[:,2]-s[:,4]**2*s[:,0]-s[:,5]**2*s[:,1]
J2 = I1**2/3-I2
J3 = 2*I1**3/27-I1*I2/3+I3
# Return hydrostatic pressure, octahedral shear stress and J3
return -I1/3, np.sqrt(2*J2/3), J3 | python | def stress_invariants(s):
"""Receives a list of stress tensors and returns the three invariants.
Return hydrostatic pressure, octahedral shear stress and J3
"""
s = np.asarray(s)
if s.shape == (6,):
s = s.reshape(1,-1)
elif s.shape == (3,3):
s = s.reshape(1,3,3)
if len(s.shape) == 3:
s = np.transpose([s[:,0,0],s[:,1,1],s[:,2,2],
(s[:,0,1]+s[:,1,0])/2,
(s[:,1,2]+s[:,2,1])/2,
(s[:,2,0]+s[:,0,2])/2])
I1 = s[:,0]+s[:,1]+s[:,2]
I2 = s[:,0]*s[:,1]+s[:,1]*s[:,2]+s[:,2]*s[:,0]-s[:,3]**2-s[:,4]**2-s[:,5]**2
I3 = s[:,0]*s[:,1]*s[:,2]+2*s[:,3]*s[:,4]*s[:,5]-s[:,3]**2*s[:,2]-s[:,4]**2*s[:,0]-s[:,5]**2*s[:,1]
J2 = I1**2/3-I2
J3 = 2*I1**3/27-I1*I2/3+I3
# Return hydrostatic pressure, octahedral shear stress and J3
return -I1/3, np.sqrt(2*J2/3), J3 | [
"def",
"stress_invariants",
"(",
"s",
")",
":",
"s",
"=",
"np",
".",
"asarray",
"(",
"s",
")",
"if",
"s",
".",
"shape",
"==",
"(",
"6",
",",
")",
":",
"s",
"=",
"s",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"elif",
"s",
".",
"shape",
"==",
"(",
"3",
",",
"3",
")",
":",
"s",
"=",
"s",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"if",
"len",
"(",
"s",
".",
"shape",
")",
"==",
"3",
":",
"s",
"=",
"np",
".",
"transpose",
"(",
"[",
"s",
"[",
":",
",",
"0",
",",
"0",
"]",
",",
"s",
"[",
":",
",",
"1",
",",
"1",
"]",
",",
"s",
"[",
":",
",",
"2",
",",
"2",
"]",
",",
"(",
"s",
"[",
":",
",",
"0",
",",
"1",
"]",
"+",
"s",
"[",
":",
",",
"1",
",",
"0",
"]",
")",
"/",
"2",
",",
"(",
"s",
"[",
":",
",",
"1",
",",
"2",
"]",
"+",
"s",
"[",
":",
",",
"2",
",",
"1",
"]",
")",
"/",
"2",
",",
"(",
"s",
"[",
":",
",",
"2",
",",
"0",
"]",
"+",
"s",
"[",
":",
",",
"0",
",",
"2",
"]",
")",
"/",
"2",
"]",
")",
"I1",
"=",
"s",
"[",
":",
",",
"0",
"]",
"+",
"s",
"[",
":",
",",
"1",
"]",
"+",
"s",
"[",
":",
",",
"2",
"]",
"I2",
"=",
"s",
"[",
":",
",",
"0",
"]",
"*",
"s",
"[",
":",
",",
"1",
"]",
"+",
"s",
"[",
":",
",",
"1",
"]",
"*",
"s",
"[",
":",
",",
"2",
"]",
"+",
"s",
"[",
":",
",",
"2",
"]",
"*",
"s",
"[",
":",
",",
"0",
"]",
"-",
"s",
"[",
":",
",",
"3",
"]",
"**",
"2",
"-",
"s",
"[",
":",
",",
"4",
"]",
"**",
"2",
"-",
"s",
"[",
":",
",",
"5",
"]",
"**",
"2",
"I3",
"=",
"s",
"[",
":",
",",
"0",
"]",
"*",
"s",
"[",
":",
",",
"1",
"]",
"*",
"s",
"[",
":",
",",
"2",
"]",
"+",
"2",
"*",
"s",
"[",
":",
",",
"3",
"]",
"*",
"s",
"[",
":",
",",
"4",
"]",
"*",
"s",
"[",
":",
",",
"5",
"]",
"-",
"s",
"[",
":",
",",
"3",
"]",
"**",
"2",
"*",
"s",
"[",
":",
",",
"2",
"]",
"-",
"s",
"[",
":",
",",
"4",
"]",
"**",
"2",
"*",
"s",
"[",
":",
",",
"0",
"]",
"-",
"s",
"[",
":",
",",
"5",
"]",
"**",
"2",
"*",
"s",
"[",
":",
",",
"1",
"]",
"J2",
"=",
"I1",
"**",
"2",
"/",
"3",
"-",
"I2",
"J3",
"=",
"2",
"*",
"I1",
"**",
"3",
"/",
"27",
"-",
"I1",
"*",
"I2",
"/",
"3",
"+",
"I3",
"# Return hydrostatic pressure, octahedral shear stress and J3",
"return",
"-",
"I1",
"/",
"3",
",",
"np",
".",
"sqrt",
"(",
"2",
"*",
"J2",
"/",
"3",
")",
",",
"J3"
] | Receives a list of stress tensors and returns the three invariants.
Return hydrostatic pressure, octahedral shear stress and J3 | [
"Receives",
"a",
"list",
"of",
"stress",
"tensors",
"and",
"returns",
"the",
"three",
"invariants",
".",
"Return",
"hydrostatic",
"pressure",
"octahedral",
"shear",
"stress",
"and",
"J3"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/analysis.py#L181-L203 | train |
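
A cross-check against the deviatoric formulation for a diagonal stress state:

import numpy as np

s = np.diag([3.0, 2.0, 1.0])          # principal stresses
I1 = np.trace(s)
dev = s - I1 / 3 * np.eye(3)          # deviatoric part
J2 = 0.5 * np.trace(dev @ dev)
print(-I1 / 3, np.sqrt(2 * J2 / 3))   # pressure -2.0, tau_oct ~ 0.816
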
Atomistica/atomistica | tools/meta.py | scanmeta | def scanmeta(f):
"""Scan file headers for @meta ... @endmeta information and store that into
a dictionary.
"""
print(f)
if isinstance(f, str):
f = io.open(f, mode='r', encoding='latin-1')
done = False
l = f.readline()
s = None
while l and s is None:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@meta')
if i >= 0:
l = l[i+5:]
i = l.find('@endmeta')
if i >= 0:
s = l[:i]
done = True
else:
s = l
l = f.readline()
if not done and not l:
return { }
while l and not done:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@endmeta')
if i >= 0:
s += ' '+l[:i]
done = True
else:
s += ' '+l
l = f.readline()
s = map(lambda x: x.split(':'), s.split())
d = { }
for x in s:
if len(x) > 2 or len(x) == 0:
raise RuntimeError('Syntax error in meta information.')
elif len(x) == 2:
d[x[0]] = x[1]
else:
d[x[0]] = None
return d | python | def scanmeta(f):
"""Scan file headers for @meta ... @endmeta information and store that into
a dictionary.
"""
print(f)
if isinstance(f, str):
f = io.open(f, mode='r', encoding='latin-1')
done = False
l = f.readline()
s = None
while l and s is None:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@meta')
if i >= 0:
l = l[i+5:]
i = l.find('@endmeta')
if i >= 0:
s = l[:i]
done = True
else:
s = l
l = f.readline()
if not done and not l:
return { }
while l and not done:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@endmeta')
if i >= 0:
s += ' '+l[:i]
done = True
else:
s += ' '+l
l = f.readline()
s = map(lambda x: x.split(':'), s.split())
d = { }
for x in s:
if len(x) > 2 or len(x) == 0:
raise RuntimeError('Syntax error in meta information.')
elif len(x) == 2:
d[x[0]] = x[1]
else:
d[x[0]] = None
return d | [
"def",
"scanmeta",
"(",
"f",
")",
":",
"print",
"(",
"f",
")",
"if",
"isinstance",
"(",
"f",
",",
"str",
")",
":",
"f",
"=",
"io",
".",
"open",
"(",
"f",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'latin-1'",
")",
"done",
"=",
"False",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"s",
"=",
"None",
"while",
"l",
"and",
"s",
"is",
"None",
":",
"i",
"=",
"l",
".",
"find",
"(",
"'!'",
")",
"if",
"i",
">=",
"0",
":",
"l",
"=",
"l",
"[",
"i",
"+",
"1",
":",
"]",
"i",
"=",
"l",
".",
"find",
"(",
"'@meta'",
")",
"if",
"i",
">=",
"0",
":",
"l",
"=",
"l",
"[",
"i",
"+",
"5",
":",
"]",
"i",
"=",
"l",
".",
"find",
"(",
"'@endmeta'",
")",
"if",
"i",
">=",
"0",
":",
"s",
"=",
"l",
"[",
":",
"i",
"]",
"done",
"=",
"True",
"else",
":",
"s",
"=",
"l",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"not",
"done",
"and",
"not",
"l",
":",
"return",
"{",
"}",
"while",
"l",
"and",
"not",
"done",
":",
"i",
"=",
"l",
".",
"find",
"(",
"'!'",
")",
"if",
"i",
">=",
"0",
":",
"l",
"=",
"l",
"[",
"i",
"+",
"1",
":",
"]",
"i",
"=",
"l",
".",
"find",
"(",
"'@endmeta'",
")",
"if",
"i",
">=",
"0",
":",
"s",
"+=",
"' '",
"+",
"l",
"[",
":",
"i",
"]",
"done",
"=",
"True",
"else",
":",
"s",
"+=",
"' '",
"+",
"l",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"s",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"':'",
")",
",",
"s",
".",
"split",
"(",
")",
")",
"d",
"=",
"{",
"}",
"for",
"x",
"in",
"s",
":",
"if",
"len",
"(",
"x",
")",
">",
"2",
"or",
"len",
"(",
"x",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"'Syntax error in meta information.'",
")",
"elif",
"len",
"(",
"x",
")",
"==",
"2",
":",
"d",
"[",
"x",
"[",
"0",
"]",
"]",
"=",
"x",
"[",
"1",
"]",
"else",
":",
"d",
"[",
"x",
"[",
"0",
"]",
"]",
"=",
"None",
"return",
"d"
] | Scan file headers for @meta ... @endmeta information and store that into
a dictionary. | [
"Scan",
"file",
"headers",
"for"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/tools/meta.py#L14-L67 | train |
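
For single-line headers, the same @meta grammar condenses to one regex pass; a sketch only, with none of the comment-stripping or multi-line handling above:

import re

header = "! @meta name:example public @endmeta"
body = re.search(r'@meta(.*?)@endmeta', header, re.S).group(1)
# key:value pairs become entries; bare flags map to None
fields = dict((kv.split(':') + [None])[:2] for kv in body.split())
print(fields)  # {'name': 'example', 'public': None}
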
Atomistica/atomistica | src/python/atomistica/snippets.py | mic | def mic(dr, cell, pbc=None):
"""
Apply minimum image convention to an array of distance vectors.
"""
# Check where distance larger than 1/2 cell. Particles have crossed
# periodic boundaries then and need to be unwrapped.
rec = np.linalg.inv(cell)
if pbc is not None:
rec *= np.array(pbc, dtype=int).reshape(3,1)
dri = np.round(np.dot(dr, rec))
# Unwrap
return dr - np.dot(dri, cell) | python | def mic(dr, cell, pbc=None):
"""
Apply minimum image convention to an array of distance vectors.
"""
# Check where distance larger than 1/2 cell. Particles have crossed
# periodic boundaries then and need to be unwrapped.
rec = np.linalg.inv(cell)
if pbc is not None:
rec *= np.array(pbc, dtype=int).reshape(3,1)
dri = np.round(np.dot(dr, rec))
# Unwrap
return dr - np.dot(dri, cell) | [
"def",
"mic",
"(",
"dr",
",",
"cell",
",",
"pbc",
"=",
"None",
")",
":",
"# Check where distance larger than 1/2 cell. Particles have crossed",
"# periodic boundaries then and need to be unwrapped.",
"rec",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"cell",
")",
"if",
"pbc",
"is",
"not",
"None",
":",
"rec",
"*=",
"np",
".",
"array",
"(",
"pbc",
",",
"dtype",
"=",
"int",
")",
".",
"reshape",
"(",
"3",
",",
"1",
")",
"dri",
"=",
"np",
".",
"round",
"(",
"np",
".",
"dot",
"(",
"dr",
",",
"rec",
")",
")",
"# Unwrap",
"return",
"dr",
"-",
"np",
".",
"dot",
"(",
"dri",
",",
"cell",
")"
] | Apply minimum image convention to an array of distance vectors. | [
"Apply",
"minimum",
"image",
"convention",
"to",
"an",
"array",
"of",
"distance",
"vectors",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/snippets.py#L31-L43 | train |
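
For an orthorhombic cell the convention reduces to subtracting the rounded fractional part; a pure-NumPy check:

import numpy as np

cell = np.diag([10.0, 10.0, 10.0])
dr = np.array([[9.0, 0.2, -9.5]])
wrapped = dr - np.round(dr @ np.linalg.inv(cell)) @ cell
print(wrapped)  # [[-1.   0.2  0.5]]
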
Atomistica/atomistica | src/python/tools/a_run.py | s_from_dhms | def s_from_dhms(time):
"""return seconds from dhms"""
dhms_s = { 's' : 1, 'm' : 60, 'h' : 3600, 'd' : 86400 }
time = time.lower()
word_list = re.findall(r'\d*[^\d]*',time)
seconds=0
for word in word_list:
if word != '':
sec = 1
for t in list(dhms_s.keys()):
nw = word.replace(t,'')
if nw != word:
sec = dhms_s[t]
word = nw
break
try:
seconds += int(word) * sec
except:
raise RuntimeError('unknown format in timestring ' + time)
return seconds | python | def s_from_dhms(time):
"""return seconds from dhms"""
dhms_s = { 's' : 1, 'm' : 60, 'h' : 3600, 'd' : 86400 }
time = time.lower()
word_list = re.findall(r'\d*[^\d]*',time)
seconds=0
for word in word_list:
if word != '':
sec = 1
for t in list(dhms_s.keys()):
nw = word.replace(t,'')
if nw != word:
sec = dhms_s[t]
word = nw
break
try:
seconds += int(word) * sec
except:
raise RuntimeError('unknown format in timestring ' + time)
return seconds | [
"def",
"s_from_dhms",
"(",
"time",
")",
":",
"dhms_s",
"=",
"{",
"'s'",
":",
"1",
",",
"'m'",
":",
"60",
",",
"'h'",
":",
"3600",
",",
"'d'",
":",
"86400",
"}",
"time",
"=",
"time",
".",
"lower",
"(",
")",
"word_list",
"=",
"re",
".",
"findall",
"(",
"'\\d*[^\\d]*'",
",",
"time",
")",
"seconds",
"=",
"0",
"for",
"word",
"in",
"word_list",
":",
"if",
"word",
"!=",
"''",
":",
"sec",
"=",
"1",
"for",
"t",
"in",
"list",
"(",
"dhms_s",
".",
"keys",
"(",
")",
")",
":",
"nw",
"=",
"word",
".",
"replace",
"(",
"t",
",",
"''",
")",
"if",
"nw",
"!=",
"word",
":",
"sec",
"=",
"dhms_s",
"[",
"t",
"]",
"word",
"=",
"nw",
"break",
"try",
":",
"seconds",
"+=",
"int",
"(",
"word",
")",
"*",
"sec",
"except",
":",
"raise",
"RuntimeError",
"(",
"'unknown format in timestring '",
"+",
"time",
")",
"return",
"seconds"
] | return seconds from dhms | [
"return",
"seconds",
"from",
"dhms"
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/tools/a_run.py#L33-L52 | train |
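
An equivalent parse written as a single regex pass (a sketch; unlike the original it silently drops malformed tokens instead of raising):

import re

UNITS = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}

def seconds_of(spec):
    # Pair each run of digits with its unit letter and sum the products.
    return sum(int(n) * UNITS[u]
               for n, u in re.findall(r'(\d+)([smhd])', spec.lower()))

assert seconds_of('1d2h3m4s') == 93784
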
Atomistica/atomistica | src/python/atomistica/join_calculators.py | JoinCalculators.get_stress | def get_stress(self, a):
"""Calculate stress tensor."""
s = np.zeros( 6, dtype=float )
for c in self.calcs:
s += c.get_stress(a)
return s | python | def get_stress(self, a):
"""Calculate stress tensor."""
s = np.zeros( 6, dtype=float )
for c in self.calcs:
s += c.get_stress(a)
return s | [
"def",
"get_stress",
"(",
"self",
",",
"a",
")",
":",
"s",
"=",
"np",
".",
"zeros",
"(",
"6",
",",
"dtype",
"=",
"float",
")",
"for",
"c",
"in",
"self",
".",
"calcs",
":",
"s",
"+=",
"c",
".",
"get_stress",
"(",
"a",
")",
"return",
"s"
] | Calculate stress tensor. | [
"Calculate",
"stress",
"tensor",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/join_calculators.py#L66-L71 | train |
Atomistica/atomistica | src/python/atomistica/join_calculators.py | JoinCalculators.set_atoms | def set_atoms(self, a):
"""Assign an atoms object."""
for c in self.calcs:
if hasattr(c, "set_atoms"):
c.set_atoms(a) | python | def set_atoms(self, a):
"""Assign an atoms object."""
for c in self.calcs:
if hasattr(c, "set_atoms"):
c.set_atoms(a) | [
"def",
"set_atoms",
"(",
"self",
",",
"a",
")",
":",
"for",
"c",
"in",
"self",
".",
"calcs",
":",
"if",
"hasattr",
"(",
"c",
",",
"\"set_atoms\"",
")",
":",
"c",
".",
"set_atoms",
"(",
"a",
")"
] | Assign an atoms object. | [
"Assign",
"an",
"atoms",
"object",
"."
] | 5ed79d776c92b91a566be22615bfb304ecc75db7 | https://github.com/Atomistica/atomistica/blob/5ed79d776c92b91a566be22615bfb304ecc75db7/src/python/atomistica/join_calculators.py#L79-L83 | train |
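
The join pattern in miniature: every query fans out over the wrapped calculators and the numeric results are summed, exactly as get_stress does above. A self-contained toy version:

class Joined:
    def __init__(self, calcs):
        self.calcs = calcs

    def get_potential_energy(self, atoms=None):
        # Sum per-calculator contributions.
        return sum(c.get_potential_energy(atoms) for c in self.calcs)

class Flat:
    def __init__(self, e):
        self.e = e
    def get_potential_energy(self, atoms=None):
        return self.e

print(Joined([Flat(1.0), Flat(2.5)]).get_potential_energy())  # 3.5
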
thieman/py-dag | dag/__init__.py | DAG.rename_edges | def rename_edges(self, old_task_name, new_task_name, graph=None):
""" Change references to a task in existing edges. """
if not graph:
graph = self.graph
for node, edges in graph.items():
if node == old_task_name:
graph[new_task_name] = copy(edges)
del graph[old_task_name]
else:
if old_task_name in edges:
edges.remove(old_task_name)
edges.add(new_task_name) | python | def rename_edges(self, old_task_name, new_task_name, graph=None):
""" Change references to a task in existing edges. """
if not graph:
graph = self.graph
for node, edges in graph.items():
if node == old_task_name:
graph[new_task_name] = copy(edges)
del graph[old_task_name]
else:
if old_task_name in edges:
edges.remove(old_task_name)
edges.add(new_task_name) | [
"def",
"rename_edges",
"(",
"self",
",",
"old_task_name",
",",
"new_task_name",
",",
"graph",
"=",
"None",
")",
":",
"if",
"not",
"graph",
":",
"graph",
"=",
"self",
".",
"graph",
"for",
"node",
",",
"edges",
"in",
"graph",
".",
"items",
"(",
")",
":",
"if",
"node",
"==",
"old_task_name",
":",
"graph",
"[",
"new_task_name",
"]",
"=",
"copy",
"(",
"edges",
")",
"del",
"graph",
"[",
"old_task_name",
"]",
"else",
":",
"if",
"old_task_name",
"in",
"edges",
":",
"edges",
".",
"remove",
"(",
"old_task_name",
")",
"edges",
".",
"add",
"(",
"new_task_name",
")"
] | Change references to a task in existing edges. | [
"Change",
"references",
"to",
"a",
"task",
"in",
"existing",
"edges",
"."
] | 5b5eed396c930751576bdf0d45907a665aac000b | https://github.com/thieman/py-dag/blob/5b5eed396c930751576bdf0d45907a665aac000b/dag/__init__.py#L77-L90 | train |
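
Renaming touches both the node's own key and every edge set that references it; with the py-dag package installed:

from dag import DAG

d = DAG()
d.add_node('a')
d.add_node('b')
d.add_edge('a', 'b')
d.rename_edges('b', 'c')
print(dict(d.graph))  # {'a': {'c'}, 'c': set()}
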
thieman/py-dag | dag/__init__.py | DAG.predecessors | def predecessors(self, node, graph=None):
""" Returns a list of all predecessors of the given node """
if graph is None:
graph = self.graph
return [key for key in graph if node in graph[key]] | python | def predecessors(self, node, graph=None):
""" Returns a list of all predecessors of the given node """
if graph is None:
graph = self.graph
return [key for key in graph if node in graph[key]] | [
"def",
"predecessors",
"(",
"self",
",",
"node",
",",
"graph",
"=",
"None",
")",
":",
"if",
"graph",
"is",
"None",
":",
"graph",
"=",
"self",
".",
"graph",
"return",
"[",
"key",
"for",
"key",
"in",
"graph",
"if",
"node",
"in",
"graph",
"[",
"key",
"]",
"]"
] | Returns a list of all predecessors of the given node | [
"Returns",
"a",
"list",
"of",
"all",
"predecessors",
"of",
"the",
"given",
"node"
] | 5b5eed396c930751576bdf0d45907a665aac000b | https://github.com/thieman/py-dag/blob/5b5eed396c930751576bdf0d45907a665aac000b/dag/__init__.py#L92-L96 | train |
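
Predecessors are found by scanning every adjacency set for the node, so each call is linear in the size of the graph; for example:

from dag import DAG  # assumes the py-dag package is installed

d = DAG()
for n in 'abc':
    d.add_node(n)
d.add_edge('a', 'c')
d.add_edge('b', 'c')
print(d.predecessors('c'))  # ['a', 'b']
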
buriburisuri/sugartensor | sugartensor/sg_initializer.py | constant | def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
r"""Creates a tensor variable of which initial values are `value` and shape is `shape`.
Args:
name: The name of new variable.
shape: A tuple/list of integers or an integer.
If shape is an integer, it is converted to a list.
value: A Python scalar. All elements of the initialized variable
will be set to this value. Default is 0.
dtype: The data type. Only floating point types are supported. Default is float32.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`.
"""
shape = shape if isinstance(shape, (tuple, list)) else [shape]
x = tf.get_variable(name, shape, dtype=dtype,
initializer=tf.constant_initializer(value),
regularizer=regularizer, trainable=trainable)
# add summary
if summary:
tf.sg_summary_param(x)
return x | python | def constant(name, shape, value=0, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
r"""Creates a tensor variable of which initial values are `value` and shape is `shape`.
Args:
name: The name of new variable.
shape: A tuple/list of integers or an integer.
If shape is an integer, it is converted to a list.
value: A Python scalar. All elements of the initialized variable
will be set to this value. Default is 0.
dtype: The data type. Only floating point types are supported. Default is float32.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`.
"""
shape = shape if isinstance(shape, (tuple, list)) else [shape]
x = tf.get_variable(name, shape, dtype=dtype,
initializer=tf.constant_initializer(value),
regularizer=regularizer, trainable=trainable)
# add summary
if summary:
tf.sg_summary_param(x)
return x | [
"def",
"constant",
"(",
"name",
",",
"shape",
",",
"value",
"=",
"0",
",",
"dtype",
"=",
"tf",
".",
"sg_floatx",
",",
"summary",
"=",
"True",
",",
"regularizer",
"=",
"None",
",",
"trainable",
"=",
"True",
")",
":",
"shape",
"=",
"shape",
"if",
"isinstance",
"(",
"shape",
",",
"(",
"tuple",
",",
"list",
")",
")",
"else",
"[",
"shape",
"]",
"x",
"=",
"tf",
".",
"get_variable",
"(",
"name",
",",
"shape",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"tf",
".",
"constant_initializer",
"(",
"value",
")",
",",
"regularizer",
"=",
"regularizer",
",",
"trainable",
"=",
"trainable",
")",
"# add summary",
"if",
"summary",
":",
"tf",
".",
"sg_summary_param",
"(",
"x",
")",
"return",
"x"
] | r"""Creates a tensor variable of which initial values are `value` and shape is `shape`.
Args:
name: The name of new variable.
shape: A tuple/list of integers or an integer.
If shape is an integer, it is converted to a list.
value: A Python scalar. All elements of the initialized variable
will be set to this value. Default is 0.
dtype: The data type. Only floating point types are supported. Default is float32.
summary: If True, add this constant to tensor board summary.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
trainable: If True, add this constant to trainable collection. Default is True.
Returns:
A `Variable`. | [
"r",
"Creates",
"a",
"tensor",
"variable",
"of",
"which",
"initial",
"values",
"are",
"value",
"and",
"shape",
"is",
"shape",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_initializer.py#L10-L36 | train |
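
Typical use from the TensorFlow 1.x era this library targets; the module path is assumed from the file shown above:

from sugartensor import sg_initializer as init  # requires TF 1.x

b = init.constant('bias', 10, value=0.1)  # shape [10], every entry 0.1
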
buriburisuri/sugartensor | sugartensor/sg_queue.py | sg_producer_func | def sg_producer_func(func):
r"""Decorates a function `func` as sg_producer_func.
Args:
func: A function to decorate.
"""
@wraps(func)
def wrapper(**kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
**kwargs:
source: A source queue list to enqueue
dtypes: Input data types of each tensor
out_dtypes: Output data types of each tensor ( If None, same as dtypes )
capacity: Queue capacity. Default is 32.
num_threads: Number of threads. Default is 1.
"""
# default option
opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1)
# source queue list check
assert opt.source is not None, 'source is mandatory.'
if type(opt.source) is not list and type(opt.source) is not tuple:
opt.source = [opt.source]
if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple:
opt.dtypes = [opt.dtypes]
# default out_dtypes
if opt.out_dtypes is None:
opt.out_dtypes = opt.dtypes
if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple:
opt.out_dtypes = [opt.out_dtypes]
assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.'
# enqueue function
def enqueue_func(sess, op):
# read data from source queue
data = func(sess.run(opt.source))
# create feeder dict
feed_dict = {}
for ph, col in zip(placeholders, data):
feed_dict[ph] = col
# run session
sess.run(op, feed_dict=feed_dict)
# create place holder list
placeholders = []
for dtype in opt.dtypes:
placeholders.append(tf.placeholder(dtype=dtype))
# create FIFO queue
queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)
# enqueue operation
enqueue_op = queue.enqueue(placeholders)
# create queue runner
runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads)
# register to global collection
tf.train.add_queue_runner(runner)
# return de-queue operation
return queue.dequeue()
return wrapper | python | def sg_producer_func(func):
r"""Decorates a function `func` as sg_producer_func.
Args:
func: A function to decorate.
"""
@wraps(func)
def wrapper(**kwargs):
r"""Manages arguments of `tf.sg_opt`.
Args:
**kwargs:
source: A source queue list to enqueue
dtypes: Input data types of each tensor
out_dtypes: Output data types of each tensor ( If None, same as dtypes )
capacity: Queue capacity. Default is 32.
num_threads: Number of threads. Default is 1.
"""
# default option
opt = tf.sg_opt(kwargs) + tf.sg_opt(dtypes=[tf.sg_floatx], capacity=32, num_threads=1)
# source queue list check
assert opt.source is not None, 'source is mandatory.'
if type(opt.source) is not list and type(opt.source) is not tuple:
opt.source = [opt.source]
if type(opt.dtypes) is not list and type(opt.dtypes) is not tuple:
opt.dtypes = [opt.dtypes]
# default out_dtypes
if opt.out_dtypes is None:
opt.out_dtypes = opt.dtypes
if type(opt.out_dtypes) is not list and type(opt.out_dtypes) is not tuple:
opt.out_dtypes = [opt.out_dtypes]
assert len(opt.source) == len(opt.dtypes), 'Source and dtypes should have same length.'
# enqueue function
def enqueue_func(sess, op):
# read data from source queue
data = func(sess.run(opt.source))
# create feeder dict
feed_dict = {}
for ph, col in zip(placeholders, data):
feed_dict[ph] = col
# run session
sess.run(op, feed_dict=feed_dict)
# create place holder list
placeholders = []
for dtype in opt.dtypes:
placeholders.append(tf.placeholder(dtype=dtype))
# create FIFO queue
queue = tf.FIFOQueue(opt.capacity, dtypes=opt.out_dtypes)
# enqueue operation
enqueue_op = queue.enqueue(placeholders)
# create queue runner
runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * opt.num_threads)
# register to global collection
tf.train.add_queue_runner(runner)
# return de-queue operation
return queue.dequeue()
return wrapper | [
"def",
"sg_producer_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"*",
"kwargs",
")",
":",
"r\"\"\"Manages arguments of `tf.sg_opt`.\n\n Args:\n **kwargs:\n source: A source queue list to enqueue\n dtypes: Input data types of each tensor\n out_dtypes: Output data types of each tensor ( If None, same as dtypes )\n capacity: Queue capacity. Default is 32.\n num_threads: Number of threads. Default is 1.\n \"\"\"",
"# default option",
"opt",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"+",
"tf",
".",
"sg_opt",
"(",
"dtypes",
"=",
"[",
"tf",
".",
"sg_floatx",
"]",
",",
"capacity",
"=",
"32",
",",
"num_threads",
"=",
"1",
")",
"# source queue list check",
"assert",
"opt",
".",
"source",
"is",
"not",
"None",
",",
"'source is mandatory.'",
"if",
"type",
"(",
"opt",
".",
"source",
")",
"is",
"not",
"list",
"and",
"type",
"(",
"opt",
".",
"source",
")",
"is",
"not",
"tuple",
":",
"opt",
".",
"source",
"=",
"[",
"opt",
".",
"source",
"]",
"if",
"type",
"(",
"opt",
".",
"dtypes",
")",
"is",
"not",
"list",
"and",
"type",
"(",
"opt",
".",
"dtypes",
")",
"is",
"not",
"tuple",
":",
"opt",
".",
"dtypes",
"=",
"[",
"opt",
".",
"dtypes",
"]",
"# default out_dtypes",
"if",
"opt",
".",
"out_dtypes",
"is",
"None",
":",
"opt",
".",
"out_dtypes",
"=",
"opt",
".",
"dtypes",
"if",
"type",
"(",
"opt",
".",
"out_dtypes",
")",
"is",
"not",
"list",
"and",
"type",
"(",
"opt",
".",
"out_dtypes",
")",
"is",
"not",
"tuple",
":",
"opt",
".",
"out_dtypes",
"=",
"[",
"opt",
".",
"out_dtypes",
"]",
"assert",
"len",
"(",
"opt",
".",
"source",
")",
"==",
"len",
"(",
"opt",
".",
"dtypes",
")",
",",
"'Source and dtypes should have same length.'",
"# enqueue function",
"def",
"enqueue_func",
"(",
"sess",
",",
"op",
")",
":",
"# read data from source queue",
"data",
"=",
"func",
"(",
"sess",
".",
"run",
"(",
"opt",
".",
"source",
")",
")",
"# create feeder dict",
"feed_dict",
"=",
"{",
"}",
"for",
"ph",
",",
"col",
"in",
"zip",
"(",
"placeholders",
",",
"data",
")",
":",
"feed_dict",
"[",
"ph",
"]",
"=",
"col",
"# run session",
"sess",
".",
"run",
"(",
"op",
",",
"feed_dict",
"=",
"feed_dict",
")",
"# create place holder list",
"placeholders",
"=",
"[",
"]",
"for",
"dtype",
"in",
"opt",
".",
"dtypes",
":",
"placeholders",
".",
"append",
"(",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"dtype",
")",
")",
"# create FIFO queue",
"queue",
"=",
"tf",
".",
"FIFOQueue",
"(",
"opt",
".",
"capacity",
",",
"dtypes",
"=",
"opt",
".",
"out_dtypes",
")",
"# enqueue operation",
"enqueue_op",
"=",
"queue",
".",
"enqueue",
"(",
"placeholders",
")",
"# create queue runner",
"runner",
"=",
"_FuncQueueRunner",
"(",
"enqueue_func",
",",
"queue",
",",
"[",
"enqueue_op",
"]",
"*",
"opt",
".",
"num_threads",
")",
"# register to global collection",
"tf",
".",
"train",
".",
"add_queue_runner",
"(",
"runner",
")",
"# return de-queue operation",
"return",
"queue",
".",
"dequeue",
"(",
")",
"return",
"wrapper"
] | r"""Decorates a function `func` as sg_producer_func.
Args:
func: A function to decorate. | [
"r",
"Decorates",
"a",
"function",
"func",
"as",
"sg_producer_func",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_queue.py#L11-L77 | train |
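A minimal usage sketch for the decorator above, assuming sugartensor is imported as `tf`. The decorated function receives the evaluated source tensors as a list and must return one entry per declared dtype; the toy doubling body is illustrative only:

```python
import sugartensor as tf

@tf.sg_producer_func
def preprocess(srcs):
    # srcs holds the evaluated values of the `source` tensors, in order
    x = srcs[0]
    return [x * 2.0]  # one entry per output placeholder

src = tf.cast(tf.train.range_input_producer(100).dequeue(), tf.sg_floatx)
out = preprocess(source=src, dtypes=tf.sg_floatx, capacity=16, num_threads=2)
# `out` is a dequeue op fed by background threads once queue runners start
```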
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_transpose | def sg_transpose(tensor, opt):
r"""Permutes the dimensions according to `opt.perm`.
See `tf.transpose()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
perm: A permutation of the dimensions of `tensor`; the target axis order.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.perm is not None, 'perm is mandatory'
return tf.transpose(tensor, opt.perm, name=opt.name) | python | def sg_transpose(tensor, opt):
r"""Permutes the dimensions according to `opt.perm`.
See `tf.transpose()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
perm: A permutation of the dimensions of `tensor`; the target axis order.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.perm is not None, 'perm is mandatory'
return tf.transpose(tensor, opt.perm, name=opt.name) | [
"def",
"sg_transpose",
"(",
"tensor",
",",
"opt",
")",
":",
"assert",
"opt",
".",
"perm",
"is",
"not",
"None",
",",
"'perm is mandatory'",
"return",
"tf",
".",
"transpose",
"(",
"tensor",
",",
"opt",
".",
"perm",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Permutes the dimensions according to `opt.perm`.
See `tf.transpose()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
perm: A permutation of the dimensions of `tensor`; the target axis order.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Permutes",
"the",
"dimensions",
"according",
"to",
"opt",
".",
"perm",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L161-L176 | train |
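Because sugar functions are injected onto `tf.Tensor` (see `sg_inject` further down in this file), they chain directly off tensors. A small sketch, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

x = tf.ones((2, 3, 4))
y = x.sg_transpose(perm=[0, 2, 1])  # swap the last two axes -> shape (2, 4, 3)
```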
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_argmin | def sg_argmin(tensor, opt):
r"""Returns the indices of the minimum values along the specified axis.
See `tf.argmin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
return tf.argmin(tensor, opt.axis, opt.name) | python | def sg_argmin(tensor, opt):
r"""Returns the indices of the minimum values along the specified axis.
See `tf.argmin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
opt += tf.sg_opt(axis=tensor.get_shape().ndims - 1)
return tf.argmin(tensor, opt.axis, opt.name) | [
"def",
"sg_argmin",
"(",
"tensor",
",",
"opt",
")",
":",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"axis",
"=",
"tensor",
".",
"get_shape",
"(",
")",
".",
"ndims",
"-",
"1",
")",
"return",
"tf",
".",
"argmin",
"(",
"tensor",
",",
"opt",
".",
"axis",
",",
"opt",
".",
"name",
")"
] | r"""Returns the indices of the minimum values along the specified axis.
See `tf.argmin()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis: Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Returns",
"the",
"indices",
"of",
"the",
"minimum",
"values",
"along",
"the",
"specified",
"axis",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L199-L214 | train |
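A short sketch of the default and explicit axis behavior, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

x = tf.constant([[3., 1., 2.],
                 [0., 5., 4.]])
x.sg_argmin()        # last axis by default -> [1, 0]
x.sg_argmin(axis=0)  # along axis 0         -> [1, 0, 0]
```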
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_concat | def sg_concat(tensor, opt):
r"""Concatenates tensors along a axis.
See `tf.concat()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
target: A `Tensor`. Must have the same rank as `tensor`, and
all dimensions except `opt.dim` must be equal.
axis : Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.target is not None, 'target is mandatory.'
opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)
target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target]
return tf.concat([tensor] + target, opt.axis, name=opt.name) | python | def sg_concat(tensor, opt):
r"""Concatenates tensors along a axis.
See `tf.concat()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
target: A `Tensor`. Must have the same rank as `tensor`, and
all dimensions except `opt.dim` must be equal.
axis : Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.target is not None, 'target is mandatory.'
opt += tf.sg_opt(axis=tensor.get_shape().ndims-1)
target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target]
return tf.concat([tensor] + target, opt.axis, name=opt.name) | [
"def",
"sg_concat",
"(",
"tensor",
",",
"opt",
")",
":",
"assert",
"opt",
".",
"target",
"is",
"not",
"None",
",",
"'target is mandatory.'",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"axis",
"=",
"tensor",
".",
"get_shape",
"(",
")",
".",
"ndims",
"-",
"1",
")",
"target",
"=",
"opt",
".",
"target",
"if",
"isinstance",
"(",
"opt",
".",
"target",
",",
"(",
"tuple",
",",
"list",
")",
")",
"else",
"[",
"opt",
".",
"target",
"]",
"return",
"tf",
".",
"concat",
"(",
"[",
"tensor",
"]",
"+",
"target",
",",
"opt",
".",
"axis",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Concatenates tensors along a axis.
See `tf.concat()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
target: A `Tensor`. Must have the same rank as `tensor`, and
all dimensions except `opt.dim` must be equal.
axis : Target axis. Default is the last one.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Concatenates",
"tensors",
"along",
"a",
"axis",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L218-L237 | train |
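A short sketch showing single and multiple concatenation targets, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

a = tf.ones((2, 3))
b = tf.zeros((2, 5))
a.sg_concat(target=b)       # last axis by default -> shape (2, 8)
a.sg_concat(target=[b, b])  # a list of targets    -> shape (2, 13)
```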
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_log | def sg_log(tensor, opt):
r"""Log transform a dense tensor
See `tf.log()` in tensorflow.
Args:
tensor: A `Tensor` ( automatically given by chain )
opt:
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.log(tensor + tf.sg_eps, name=opt.name) | python | def sg_log(tensor, opt):
r"""Log transform a dense tensor
See `tf.log()` in tensorflow.
Args:
tensor: A `Tensor` ( automatically given by chain )
opt:
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.log(tensor + tf.sg_eps, name=opt.name) | [
"def",
"sg_log",
"(",
"tensor",
",",
"opt",
")",
":",
"return",
"tf",
".",
"log",
"(",
"tensor",
"+",
"tf",
".",
"sg_eps",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Log transform a dense tensor
See `tf.log()` in tensorflow.
Args:
tensor: A `Tensor` ( automatically given by chain )
opt:
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Log",
"transform",
"a",
"dense",
"tensor"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L281-L294 | train |
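The practical point of this wrapper is the built-in epsilon, which keeps the log finite at zero. A sketch, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

p = tf.constant([0.0, 0.5, 1.0])
logp = p.sg_log()  # computes tf.log(p + tf.sg_eps), so p == 0 stays finite
```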
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_prod | def sg_prod(tensor, opt):
r"""Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | python | def sg_prod(tensor, opt):
r"""Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_prod(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | [
"def",
"sg_prod",
"(",
"tensor",
",",
"opt",
")",
":",
"return",
"tf",
".",
"reduce_prod",
"(",
"tensor",
",",
"axis",
"=",
"opt",
".",
"axis",
",",
"keep_dims",
"=",
"opt",
".",
"keep_dims",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Computes the product of elements across axis of a tensor.
See `tf.reduce_prod()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Computes",
"the",
"product",
"of",
"elements",
"across",
"axis",
"of",
"a",
"tensor",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L357-L372 | train |
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_min | def sg_min(tensor, opt):
r"""Computes the minimum of elements across axis of a tensor.
See `tf.reduce_min()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | python | def sg_min(tensor, opt):
r"""Computes the minimum of elements across axis of a tensor.
See `tf.reduce_min()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_min(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | [
"def",
"sg_min",
"(",
"tensor",
",",
"opt",
")",
":",
"return",
"tf",
".",
"reduce_min",
"(",
"tensor",
",",
"axis",
"=",
"opt",
".",
"axis",
",",
"keep_dims",
"=",
"opt",
".",
"keep_dims",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Computes the minimum of elements across axis of a tensor.
See `tf.reduce_min()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Computes",
"the",
"minimum",
"of",
"elements",
"across",
"axis",
"of",
"a",
"tensor",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L376-L391 | train |
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_max | def sg_max(tensor, opt):
r"""Computes the maximum of elements across axis of a tensor.
See `tf.reduce_max()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | python | def sg_max(tensor, opt):
r"""Computes the maximum of elements across axis of a tensor.
See `tf.reduce_max()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | [
"def",
"sg_max",
"(",
"tensor",
",",
"opt",
")",
":",
"return",
"tf",
".",
"reduce_max",
"(",
"tensor",
",",
"axis",
"=",
"opt",
".",
"axis",
",",
"keep_dims",
"=",
"opt",
".",
"keep_dims",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Computes the maximum of elements across axis of a tensor.
See `tf.reduce_max()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Computes",
"the",
"maximum",
"of",
"elements",
"across",
"axis",
"of",
"a",
"tensor",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L395-L410 | train |
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_any | def sg_any(tensor, opt):
r"""Computes the "logical or" of elements across axis of a tensor.
See `tf.reduce_any()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | python | def sg_any(tensor, opt):
r"""Computes the "logical or" of elements across axis of a tensor.
See `tf.reduce_any()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
return tf.reduce_any(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name) | [
"def",
"sg_any",
"(",
"tensor",
",",
"opt",
")",
":",
"return",
"tf",
".",
"reduce_any",
"(",
"tensor",
",",
"axis",
"=",
"opt",
".",
"axis",
",",
"keep_dims",
"=",
"opt",
".",
"keep_dims",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Computes the "logical or" of elements across axis of a tensor.
See `tf.reduce_any()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Computes",
"the",
"logical",
"or",
"of",
"elements",
"across",
"axis",
"of",
"a",
"tensor",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L433-L448 | train |
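The reduce-style sugar functions above (`sg_prod`, `sg_min`, `sg_max`, `sg_any`) all share the same `axis`/`keep_dims` options. A combined sketch, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

x = tf.constant([[1., 2.],
                 [3., 4.]])
x.sg_prod(axis=1)                 # [2., 12.]
x.sg_min(axis=0)                  # [1., 2.]
x.sg_max(axis=1, keep_dims=True)  # [[2.], [4.]]

m = tf.constant([[True, False], [False, False]])
m.sg_any(axis=1)                  # [True, False]
```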
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_lookup | def sg_lookup(tensor, opt):
r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.emb is not None, 'emb is mandatory.'
return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name) | python | def sg_lookup(tensor, opt):
r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
"""
assert opt.emb is not None, 'emb is mandatory.'
return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name) | [
"def",
"sg_lookup",
"(",
"tensor",
",",
"opt",
")",
":",
"assert",
"opt",
".",
"emb",
"is",
"not",
"None",
",",
"'emb is mandatory.'",
"return",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"opt",
".",
"emb",
",",
"tensor",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Looks up the `tensor`, which is the embedding matrix.
Args:
tensor: A tensor ( automatically given by chain )
opt:
emb: A 2-D `Tensor`. An embedding matrix.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`. | [
"r",
"Looks",
"up",
"the",
"tensor",
"which",
"is",
"the",
"embedding",
"matrix",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L533-L547 | train |
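A small embedding-lookup sketch, assuming `import sugartensor as tf`; the vocabulary size and embedding dimension are arbitrary:

```python
import sugartensor as tf

emb = tf.Variable(tf.random_normal([1000, 64]))  # vocab 1000, embedding dim 64
ids = tf.constant([[3, 14, 15]])                 # a batch of token ids
vec = ids.sg_lookup(emb=emb)                     # shape (1, 3, 64)
```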
buriburisuri/sugartensor | sugartensor/sg_transform.py | sg_reverse_seq | def sg_reverse_seq(tensor, opt):
r"""Reverses variable length slices.
Before applying the pure tensorflow function tf.reverse_sequence,
this function calculates sequence lengths by counting non-zeros.
For example,
```
tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
tensor.sg_reverse_seq()
=> [[3 2 1 0 0]
[5 4 0 0 0]]
```
Args:
tensor: A 2-D `Tensor` (automatically given by chain).
opt:
axis: Axis to reverse. Default is 1.
name : If provided, it replaces current tensor's name.
Returns:
A `Tensor` with the same shape and type as `tensor`.
"""
# default sequence dimension
opt += tf.sg_opt(axis=1)
seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name) | python | def sg_reverse_seq(tensor, opt):
r"""Reverses variable length slices.
Before applying the pure tensorflow function tf.reverse_sequence,
this function calculates sequence lengths by counting non-zeros.
For example,
```
tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
tensor.sg_reverse_seq()
=> [[3 2 1 0 0]
[5 4 0 0 0]]
```
Args:
tensor: A 2-D `Tensor` (automatically given by chain).
opt:
axis: Axis to reverse. Default is 1.
name : If provided, it replaces current tensor's name.
Returns:
A `Tensor` with the same shape and type as `tensor`.
"""
# default sequence dimension
opt += tf.sg_opt(axis=1)
seq_len = tf.not_equal(tensor, tf.zeros_like(tensor)).sg_int().sg_sum(axis=opt.axis)
return tf.reverse_sequence(tensor, seq_len, opt.axis, name=opt.name) | [
"def",
"sg_reverse_seq",
"(",
"tensor",
",",
"opt",
")",
":",
"# default sequence dimension",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"axis",
"=",
"1",
")",
"seq_len",
"=",
"tf",
".",
"not_equal",
"(",
"tensor",
",",
"tf",
".",
"zeros_like",
"(",
"tensor",
")",
")",
".",
"sg_int",
"(",
")",
".",
"sg_sum",
"(",
"axis",
"=",
"opt",
".",
"axis",
")",
"return",
"tf",
".",
"reverse_sequence",
"(",
"tensor",
",",
"seq_len",
",",
"opt",
".",
"axis",
",",
"name",
"=",
"opt",
".",
"name",
")"
] | r"""Reverses variable length slices.
Before applying the pure tensorflow function tf.reverse_sequence,
this function calculates sequence lengths by counting non-zeros.
For example,
```
tensor = [[1, 2, 3, 0, 0], [4, 5, 0, 0, 0]]
tensor.sg_reverse_seq()
=> [[3 2 1 0 0]
[5 4 0 0 0]]
```
Args:
tensor: A 2-D `Tensor` (automatically given by chain).
opt:
axis: Axis to reverse. Default is 1.
name : If provided, it replaces current tensor's name.
Returns:
A `Tensor` with the same shape and type as `tensor`. | [
"r",
"Reverses",
"variable",
"length",
"slices",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L551-L578 | train |
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_gpus | def sg_gpus():
r""" Gets current available GPU nums
Returns:
A integer : total # of GPUs available
"""
global _gpus
if _gpus is None:
local_device_protos = device_lib.list_local_devices()
_gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])
return max(_gpus, 1) | python | def sg_gpus():
r""" Gets current available GPU nums
Returns:
A integer : total # of GPUs available
"""
global _gpus
if _gpus is None:
local_device_protos = device_lib.list_local_devices()
_gpus = len([x.name for x in local_device_protos if x.device_type == 'GPU'])
return max(_gpus, 1) | [
"def",
"sg_gpus",
"(",
")",
":",
"global",
"_gpus",
"if",
"_gpus",
"is",
"None",
":",
"local_device_protos",
"=",
"device_lib",
".",
"list_local_devices",
"(",
")",
"_gpus",
"=",
"len",
"(",
"[",
"x",
".",
"name",
"for",
"x",
"in",
"local_device_protos",
"if",
"x",
".",
"device_type",
"==",
"'GPU'",
"]",
")",
"return",
"max",
"(",
"_gpus",
",",
"1",
")"
] | r""" Gets current available GPU nums
Returns:
A integer : total # of GPUs available | [
"r",
"Gets",
"current",
"available",
"GPU",
"nums"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L64-L76 | train |
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_context | def sg_context(**kwargs):
r"""Context helper for computational graph building.
Makes all elements within the with block share the parameters.
For example, in the following snippet, the default value of the parameter `bn` will be set to True
in all layers within the with block.
```
with tf.sg_context(bn=True):
...
...
```
Args:
**kwargs:
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
dout: A float of range [0, 100). A dropout rate. Default is 0..
bias: Boolean. If True (Default), biases are added.
name: A name for the layer. By default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
Returns:
None
"""
global _context
# set options when enter
context_now = tf.sg_opt(kwargs)
_context += [context_now]
# if named context
if context_now.name:
context_now.scope_name = context_now.name
context_now.name = None
with tf.variable_scope(context_now.scope_name):
yield
else:
yield
# clear options when exit
del _context[-1] | python | def sg_context(**kwargs):
r"""Context helper for computational graph building.
Makes all elements within the with block share the parameters.
For example, in the following snippet, the default value of the parameter `bn` will be set to True
in all layers within the with block.
```
with tf.sg_context(bn=True):
...
...
```
Args:
**kwargs:
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
dout: A float of range [0, 100). A dropout rate. Default is 0..
bias: Boolean. If True (Default), biases are added.
name: A name for the layer. By default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
Returns:
None
"""
global _context
# set options when enter
context_now = tf.sg_opt(kwargs)
_context += [context_now]
# if named context
if context_now.name:
context_now.scope_name = context_now.name
context_now.name = None
with tf.variable_scope(context_now.scope_name):
yield
else:
yield
# clear options when exit
del _context[-1] | [
"def",
"sg_context",
"(",
"*",
"*",
"kwargs",
")",
":",
"global",
"_context",
"# set options when enter",
"context_now",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"_context",
"+=",
"[",
"context_now",
"]",
"# if named context",
"if",
"context_now",
".",
"name",
":",
"context_now",
".",
"scope_name",
"=",
"context_now",
".",
"name",
"context_now",
".",
"name",
"=",
"None",
"with",
"tf",
".",
"variable_scope",
"(",
"context_now",
".",
"scope_name",
")",
":",
"yield",
"else",
":",
"yield",
"# clear options when exit",
"del",
"_context",
"[",
"-",
"1",
"]"
] | r"""Context helper for computational graph building.
Makes all elements within the with block share the parameters.
For example, in the following snippet, the default value of the parameter `bn` will be set to True
in all layers within the with block.
```
with tf.sg_context(bn=True):
...
...
```
Args:
**kwargs:
in_dim: An integer. The size of input dimension, which is set to the last one by default.
dim: An integer. The size of output dimension. Has the same value as in_dim by default.
bn: Boolean. If True, batch normalization is applied.
ln: Boolean. If True, layer normalization is applied.
dout: A float of range [0, 100). A dropout rate. Default is 0..
bias: Boolean. If True (Default), biases are added.
name: A name for the layer. By default, the function name is assigned.
act: A name of activation function. e.g., `sigmoid`, `tanh`, etc.
reuse: `True` or `None`; if `True`, we go into reuse mode for this `layer` scope
as well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
Returns:
None | [
"r",
"Context",
"helper",
"for",
"computational",
"graph",
"building",
".",
"Makes",
"all",
"elements",
"within",
"the",
"with",
"Block",
"share",
"the",
"parameters",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L87-L132 | train |
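A usage sketch for the context helper, assuming `import sugartensor as tf`; `.sg_conv` is one of sugartensor's layer functions and is used here only to illustrate option inheritance:

```python
import sugartensor as tf

x = tf.sg_input(shape=(28, 28, 1))  # sg_input is defined later in this file

# both layers inherit bn=True and act='relu', and the named context
# wraps them in an 'encoder' variable scope
with tf.sg_context(name='encoder', bn=True, act='relu'):
    h = x.sg_conv(dim=32)
    h = h.sg_conv(dim=64)
```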
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_get_context | def sg_get_context():
r"""Get current context information
Returns:
tf.sg_opt class object which contains all context information
"""
global _context
# merge current context
res = tf.sg_opt()
for c in _context:
res += c
return res | python | def sg_get_context():
r"""Get current context information
Returns:
tf.sg_opt class object which contains all context information
"""
global _context
# merge current context
res = tf.sg_opt()
for c in _context:
res += c
return res | [
"def",
"sg_get_context",
"(",
")",
":",
"global",
"_context",
"# merge current context",
"res",
"=",
"tf",
".",
"sg_opt",
"(",
")",
"for",
"c",
"in",
"_context",
":",
"res",
"+=",
"c",
"return",
"res"
] | r"""Get current context information
Returns:
tf.sg_opt class object which contains all context information | [
"r",
"Get",
"current",
"context",
"information"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L135-L149 | train |
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_sugar_func | def sg_sugar_func(func):
r""" Decorates a function `func` so that it can be a sugar function.
A sugar function can be used in a chainable manner.
Args:
func: function to decorate
Returns:
A sugar function.
"""
@wraps(func)
def wrapper(tensor, **kwargs):
# call sugar function
out = func(tensor, tf.sg_opt(kwargs))
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs)+sg_get_context(), prev=tensor)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper | python | def sg_sugar_func(func):
r""" Decorates a function `func` so that it can be a sugar function.
A sugar function can be used in a chainable manner.
Args:
func: function to decorate
Returns:
A sugar function.
"""
@wraps(func)
def wrapper(tensor, **kwargs):
# call sugar function
out = func(tensor, tf.sg_opt(kwargs))
# save node info for reuse
out._sugar = tf.sg_opt(func=func, arg=tf.sg_opt(kwargs)+sg_get_context(), prev=tensor)
# inject reuse function
out.sg_reuse = types.MethodType(sg_reuse, out)
return out
return wrapper | [
"def",
"sg_sugar_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"tensor",
",",
"*",
"*",
"kwargs",
")",
":",
"# call sugar function",
"out",
"=",
"func",
"(",
"tensor",
",",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
")",
"# save node info for reuse",
"out",
".",
"_sugar",
"=",
"tf",
".",
"sg_opt",
"(",
"func",
"=",
"func",
",",
"arg",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"+",
"sg_get_context",
"(",
")",
",",
"prev",
"=",
"tensor",
")",
"# inject reuse function",
"out",
".",
"sg_reuse",
"=",
"types",
".",
"MethodType",
"(",
"sg_reuse",
",",
"out",
")",
"return",
"out",
"return",
"wrapper"
] | r""" Decorates a function `func` so that it can be a sugar function.
A sugar function can be used in a chainable manner.
Args:
func: function to decorate
Returns:
A sugar function. | [
"r",
"Decorates",
"a",
"function",
"func",
"so",
"that",
"it",
"can",
"be",
"a",
"sugar",
"function",
".",
"Sugar",
"function",
"can",
"be",
"used",
"in",
"a",
"chainable",
"manner",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L156-L177 | train |
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_reuse | def sg_reuse(tensor, **opt):
r""" Reconstruct computational graph of `tensor` so all the parameters
can be reused and replace its input tensor with `opt.input`.
Args:
tensor: A `Tensor` (automatically given by chaining).
**opt:
input: A `Tensor` that will replace the original input tensor.
Returns:
Reconstructed tensor nodes.
"""
opt = tf.sg_opt(opt)
assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'
assert opt.input is not None, 'input is mandatory.'
# get all nodes in this graph
nodes, prev = [tensor], tensor._sugar.prev
while prev is not None:
nodes = [prev] + nodes
prev = prev._sugar.prev if hasattr(prev, '_sugar') else None
# create graph again for this input
out = opt.input
for node in nodes[1:]: # exclude head node
if node._sugar.is_layer:
fn = tf.sg_layer_func(node._sugar.func)
if node._sugar.arg.scope_name:
with tf.variable_scope(node._sugar.arg.scope_name):
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = node._sugar.func(out, node._sugar.arg)
return out | python | def sg_reuse(tensor, **opt):
r""" Reconstruct computational graph of `tensor` so all the parameters
can be reused and replace its input tensor with `opt.input`.
Args:
tensor: A `Tensor` (automatically given by chaining).
**opt:
input: A `Tensor` that will replace the original input tensor.
Returns:
Reconstructed tensor nodes.
"""
opt = tf.sg_opt(opt)
assert hasattr(tensor, '_sugar'), 'cannot reuse this node.'
assert opt.input is not None, 'input is mandatory.'
# get all nodes in this graph
nodes, prev = [tensor], tensor._sugar.prev
while prev is not None:
nodes = [prev] + nodes
prev = prev._sugar.prev if hasattr(prev, '_sugar') else None
# create graph again for this input
out = opt.input
for node in nodes[1:]: # exclude head node
if node._sugar.is_layer:
fn = tf.sg_layer_func(node._sugar.func)
if node._sugar.arg.scope_name:
with tf.variable_scope(node._sugar.arg.scope_name):
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = fn(out, **(node._sugar.arg + tf.sg_opt(name=node._sugar.name, reuse=True)))
else:
out = node._sugar.func(out, node._sugar.arg)
return out | [
"def",
"sg_reuse",
"(",
"tensor",
",",
"*",
"*",
"opt",
")",
":",
"opt",
"=",
"tf",
".",
"sg_opt",
"(",
"opt",
")",
"assert",
"hasattr",
"(",
"tensor",
",",
"'_sugar'",
")",
",",
"'cannot reuse this node.'",
"assert",
"opt",
".",
"input",
"is",
"not",
"None",
",",
"'input is mandatory.'",
"# get all nodes in this graph",
"nodes",
",",
"prev",
"=",
"[",
"tensor",
"]",
",",
"tensor",
".",
"_sugar",
".",
"prev",
"while",
"prev",
"is",
"not",
"None",
":",
"nodes",
"=",
"[",
"prev",
"]",
"+",
"nodes",
"prev",
"=",
"prev",
".",
"_sugar",
".",
"prev",
"if",
"hasattr",
"(",
"prev",
",",
"'_sugar'",
")",
"else",
"None",
"# create graph again for this input",
"out",
"=",
"opt",
".",
"input",
"for",
"node",
"in",
"nodes",
"[",
"1",
":",
"]",
":",
"# exclude head node",
"if",
"node",
".",
"_sugar",
".",
"is_layer",
":",
"fn",
"=",
"tf",
".",
"sg_layer_func",
"(",
"node",
".",
"_sugar",
".",
"func",
")",
"if",
"node",
".",
"_sugar",
".",
"arg",
".",
"scope_name",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"node",
".",
"_sugar",
".",
"arg",
".",
"scope_name",
")",
":",
"out",
"=",
"fn",
"(",
"out",
",",
"*",
"*",
"(",
"node",
".",
"_sugar",
".",
"arg",
"+",
"tf",
".",
"sg_opt",
"(",
"name",
"=",
"node",
".",
"_sugar",
".",
"name",
",",
"reuse",
"=",
"True",
")",
")",
")",
"else",
":",
"out",
"=",
"fn",
"(",
"out",
",",
"*",
"*",
"(",
"node",
".",
"_sugar",
".",
"arg",
"+",
"tf",
".",
"sg_opt",
"(",
"name",
"=",
"node",
".",
"_sugar",
".",
"name",
",",
"reuse",
"=",
"True",
")",
")",
")",
"else",
":",
"out",
"=",
"node",
".",
"_sugar",
".",
"func",
"(",
"out",
",",
"node",
".",
"_sugar",
".",
"arg",
")",
"return",
"out"
] | r""" Reconstruct computational graph of `tensor` so all the parameters
can be reused and replace its input tensor with `opt.input`.
Args:
tensor: A `Tensor` (automatically given by chaining).
**opt:
input: A `Tensor` that will replace the original input tensor.
Returns:
Reconstructed tensor nodes. | [
"r",
"Reconstruct",
"computational",
"graph",
"of",
"tensor",
"so",
"all",
"the",
"parameters",
"can",
"be",
"reused",
"and",
"replace",
"its",
"input",
"tensor",
"with",
"opt",
".",
"input",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L452-L487 | train |
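A weight-sharing sketch, assuming `import sugartensor as tf`; `.sg_dense` is an illustrative layer function. The second graph is a rebuild of the first with every parameter shared:

```python
import sugartensor as tf

x = tf.sg_input(shape=(784,))
x_noisy = x + tf.random_normal(tf.shape(x)) * 0.1

logit = x.sg_dense(dim=10, name='fc')        # builds the parameters once
logit_noisy = logit.sg_reuse(input=x_noisy)  # same parameters, new input
```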
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_input | def sg_input(shape=None, dtype=sg_floatx, name=None):
r"""Creates a placeholder.
Args:
shape: A tuple/list of integers. If a single integer is given, it will be turned into a list.
dtype: A data type. Default is float32.
name: A name for the placeholder.
Returns:
A wrapped placeholder `Tensor`.
"""
if shape is None:
return tf.placeholder(dtype, shape=None, name=name)
else:
if not isinstance(shape, (list, tuple)):
shape = [shape]
return tf.placeholder(dtype, shape=[None] + list(shape), name=name) | python | def sg_input(shape=None, dtype=sg_floatx, name=None):
r"""Creates a placeholder.
Args:
shape: A tuple/list of integers. If a single integer is given, it will be turned into a list.
dtype: A data type. Default is float32.
name: A name for the placeholder.
Returns:
A wrapped placeholder `Tensor`.
"""
if shape is None:
return tf.placeholder(dtype, shape=None, name=name)
else:
if not isinstance(shape, (list, tuple)):
shape = [shape]
return tf.placeholder(dtype, shape=[None] + list(shape), name=name) | [
"def",
"sg_input",
"(",
"shape",
"=",
"None",
",",
"dtype",
"=",
"sg_floatx",
",",
"name",
"=",
"None",
")",
":",
"if",
"shape",
"is",
"None",
":",
"return",
"tf",
".",
"placeholder",
"(",
"dtype",
",",
"shape",
"=",
"None",
",",
"name",
"=",
"name",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"shape",
"=",
"[",
"shape",
"]",
"return",
"tf",
".",
"placeholder",
"(",
"dtype",
",",
"shape",
"=",
"[",
"None",
"]",
"+",
"list",
"(",
"shape",
")",
",",
"name",
"=",
"name",
")"
] | r"""Creates a placeholder.
Args:
shape: A tuple/list of integers. If a single integer is given, it will be turned into a list.
dtype: A data type. Default is float32.
name: A name for the placeholder.
Returns:
A wrapped placeholder `Tensor`. | [
"r",
"Creates",
"a",
"placeholder",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L494-L510 | train |
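The three shape forms accepted by the code above, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

x = tf.sg_input(shape=(28, 28, 1))  # placeholder with shape (None, 28, 28, 1)
y = tf.sg_input(10)                 # a bare integer also works -> (None, 10)
z = tf.sg_input()                   # shape left fully unspecified
```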
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_inject | def sg_inject(path, mod_name):
r"""Converts all functions in the given Python module to sugar functions
so that they can be used in a chainable manner.
Args:
path: A string. Path to the Python module
mod_name: A string. The name of the Python module to inject.
Returns:
None
"""
# import module
import sys
if path not in list(sys.path):
sys.path.append(path)
globals()[mod_name] = importlib.import_module(mod_name)
# find functions
for func_name in dir(globals()[mod_name]):
if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):
if not func_name.startswith('_'):
# inject to tf.Variable type
exec('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name))
# inject to tf.Tensor type
exec('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name)) | python | def sg_inject(path, mod_name):
r"""Converts all functions in the given Python module to sugar functions
so that they can be used in a chainable manner.
Args:
path: A string. Path to the Python module
mod_name: A string. The name of the Python module to inject.
Returns:
None
"""
# import module
import sys
if path not in list(sys.path):
sys.path.append(path)
globals()[mod_name] = importlib.import_module(mod_name)
# find functions
for func_name in dir(globals()[mod_name]):
if isinstance(globals()[mod_name].__dict__.get(func_name), types.FunctionType):
if not func_name.startswith('_'):
# inject to tf.Variable type
exec('tf.Variable.%s = %s.%s' % (func_name, mod_name, func_name))
# inject to tf.Tensor type
exec('tf.Tensor.%s = %s.%s' % (func_name, mod_name, func_name)) | [
"def",
"sg_inject",
"(",
"path",
",",
"mod_name",
")",
":",
"# import module",
"import",
"sys",
"if",
"path",
"not",
"in",
"list",
"(",
"sys",
".",
"path",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"path",
")",
"globals",
"(",
")",
"[",
"mod_name",
"]",
"=",
"importlib",
".",
"import_module",
"(",
"mod_name",
")",
"# find functions",
"for",
"func_name",
"in",
"dir",
"(",
"globals",
"(",
")",
"[",
"mod_name",
"]",
")",
":",
"if",
"isinstance",
"(",
"globals",
"(",
")",
"[",
"mod_name",
"]",
".",
"__dict__",
".",
"get",
"(",
"func_name",
")",
",",
"types",
".",
"FunctionType",
")",
":",
"if",
"not",
"func_name",
".",
"startswith",
"(",
"'_'",
")",
":",
"# inject to tf.Variable type",
"exec",
"(",
"'tf.Variable.%s = %s.%s'",
"%",
"(",
"func_name",
",",
"mod_name",
",",
"func_name",
")",
")",
"# inject to tf.Tensor type",
"exec",
"(",
"'tf.Tensor.%s = %s.%s'",
"%",
"(",
"func_name",
",",
"mod_name",
",",
"func_name",
")",
")"
] | r"""Converts all functions in the given Python module to sugar functions
so that they can be used in a chainable manner.
Args:
path: A string. Path to the Python module
mod_name: A string. The name of the Python module to inject.
Returns:
None | [
"r",
"Converts",
"all",
"functions",
"in",
"the",
"given",
"Python",
"module",
"to",
"sugar",
"functions",
"so",
"that",
"they",
"can",
"be",
"used",
"in",
"a",
"chainable",
"manner",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L517-L540 | train |
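A usage sketch, assuming `import sugartensor as tf`; the module path, module name, and `my_scale` function are all hypothetical:

```python
import sugartensor as tf

# assumption: /home/me/ops/my_ops.py defines sugar-style functions,
# e.g. a @tf.sg_sugar_func-decorated my_scale(tensor, opt)
tf.sg_inject('/home/me/ops', 'my_ops')

x = tf.ones((2, 2))
y = x.my_scale(factor=3.0)  # hypothetical, now available as a tensor method
```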
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_queue_context | def sg_queue_context(sess=None):
r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, the default session is used.
Returns:
None
"""
# default session
sess = tf.get_default_session() if sess is None else sess
# thread coordinator
coord = tf.train.Coordinator()
try:
# start queue thread
threads = tf.train.start_queue_runners(sess, coord)
yield
finally:
# stop queue thread
coord.request_stop()
# wait for threads to exit.
coord.join(threads) | python | def sg_queue_context(sess=None):
r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, the default session is used.
Returns:
None
"""
# default session
sess = tf.get_default_session() if sess is None else sess
# thread coordinator
coord = tf.train.Coordinator()
try:
# start queue thread
threads = tf.train.start_queue_runners(sess, coord)
yield
finally:
# stop queue thread
coord.request_stop()
# wait thread to exit.
coord.join(threads) | [
"def",
"sg_queue_context",
"(",
"sess",
"=",
"None",
")",
":",
"# default session",
"sess",
"=",
"tf",
".",
"get_default_session",
"(",
")",
"if",
"sess",
"is",
"None",
"else",
"sess",
"# thread coordinator",
"coord",
"=",
"tf",
".",
"train",
".",
"Coordinator",
"(",
")",
"try",
":",
"# start queue thread",
"threads",
"=",
"tf",
".",
"train",
".",
"start_queue_runners",
"(",
"sess",
",",
"coord",
")",
"yield",
"finally",
":",
"# stop queue thread",
"coord",
".",
"request_stop",
"(",
")",
"# wait thread to exit.",
"coord",
".",
"join",
"(",
"threads",
")"
] | r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, the default session is used.
Returns:
None | [
"r",
"Context",
"helper",
"for",
"queue",
"routines",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L565-L588 | train |
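A driving-loop sketch, assuming `import sugartensor as tf` and that `out` is a queue-fed tensor such as the producer example earlier in this file:

```python
import sugartensor as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    with tf.sg_queue_context(sess):
        for _ in range(10):
            batch = sess.run(out)  # background threads keep the queue filled
```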
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_arg | def sg_arg():
r"""Gets current command line options
Returns:
tf.sg_opt instance that is updated with current command line options.
"""
if not tf.app.flags.FLAGS.__dict__['__parsed']:
tf.app.flags.FLAGS._parse_flags()
return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags']) | python | def sg_arg():
r"""Gets current command line options
Returns:
tf.sg_opt instance that is updated with current command line options.
"""
if not tf.app.flags.FLAGS.__dict__['__parsed']:
tf.app.flags.FLAGS._parse_flags()
return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags']) | [
"def",
"sg_arg",
"(",
")",
":",
"if",
"not",
"tf",
".",
"app",
".",
"flags",
".",
"FLAGS",
".",
"__dict__",
"[",
"'__parsed'",
"]",
":",
"tf",
".",
"app",
".",
"flags",
".",
"FLAGS",
".",
"_parse_flags",
"(",
")",
"return",
"tf",
".",
"sg_opt",
"(",
"tf",
".",
"app",
".",
"flags",
".",
"FLAGS",
".",
"__dict__",
"[",
"'__flags'",
"]",
")"
] | r"""Gets current command line options
Returns:
tf.sg_opt instance that is updated with current command line options. | [
"r",
"Gets",
"current",
"command",
"line",
"options"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L632-L640 | train |
buriburisuri/sugartensor | sugartensor/sg_main.py | sg_arg_def | def sg_arg_def(**kwargs):
r"""Defines command line options
Args:
**kwargs:
key: A name for the option.
value : Default value or a tuple of (default value, description).
Returns:
None
For example,
```
# Either of the following two lines will define the `--n_epoch` command line argument and set its default value to 1.
tf.sg_arg_def(n_epoch=1)
tf.sg_arg_def(n_epoch=(1, 'total number of epochs'))
```
"""
for k, v in kwargs.items():
if type(v) is tuple or type(v) is list:
v, c = v[0], v[1]
else:
c = k
if type(v) is str:
tf.app.flags.DEFINE_string(k, v, c)
elif type(v) is int:
tf.app.flags.DEFINE_integer(k, v, c)
elif type(v) is float:
tf.app.flags.DEFINE_float(k, v, c)
elif type(v) is bool:
tf.app.flags.DEFINE_bool(k, v, c) | python | def sg_arg_def(**kwargs):
r"""Defines command line options
Args:
**kwargs:
key: A name for the option.
value : Default value or a tuple of (default value, description).
Returns:
None
For example,
```
# Either of the following two lines will define the `--n_epoch` command line argument and set its default value to 1.
tf.sg_arg_def(n_epoch=1)
tf.sg_arg_def(n_epoch=(1, 'total number of epochs'))
```
"""
for k, v in kwargs.items():
if type(v) is tuple or type(v) is list:
v, c = v[0], v[1]
else:
c = k
if type(v) is str:
tf.app.flags.DEFINE_string(k, v, c)
elif type(v) is int:
tf.app.flags.DEFINE_integer(k, v, c)
elif type(v) is float:
tf.app.flags.DEFINE_float(k, v, c)
elif type(v) is bool:
tf.app.flags.DEFINE_bool(k, v, c) | [
"def",
"sg_arg_def",
"(",
"*",
"*",
"kwargs",
")",
":",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"v",
")",
"is",
"tuple",
"or",
"type",
"(",
"v",
")",
"is",
"list",
":",
"v",
",",
"c",
"=",
"v",
"[",
"0",
"]",
",",
"v",
"[",
"1",
"]",
"else",
":",
"c",
"=",
"k",
"if",
"type",
"(",
"v",
")",
"is",
"str",
":",
"tf",
".",
"app",
".",
"flags",
".",
"DEFINE_string",
"(",
"k",
",",
"v",
",",
"c",
")",
"elif",
"type",
"(",
"v",
")",
"is",
"int",
":",
"tf",
".",
"app",
".",
"flags",
".",
"DEFINE_integer",
"(",
"k",
",",
"v",
",",
"c",
")",
"elif",
"type",
"(",
"v",
")",
"is",
"float",
":",
"tf",
".",
"app",
".",
"flags",
".",
"DEFINE_float",
"(",
"k",
",",
"v",
",",
"c",
")",
"elif",
"type",
"(",
"v",
")",
"is",
"bool",
":",
"tf",
".",
"app",
".",
"flags",
".",
"DEFINE_bool",
"(",
"k",
",",
"v",
",",
"c",
")"
] | r"""Defines command line options
Args:
**kwargs:
key: A name for the option.
value : Default value or a tuple of (default value, description).
Returns:
None
For example,
```
# Either of the following two lines will define the `--n_epoch` command line argument and set its default value to 1.
tf.sg_arg_def(n_epoch=1)
tf.sg_arg_def(n_epoch=(1, 'total number of epochs'))
``` | [
"r",
"Defines",
"command",
"line",
"options"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_main.py#L643-L675 | train |
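A sketch combining `sg_arg_def` with `sg_arg` from the previous record, assuming `import sugartensor as tf`:

```python
import sugartensor as tf

tf.sg_arg_def(n_epoch=(1, 'total number of epochs'),
              lr=(0.001, 'learning rate'))

opt = tf.sg_arg()           # parsed command line options as a tf.sg_opt
print(opt.n_epoch, opt.lr)  # e.g. `python train.py --n_epoch=5 --lr=0.01`
```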
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_loss | def sg_summary_loss(tensor, prefix='losses', name=None):
r"""Register `tensor` to summary report as `loss`
Args:
tensor: A `Tensor` to log as loss
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name, tf.reduce_mean(tensor))
_histogram(name + '-h', tensor) | python | def sg_summary_loss(tensor, prefix='losses', name=None):
r"""Register `tensor` to summary report as `loss`
Args:
tensor: A `Tensor` to log as loss
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name, tf.reduce_mean(tensor))
_histogram(name + '-h', tensor) | [
"def",
"sg_summary_loss",
"(",
"tensor",
",",
"prefix",
"=",
"'losses'",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"_scalar",
"(",
"name",
",",
"tf",
".",
"reduce_mean",
"(",
"tensor",
")",
")",
"_histogram",
"(",
"name",
"+",
"'-h'",
",",
"tensor",
")"
] | r"""Register `tensor` to summary report as `loss`
Args:
tensor: A `Tensor` to log as loss
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"loss"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L19-L36 | train |
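A minimal registration sketch, assuming `import sugartensor as tf`; the toy loss tensor is illustrative:

```python
import sugartensor as tf

pred = tf.constant([0.2, 0.8])
target = tf.constant([0.0, 1.0])
loss = tf.square(pred - target)
tf.sg_summary_loss(loss)  # records a mean scalar and a histogram under 'losses/'
```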
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_gradient | def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
r"""Register `tensor` to summary report as `gradient`
Args:
tensor: A `Tensor` to log as gradient
gradient: A 0-D `Tensor`. A gradient to log
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
# noinspection PyBroadException
_scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
_histogram(name + '/grad-h', tf.abs(gradient)) | python | def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
r"""Register `tensor` to summary report as `gradient`
Args:
tensor: A `Tensor` to log as gradient
gradient: A 0-D `Tensor`. A gradient to log
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
# noinspection PyBroadException
_scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
_histogram(name + '/grad-h', tf.abs(gradient)) | [
"def",
"sg_summary_gradient",
"(",
"tensor",
",",
"gradient",
",",
"prefix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"# noinspection PyBroadException",
"_scalar",
"(",
"name",
"+",
"'/grad'",
",",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"abs",
"(",
"gradient",
")",
")",
")",
"_histogram",
"(",
"name",
"+",
"'/grad-h'",
",",
"tf",
".",
"abs",
"(",
"gradient",
")",
")"
] | r"""Register `tensor` to summary report as `gradient`
Args:
tensor: A `Tensor` to log as gradient
gradient: A 0-D `Tensor`. A gradient to log
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"gradient"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L59-L78 | train |
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_activation | def sg_summary_activation(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `activation`
Args:
tensor: A `Tensor` to log as activation
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name + '/ratio',
tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx)))
_histogram(name + '/ratio-h', tensor) | python | def sg_summary_activation(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `activation`
Args:
tensor: A `Tensor` to log as activation
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name + '/ratio',
tf.reduce_mean(tf.cast(tf.greater(tensor, 0), tf.sg_floatx)))
_histogram(name + '/ratio-h', tensor) | [
"def",
"sg_summary_activation",
"(",
"tensor",
",",
"prefix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"_scalar",
"(",
"name",
"+",
"'/ratio'",
",",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"cast",
"(",
"tf",
".",
"greater",
"(",
"tensor",
",",
"0",
")",
",",
"tf",
".",
"sg_floatx",
")",
")",
")",
"_histogram",
"(",
"name",
"+",
"'/ratio-h'",
",",
"tensor",
")"
] | r"""Register `tensor` to summary report as `activation`
Args:
tensor: A `Tensor` to log as activation
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"activation"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L81-L99 | train |
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_param | def sg_summary_param(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `parameters`
Args:
tensor: A `Tensor` to log as parameters
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name + '/abs', tf.reduce_mean(tf.abs(tensor)))
_histogram(name + '/abs-h', tf.abs(tensor)) | python | def sg_summary_param(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `parameters`
Args:
tensor: A `Tensor` to log as parameters
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
_scalar(name + '/abs', tf.reduce_mean(tf.abs(tensor)))
_histogram(name + '/abs-h', tf.abs(tensor)) | [
"def",
"sg_summary_param",
"(",
"tensor",
",",
"prefix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"_scalar",
"(",
"name",
"+",
"'/abs'",
",",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"abs",
"(",
"tensor",
")",
")",
")",
"_histogram",
"(",
"name",
"+",
"'/abs-h'",
",",
"tf",
".",
"abs",
"(",
"tensor",
")",
")"
] | r"""Register `tensor` to summary report as `parameters`
Args:
tensor: A `Tensor` to log as parameters
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"parameters"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L102-L119 | train |
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_image | def sg_summary_image(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `image`
Args:
tensor: A tensor to log as image
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.image(name + '-im', tensor) | python | def sg_summary_image(tensor, prefix=None, name=None):
r"""Register `tensor` to summary report as `image`
Args:
tensor: A tensor to log as image
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.image(name + '-im', tensor) | [
"def",
"sg_summary_image",
"(",
"tensor",
",",
"prefix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"if",
"not",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"reuse",
":",
"tf",
".",
"summary",
".",
"image",
"(",
"name",
"+",
"'-im'",
",",
"tensor",
")"
] | r"""Register `tensor` to summary report as `image`
Args:
tensor: A tensor to log as image
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"image"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L122-L139 | train |
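`sg_summary_image` delegates to `tf.summary.image`, which expects a 4-D [batch, height, width, channels] tensor; a hedged sketch with illustrative shapes:
import sugartensor as tf
import numpy as np
imgs = tf.constant(np.random.rand(8, 28, 28, 1), dtype=tf.sg_floatx)  # hypothetical image batch
tf.sg_summary_image(imgs, prefix='train', name='inputs')  # logged as 'train/inputs-im'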
buriburisuri/sugartensor | sugartensor/sg_logging.py | sg_summary_audio | def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + '-au', tensor, sample_rate) | python | def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None):
r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None
"""
# defaults
prefix = '' if prefix is None else prefix + '/'
# summary name
name = prefix + _pretty_name(tensor) if name is None else prefix + name
# summary statistics
if not tf.get_variable_scope().reuse:
tf.summary.audio(name + '-au', tensor, sample_rate) | [
"def",
"sg_summary_audio",
"(",
"tensor",
",",
"sample_rate",
"=",
"16000",
",",
"prefix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"# defaults",
"prefix",
"=",
"''",
"if",
"prefix",
"is",
"None",
"else",
"prefix",
"+",
"'/'",
"# summary name",
"name",
"=",
"prefix",
"+",
"_pretty_name",
"(",
"tensor",
")",
"if",
"name",
"is",
"None",
"else",
"prefix",
"+",
"name",
"# summary statistics",
"if",
"not",
"tf",
".",
"get_variable_scope",
"(",
")",
".",
"reuse",
":",
"tf",
".",
"summary",
".",
"audio",
"(",
"name",
"+",
"'-au'",
",",
"tensor",
",",
"sample_rate",
")"
] | r"""Register `tensor` to summary report as audio
Args:
tensor: A `Tensor` to log as audio
sample_rate : An int. Sample rate to report. Default is 16000.
prefix: A `string`. A prefix to display in the tensor board web UI.
name: A `string`. A name to display in the tensor board web UI.
Returns:
None | [
"r",
"Register",
"tensor",
"to",
"summary",
"report",
"as",
"audio"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_logging.py#L142-L160 | train |
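`tf.summary.audio` accepts 2-D [batch, frames] or 3-D [batch, frames, channels] tensors; a hedged sketch with an illustrative batch of one-second clips at 16 kHz:
import sugartensor as tf
import numpy as np
wav = tf.constant(np.random.randn(4, 16000), dtype=tf.sg_floatx)  # hypothetical waveforms
tf.sg_summary_audio(wav, sample_rate=16000, prefix='train', name='wave')  # 'train/wave-au'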
buriburisuri/sugartensor | sugartensor/sg_train.py | sg_train | def sg_train(**kwargs):
r"""Trains the model.
Args:
**kwargs:
optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'.
loss: A 0-D `Tensor` containing the value to minimize.
lr: A Python Scalar (optional). Learning rate. Default is .001.
beta1: A Python Scalar (optional). Default is .9.
beta2: A Python Scalar (optional). Default is .99.
save_dir: A string. The root path to which checkpoint and log files are saved.
Default is `asset/train`.
max_ep: A positive integer. Maximum number of epochs. Default is 1000.
ep_size: A positive integer. Total number of batches in an epoch.
For proper display of logs. Default is 1e5.
save_interval: A Python scalar. The interval of saving checkpoint files.
By default, for every 600 seconds, a checkpoint file is written.
log_interval: A Python scalar. The interval of recording logs.
By default, for every 60 seconds, logging is executed.
max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.
category: Scope name or list to train
eval_metric: A list of tensors containing the value to evaluate. Default is [].
tqdm: Boolean. If True (default), progress bars are shown. If False, a series of loss
values is printed to the console.
"""
opt = tf.sg_opt(kwargs)
assert opt.loss is not None, 'loss is mandatory.'
# default training options
opt += tf.sg_opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='', ep_size=100000)
# get optimizer
train_op = sg_optim(opt.loss, optim=opt.optim, lr=opt.lr,
beta1=opt.beta1, beta2=opt.beta2, category=opt.category)
# for console logging
loss_ = opt.loss
# use only first loss when multiple GPU case
if isinstance(opt.loss, (tuple, list)):
loss_ = opt.loss[0]
# define train function
# noinspection PyUnusedLocal
@sg_train_func
def train_func(sess, arg):
return sess.run([loss_, train_op])[0]
# run train function
train_func(**opt) | python | def sg_train(**kwargs):
r"""Trains the model.
Args:
**kwargs:
optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'.
loss: A 0-D `Tensor` containing the value to minimize.
lr: A Python Scalar (optional). Learning rate. Default is .001.
beta1: A Python Scalar (optional). Default is .9.
beta2: A Python Scalar (optional). Default is .99.
save_dir: A string. The root path to which checkpoint and log files are saved.
Default is `asset/train`.
max_ep: A positive integer. Maximum number of epochs. Default is 1000.
ep_size: A positive integer. Total number of batches in an epoch.
For proper display of logs. Default is 1e5.
save_interval: A Python scalar. The interval of saving checkpoint files.
By default, for every 600 seconds, a checkpoint file is written.
log_interval: A Python scalar. The interval of recording logs.
By default, for every 60 seconds, logging is executed.
max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.
category: Scope name or list to train
eval_metric: A list of tensors containing the value to evaluate. Default is [].
tqdm: Boolean. If True (default), progress bars are shown. If False, a series of loss
values is printed to the console.
"""
opt = tf.sg_opt(kwargs)
assert opt.loss is not None, 'loss is mandatory.'
# default training options
opt += tf.sg_opt(optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='', ep_size=100000)
# get optimizer
train_op = sg_optim(opt.loss, optim=opt.optim, lr=opt.lr,
beta1=opt.beta1, beta2=opt.beta2, category=opt.category)
# for console logging
loss_ = opt.loss
# use only first loss when multiple GPU case
if isinstance(opt.loss, (tuple, list)):
loss_ = opt.loss[0]
# define train function
# noinspection PyUnusedLocal
@sg_train_func
def train_func(sess, arg):
return sess.run([loss_, train_op])[0]
# run train function
train_func(**opt) | [
"def",
"sg_train",
"(",
"*",
"*",
"kwargs",
")",
":",
"opt",
"=",
"tf",
".",
"sg_opt",
"(",
"kwargs",
")",
"assert",
"opt",
".",
"loss",
"is",
"not",
"None",
",",
"'loss is mandatory.'",
"# default training options",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"optim",
"=",
"'MaxProp'",
",",
"lr",
"=",
"0.001",
",",
"beta1",
"=",
"0.9",
",",
"beta2",
"=",
"0.99",
",",
"category",
"=",
"''",
",",
"ep_size",
"=",
"100000",
")",
"# get optimizer",
"train_op",
"=",
"sg_optim",
"(",
"opt",
".",
"loss",
",",
"optim",
"=",
"opt",
".",
"optim",
",",
"lr",
"=",
"0.001",
",",
"beta1",
"=",
"opt",
".",
"beta1",
",",
"beta2",
"=",
"opt",
".",
"beta2",
",",
"category",
"=",
"opt",
".",
"category",
")",
"# for console logging",
"loss_",
"=",
"opt",
".",
"loss",
"# use only first loss when multiple GPU case",
"if",
"isinstance",
"(",
"opt",
".",
"loss",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"loss_",
"=",
"opt",
".",
"loss",
"[",
"0",
"]",
"# define train function",
"# noinspection PyUnusedLocal",
"@",
"sg_train_func",
"def",
"train_func",
"(",
"sess",
",",
"arg",
")",
":",
"return",
"sess",
".",
"run",
"(",
"[",
"loss_",
",",
"train_op",
"]",
")",
"[",
"0",
"]",
"# run train function",
"train_func",
"(",
"*",
"*",
"opt",
")"
] | r"""Trains the model.
Args:
**kwargs:
optim: A name for optimizer. 'MaxProp' (default), 'AdaMax', 'Adam', 'RMSProp' or 'sgd'.
loss: A 0-D `Tensor` containing the value to minimize.
lr: A Python Scalar (optional). Learning rate. Default is .001.
beta1: A Python Scalar (optional). Default is .9.
beta2: A Python Scalar (optional). Default is .99.
save_dir: A string. The root path to which checkpoint and log files are saved.
Default is `asset/train`.
max_ep: A positive integer. Maximum number of epochs. Default is 1000.
ep_size: A positive integer. Total number of batches in an epoch.
For proper display of logs. Default is 1e5.
save_interval: A Python scalar. The interval of saving checkpoint files.
By default, for every 600 seconds, a checkpoint file is written.
log_interval: A Python scalar. The interval of recording logs.
By default, for every 60 seconds, logging is executed.
max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
keep_interval: A Python scalar. How often to keep checkpoints. Default is 1 hour.
category: Scope name or list to train
eval_metric: A list of tensors containing the value to evaluate. Default is [].
tqdm: Boolean. If True (default), progress bars are shown. If False, a series of loss
values is printed to the console.
"r",
"Trains",
"the",
"model",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_train.py#L13-L69 | train |
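A hedged end-to-end sketch of a typical `sg_train` invocation (the one-layer model is illustrative, not the library's bundled example):
import sugartensor as tf
data = tf.sg_data.Mnist()                    # assumes the bundled MNIST queue helper
x, y = data.train.image, data.train.label
logit = x.sg_flatten().sg_dense(dim=10)      # hypothetical one-layer classifier
loss = logit.sg_ce(target=y)                 # cross-entropy loss
tf.sg_train(loss=loss, ep_size=data.train.num_batch, max_ep=10, save_dir='asset/train')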
buriburisuri/sugartensor | sugartensor/sg_train.py | sg_restore | def sg_restore(sess, save_path, category=''):
r""" Restores previously saved variables.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
category: A `String` to filter variables that start with the given category.
Returns:
"""
# to list
if not isinstance(category, (tuple, list)):
category = [category]
# make variable list to load
var_list = {}
for cat in category:
for t in tf.global_variables():
if t.name.startswith(cat):
var_list[t.name[:-2]] = t
# restore parameters
saver = tf.train.Saver(var_list)
saver.restore(sess, save_path) | python | def sg_restore(sess, save_path, category=''):
r""" Restores previously saved variables.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
category: A `String` to filter variables that start with the given category.
Returns:
"""
# to list
if not isinstance(category, (tuple, list)):
category = [category]
# make variable list to load
var_list = {}
for cat in category:
for t in tf.global_variables():
if t.name.startswith(cat):
var_list[t.name[:-2]] = t
# restore parameters
saver = tf.train.Saver(var_list)
saver.restore(sess, save_path) | [
"def",
"sg_restore",
"(",
"sess",
",",
"save_path",
",",
"category",
"=",
"''",
")",
":",
"# to list",
"if",
"not",
"isinstance",
"(",
"category",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"category",
"=",
"[",
"category",
"]",
"# make variable list to load",
"var_list",
"=",
"{",
"}",
"for",
"cat",
"in",
"category",
":",
"for",
"t",
"in",
"tf",
".",
"global_variables",
"(",
")",
":",
"if",
"t",
".",
"name",
".",
"startswith",
"(",
"cat",
")",
":",
"var_list",
"[",
"t",
".",
"name",
"[",
":",
"-",
"2",
"]",
"]",
"=",
"t",
"# restore parameters",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"var_list",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"save_path",
")"
] | r""" Restores previously saved variables.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
category: A `String` to filter variables that start with the given category.
Returns: | [
"r",
"Restores",
"previously",
"saved",
"variables",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_train.py#L124-L148 | train |
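A hedged sketch of restoring one category of variables into a live session (the checkpoint path is illustrative):
import sugartensor as tf
with tf.Session() as sess:
    tf.sg_init(sess)  # assumes sugartensor's variable-initialization helper
    tf.sg_restore(sess, 'asset/train/model.ckpt-1000', category='generator')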
buriburisuri/sugartensor | sugartensor/sg_train.py | sg_regularizer_loss | def sg_regularizer_loss(scale=1.0):
r""" Get regularizer losss
Args:
scale: A scalar. A weight applied to regularizer loss
"""
return scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) | python | def sg_regularizer_loss(scale=1.0):
r""" Get regularizer losss
Args:
scale: A scalar. A weight applied to regularizer loss
"""
return scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) | [
"def",
"sg_regularizer_loss",
"(",
"scale",
"=",
"1.0",
")",
":",
"return",
"scale",
"*",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"REGULARIZATION_LOSSES",
")",
")"
] | r""" Get regularizer losss
Args:
scale: A scalar. A weight applied to regularizer loss | [
"r",
"Get",
"regularizer",
"losss"
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_train.py#L376-L382 | train |
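Since the helper just averages the graph's collected regularization losses, it is typically added straight onto the task loss; a hedged sketch continuing the hypothetical model from the sg_train sketch above:
import sugartensor as tf
total_loss = loss + tf.sg_regularizer_loss(scale=1e-4)  # loss: the task loss defined earlier
tf.sg_train(loss=total_loss, max_ep=10)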
buriburisuri/sugartensor | sugartensor/sg_net.py | sg_densenet_layer | def sg_densenet_layer(x, opt):
r"""Applies basic architecture of densenet layer.
Note that the fc layers in the original architecture
will be replaced with fully convolutional layers.
For convenience, we still call them fc layers, though.
Args:
x: A `Tensor`.
opt:
dim: An integer. Dimension for this densenet layer
num: Number of times to repeat
act: String. 'relu' (default). the activation function name
trans: Boolean. If True(default), transition layer will be applied.
reuse: Boolean(Optional). If True, all variables will be loaded from previous network.
name: String. (optional) Used as convolution layer prefix
Returns:
A `Tensor`.
"""
assert opt.dim is not None, 'dim is mandatory.'
assert opt.num is not None, 'num is mandatory.'
# default stride
opt += tf.sg_opt(stride=1, act='relu', trans=True)
# format convolutional layer name
def cname(index):
return opt.name if opt.name is None else opt.name + '_%d' % index
# dense layer
with tf.sg_context(bias=False, reuse=opt.reuse):
out = x
for i in range(opt.num):
# dense block
out_new = (out
.sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 1))
.sg_conv(dim=opt.dim // 4, size=1, act=opt.act, bn=True, name=cname(3 * i + 2))
.sg_conv(dim=opt.dim, size=3, name=cname(3 * i + 3)))
out = tf.concat([out_new, out], 3)
# transition layer
if opt.trans:
out = (out
.sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 4))
.sg_conv(size=1, name=cname(3 * i + 5))
.sg_pool(avg=True))
return out | python | def sg_densenet_layer(x, opt):
r"""Applies basic architecture of densenet layer.
Note that the fc layers in the original architecture
will be replaced with fully convolutional layers.
For convenience, we still call them fc layers, though.
Args:
x: A `Tensor`.
opt:
dim: An integer. Dimension for this densenet layer
num: Number of times to repeat
act: String. 'relu' (default). the activation function name
trans: Boolean. If True(default), transition layer will be applied.
reuse: Boolean(Optional). If True, all variables will be loaded from previous network.
name: String. (optional) Used as convolution layer prefix
Returns:
A `Tensor`.
"""
assert opt.dim is not None, 'dim is mandatory.'
assert opt.num is not None, 'num is mandatory.'
# default stride
opt += tf.sg_opt(stride=1, act='relu', trans=True)
# format convolutional layer name
def cname(index):
return opt.name if opt.name is None else opt.name + '_%d' % index
# dense layer
with tf.sg_context(bias=False, reuse=opt.reuse):
out = x
for i in range(opt.num):
# dense block
out_new = (out
.sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 1))
.sg_conv(dim=opt.dim // 4, size=1, act=opt.act, bn=True, name=cname(3 * i + 2))
.sg_conv(dim=opt.dim, size=3, name=cname(3 * i + 3)))
out = tf.concat([out_new, out], 3)
# transition layer
if opt.trans:
out = (out
.sg_bypass(act=opt.act, bn=True, name=cname(3 * i + 4))
.sg_conv(size=1, name=cname(3 * i + 5))
.sg_pool(avg=True))
return out | [
"def",
"sg_densenet_layer",
"(",
"x",
",",
"opt",
")",
":",
"assert",
"opt",
".",
"dim",
"is",
"not",
"None",
",",
"'dim is mandatory.'",
"assert",
"opt",
".",
"num",
"is",
"not",
"None",
",",
"'num is mandatory.'",
"# default stride",
"opt",
"+=",
"tf",
".",
"sg_opt",
"(",
"stride",
"=",
"1",
",",
"act",
"=",
"'relu'",
",",
"trans",
"=",
"True",
")",
"# format convolutional layer name",
"def",
"cname",
"(",
"index",
")",
":",
"return",
"opt",
".",
"name",
"if",
"opt",
".",
"name",
"is",
"None",
"else",
"opt",
".",
"name",
"+",
"'_%d'",
"%",
"index",
"# dense layer",
"with",
"tf",
".",
"sg_context",
"(",
"bias",
"=",
"False",
",",
"reuse",
"=",
"opt",
".",
"reuse",
")",
":",
"out",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"opt",
".",
"num",
")",
":",
"# dense block",
"out_new",
"=",
"(",
"out",
".",
"sg_bypass",
"(",
"act",
"=",
"opt",
".",
"act",
",",
"bn",
"=",
"True",
",",
"name",
"=",
"cname",
"(",
"3",
"*",
"i",
"+",
"1",
")",
")",
".",
"sg_conv",
"(",
"dim",
"=",
"opt",
".",
"dim",
"//",
"4",
",",
"size",
"=",
"1",
",",
"act",
"=",
"opt",
".",
"act",
",",
"bn",
"=",
"True",
",",
"name",
"=",
"cname",
"(",
"3",
"*",
"i",
"+",
"2",
")",
")",
".",
"sg_conv",
"(",
"dim",
"=",
"opt",
".",
"dim",
",",
"size",
"=",
"3",
",",
"name",
"=",
"cname",
"(",
"3",
"*",
"i",
"+",
"3",
")",
")",
")",
"out",
"=",
"tf",
".",
"concat",
"(",
"[",
"out_new",
",",
"out",
"]",
",",
"3",
")",
"# transition layer",
"if",
"opt",
".",
"trans",
":",
"out",
"=",
"(",
"out",
".",
"sg_bypass",
"(",
"act",
"=",
"opt",
".",
"act",
",",
"bn",
"=",
"True",
",",
"name",
"=",
"cname",
"(",
"3",
"*",
"i",
"+",
"4",
")",
")",
".",
"sg_conv",
"(",
"size",
"=",
"1",
",",
"name",
"=",
"cname",
"(",
"3",
"*",
"i",
"+",
"5",
")",
")",
".",
"sg_pool",
"(",
"avg",
"=",
"True",
")",
")",
"return",
"out"
] | r"""Applies basic architecture of densenet layer.
Note that the fc layers in the original architecture
will be replaced with fully convolutional layers.
For convenience, we still call them fc layers, though.
Args:
x: A `Tensor`.
opt:
dim: An integer. Dimension for this densenet layer
num: Number of times to repeat
act: String. 'relu' (default). the activation function name
trans: Boolean. If True(default), transition layer will be applied.
reuse: Boolean(Optional). If True, all variables will be loaded from previous network.
name: String. (optional) Used as convolution layer prefix
Returns:
A `Tensor`. | [
"r",
"Applies",
"basic",
"architecture",
"of",
"densenet",
"layer",
"."
] | d2c039954777c7fbe3eb0c2ae40c45c9854deb40 | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_net.py#L432-L480 | train |
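The layer takes a single `sg_opt` bundle rather than keyword arguments; a hedged sketch of calling it directly (normally the higher-level densenet builders in the same module do this):
import sugartensor as tf
from sugartensor.sg_net import sg_densenet_layer  # assumed import path
# x: hypothetical [batch, H, W, C] feature map already in the graph
out = sg_densenet_layer(x, tf.sg_opt(dim=32, num=4, trans=True, name='dense1'))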
thanethomson/statik | statik/utils.py | deep_merge_dict | def deep_merge_dict(a, b):
"""Deep merges dictionary b into dictionary a."""
if not isinstance(a, dict):
raise TypeError("a must be a dict, but found %s" % a.__class__.__name__)
if not isinstance(b, dict):
raise TypeError("b must be a dict, but found %s" % b.__class__.__name__)
_a = copy(a)
_b = copy(b)
for key_b, val_b in iteritems(_b):
# if it's a sub-dictionary
if isinstance(val_b, dict):
if key_b not in _a or not isinstance(_a[key_b], dict):
_a[key_b] = {}
# perform the deep merge recursively
_a[key_b] = deep_merge_dict(_a[key_b], val_b)
else:
_a[key_b] = val_b
# b should now be deep-merged into a
return _a | python | def deep_merge_dict(a, b):
"""Deep merges dictionary b into dictionary a."""
if not isinstance(a, dict):
raise TypeError("a must be a dict, but found %s" % a.__class__.__name__)
if not isinstance(b, dict):
raise TypeError("b must be a dict, but found %s" % b.__class__.__name__)
_a = copy(a)
_b = copy(b)
for key_b, val_b in iteritems(_b):
# if it's a sub-dictionary
if isinstance(val_b, dict):
if key_b not in _a or not isinstance(_a[key_b], dict):
_a[key_b] = {}
# perform the deep merge recursively
_a[key_b] = deep_merge_dict(_a[key_b], val_b)
else:
_a[key_b] = val_b
# b should now be deep-merged into a
return _a | [
"def",
"deep_merge_dict",
"(",
"a",
",",
"b",
")",
":",
"if",
"not",
"isinstance",
"(",
"a",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"a must be a dict, but found %s\"",
"%",
"a",
".",
"__class__",
".",
"__name__",
")",
"if",
"not",
"isinstance",
"(",
"b",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"b must be a dict, but found %s\"",
"%",
"b",
".",
"__class__",
".",
"__name__",
")",
"_a",
"=",
"copy",
"(",
"a",
")",
"_b",
"=",
"copy",
"(",
"b",
")",
"for",
"key_b",
",",
"val_b",
"in",
"iteritems",
"(",
"_b",
")",
":",
"# if it's a sub-dictionary",
"if",
"isinstance",
"(",
"val_b",
",",
"dict",
")",
":",
"if",
"key_b",
"not",
"in",
"_a",
"or",
"not",
"isinstance",
"(",
"_a",
"[",
"key_b",
"]",
",",
"dict",
")",
":",
"_a",
"[",
"key_b",
"]",
"=",
"{",
"}",
"# perform the deep merge recursively",
"_a",
"[",
"key_b",
"]",
"=",
"deep_merge_dict",
"(",
"_a",
"[",
"key_b",
"]",
",",
"val_b",
")",
"else",
":",
"_a",
"[",
"key_b",
"]",
"=",
"val_b",
"# b should now be deep-merged into a",
"return",
"_a"
] | Deep merges dictionary b into dictionary a. | [
"Deep",
"merges",
"dictionary",
"b",
"into",
"dictionary",
"a",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L102-L124 | train |
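A quick illustration of the recursive merge (values from `b` win, nested dicts merge rather than overwrite, and the inputs are left unmodified because the function copies first):
a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
b = {'db': {'port': 5433}, 'debug': True}
merged = deep_merge_dict(a, b)
# merged == {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}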
thanethomson/statik | statik/utils.py | copy_file_if_modified | def copy_file_if_modified(src_path, dest_path):
"""Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets."""
# if the destination path is a directory, delete it completely - we assume here we are
# writing a file to the filesystem
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
must_copy = False
if not os.path.exists(dest_path):
must_copy = True
else:
src_stat = os.stat(src_path)
dest_stat = os.stat(dest_path)
# if the size or last modified timestamp are different
if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
(src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
must_copy = True
if must_copy:
shutil.copy2(src_path, dest_path) | python | def copy_file_if_modified(src_path, dest_path):
"""Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets."""
# if the destination path is a directory, delete it completely - we assume here we are
# writing a file to the filesystem
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
must_copy = False
if not os.path.exists(dest_path):
must_copy = True
else:
src_stat = os.stat(src_path)
dest_stat = os.stat(dest_path)
# if the size or last modified timestamp are different
if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
(src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
must_copy = True
if must_copy:
shutil.copy2(src_path, dest_path) | [
"def",
"copy_file_if_modified",
"(",
"src_path",
",",
"dest_path",
")",
":",
"# if the destination path is a directory, delete it completely - we assume here we are",
"# writing a file to the filesystem",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dest_path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"dest_path",
")",
"must_copy",
"=",
"False",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dest_path",
")",
":",
"must_copy",
"=",
"True",
"else",
":",
"src_stat",
"=",
"os",
".",
"stat",
"(",
"src_path",
")",
"dest_stat",
"=",
"os",
".",
"stat",
"(",
"dest_path",
")",
"# if the size or last modified timestamp are different",
"if",
"(",
"(",
"src_stat",
"[",
"stat",
".",
"ST_SIZE",
"]",
"!=",
"dest_stat",
"[",
"stat",
".",
"ST_SIZE",
"]",
")",
"or",
"(",
"src_stat",
"[",
"stat",
".",
"ST_MTIME",
"]",
"!=",
"dest_stat",
"[",
"stat",
".",
"ST_MTIME",
"]",
")",
")",
":",
"must_copy",
"=",
"True",
"if",
"must_copy",
":",
"shutil",
".",
"copy2",
"(",
"src_path",
",",
"dest_path",
")"
] | Only copies the file from the source path to the destination path if it doesn't exist yet or it has
been modified. Intended to provide something of an optimisation when a project has large trees of assets. | [
"Only",
"copies",
"the",
"file",
"from",
"the",
"source",
"path",
"to",
"the",
"destination",
"path",
"if",
"it",
"doesn",
"t",
"exist",
"yet",
"or",
"it",
"has",
"been",
"modified",
".",
"Intended",
"to",
"provide",
"something",
"of",
"an",
"optimisation",
"when",
"a",
"project",
"has",
"large",
"trees",
"of",
"assets",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L141-L163 | train |
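A hedged sketch of the intended pattern, syncing a flat asset directory while skipping unchanged files (paths are illustrative; shutil.copy2 preserves mtimes, so unchanged files compare equal on the next run):
import os
src_dir, dest_dir = 'assets', 'public/assets'  # hypothetical paths
for name in os.listdir(src_dir):
    src = os.path.join(src_dir, name)
    if os.path.isfile(src):
        copy_file_if_modified(src, os.path.join(dest_dir, name))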
thanethomson/statik | statik/utils.py | get_url_file_ext | def get_url_file_ext(url):
"""Attempts to extract the file extension from the given URL."""
# get the last part of the path component
filename = url.split('/')[-1]
name, ext = os.path.splitext(filename)
# handle case of files with leading dot
if not ext and name and name[0] == '.':
ext = name
return ext | python | def get_url_file_ext(url):
"""Attempts to extract the file extension from the given URL."""
# get the last part of the path component
filename = url.split('/')[-1]
name, ext = os.path.splitext(filename)
# handle case of files with leading dot
if not ext and name and name[0] == '.':
ext = name
return ext | [
"def",
"get_url_file_ext",
"(",
"url",
")",
":",
"# get the last part of the path component",
"filename",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"# handle case of files with leading dot",
"if",
"not",
"ext",
"and",
"name",
"and",
"name",
"[",
"0",
"]",
"==",
"'.'",
":",
"ext",
"=",
"name",
"return",
"ext"
] | Attempts to extract the file extension from the given URL. | [
"Attempts",
"to",
"extract",
"the",
"file",
"extension",
"from",
"the",
"given",
"URL",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L195-L205 | train |
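Illustrative inputs and outputs, including the leading-dot edge case the helper handles:
get_url_file_ext('/posts/2016/my-post.html')   # -> '.html'
get_url_file_ext('/downloads/archive.tar.gz')  # -> '.gz' (splitext keeps only the last suffix)
get_url_file_ext('/conf/.htaccess')            # -> '.htaccess' (leading-dot filename)
get_url_file_ext('/about/')                    # -> '' (no filename component)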
thanethomson/statik | statik/utils.py | generate_quickstart | def generate_quickstart(project_path):
"""Generates all of the basic paths for a Statik project within the given project path. If the project path
doesn't exist, it will be created."""
ensure_path_exists(project_path)
ensure_file_exists(os.path.join(project_path, "config.yml"), DEFAULT_CONFIG_CONTENT)
ensure_path_exists(os.path.join(project_path, 'models'))
ensure_path_exists(os.path.join(project_path, 'data'))
ensure_path_exists(os.path.join(project_path, 'themes'))
ensure_path_exists(os.path.join(project_path, 'templates'))
ensure_path_exists(os.path.join(project_path, 'templatetags'))
ensure_path_exists(os.path.join(project_path, 'views'))
ensure_path_exists(os.path.join(project_path, 'assets')) | python | def generate_quickstart(project_path):
"""Generates all of the basic paths for a Statik project within the given project path. If the project path
doesn't exist, it will be created."""
ensure_path_exists(project_path)
ensure_file_exists(os.path.join(project_path, "config.yml"), DEFAULT_CONFIG_CONTENT)
ensure_path_exists(os.path.join(project_path, 'models'))
ensure_path_exists(os.path.join(project_path, 'data'))
ensure_path_exists(os.path.join(project_path, 'themes'))
ensure_path_exists(os.path.join(project_path, 'templates'))
ensure_path_exists(os.path.join(project_path, 'templatetags'))
ensure_path_exists(os.path.join(project_path, 'views'))
ensure_path_exists(os.path.join(project_path, 'assets')) | [
"def",
"generate_quickstart",
"(",
"project_path",
")",
":",
"ensure_path_exists",
"(",
"project_path",
")",
"ensure_file_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"\"config.yml\"",
")",
",",
"DEFAULT_CONFIG_CONTENT",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'models'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'data'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'themes'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'templates'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'templatetags'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'views'",
")",
")",
"ensure_path_exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"'assets'",
")",
")"
] | Generates all of the basic paths for a Statik project within the given project path. If the project path
doesn't exist, it will be created. | [
"Generates",
"all",
"of",
"the",
"basic",
"paths",
"for",
"a",
"Statik",
"project",
"within",
"the",
"given",
"project",
"path",
".",
"If",
"the",
"project",
"path",
"doesn",
"t",
"exist",
"it",
"will",
"be",
"created",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L208-L219 | train |
thanethomson/statik | statik/utils.py | get_project_config_file | def get_project_config_file(path, default_config_file_name):
"""Attempts to extract the project config file's absolute path from the given path. If the path is a
directory, it automatically assumes a "config.yml" file will be in that directory. If the path is to
a .yml file, it assumes that that is the root configuration file for the project."""
_path, _config_file_path = None, None
path = os.path.abspath(path)
if os.path.isdir(path):
_path = path
# use the default config file
_config_file_path = os.path.join(_path, default_config_file_name)
logger.debug("Using default project configuration file path: %s", _config_file_path)
elif path.endswith(".yml"):
_path = os.path.dirname(path)
_config_file_path = path
logger.debug("Using custom project configuration file path: %s", _config_file_path)
return _path, _config_file_path | python | def get_project_config_file(path, default_config_file_name):
"""Attempts to extract the project config file's absolute path from the given path. If the path is a
directory, it automatically assumes a "config.yml" file will be in that directory. If the path is to
a .yml file, it assumes that that is the root configuration file for the project."""
_path, _config_file_path = None, None
path = os.path.abspath(path)
if os.path.isdir(path):
_path = path
# use the default config file
_config_file_path = os.path.join(_path, default_config_file_name)
logger.debug("Using default project configuration file path: %s", _config_file_path)
elif path.endswith(".yml"):
_path = os.path.dirname(path)
_config_file_path = path
logger.debug("Using custom project configuration file path: %s", _config_file_path)
return _path, _config_file_path | [
"def",
"get_project_config_file",
"(",
"path",
",",
"default_config_file_name",
")",
":",
"_path",
",",
"_config_file_path",
"=",
"None",
",",
"None",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"_path",
"=",
"path",
"# use the default config file",
"_config_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_path",
",",
"default_config_file_name",
")",
"logger",
".",
"debug",
"(",
"\"Using default project configuration file path: %s\"",
",",
"_config_file_path",
")",
"elif",
"path",
".",
"endswith",
"(",
"\".yml\"",
")",
":",
"_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"_config_file_path",
"=",
"path",
"logger",
".",
"debug",
"(",
"\"Using custom project configuration file path: %s\"",
",",
"_config_file_path",
")",
"return",
"_path",
",",
"_config_file_path"
] | Attempts to extract the project config file's absolute path from the given path. If the path is a
directory, it automatically assumes a "config.yml" file will be in that directory. If the path is to
a .yml file, it assumes that that is the root configuration file for the project. | [
"Attempts",
"to",
"extract",
"the",
"project",
"config",
"file",
"s",
"absolute",
"path",
"from",
"the",
"given",
"path",
".",
"If",
"the",
"path",
"is",
"a",
"directory",
"it",
"automatically",
"assumes",
"a",
"config",
".",
"yml",
"file",
"will",
"be",
"in",
"that",
"directory",
".",
"If",
"the",
"path",
"is",
"to",
"a",
".",
"yml",
"file",
"it",
"assumes",
"that",
"that",
"is",
"the",
"root",
"configuration",
"file",
"for",
"the",
"project",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L259-L276 | train |
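Two illustrative calls showing the directory branch and the explicit-file branch (hypothetical paths, assumed to exist on disk):
path, cfg = get_project_config_file('/projects/site', 'config.yml')
# -> ('/projects/site', '/projects/site/config.yml')
path, cfg = get_project_config_file('/projects/site/custom.yml', 'config.yml')
# -> ('/projects/site', '/projects/site/custom.yml')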
thanethomson/statik | statik/utils.py | strip_el_text | def strip_el_text(el, max_depth=0, cur_depth=0):
"""Recursively strips the plain text out of the given XML etree element up to the desired depth.
Args:
el: The etree element to scan.
max_depth: The depth to which to recursively strip text (default: 0).
cur_depth: The current recursive depth to which we've scanned so far.
Returns:
The stripped, plain text from within the element.
"""
# text in front of any child elements
el_text = strip_str(el.text if el.text is not None else "")
if cur_depth < max_depth:
for child in el:
el_text += " "+strip_el_text(child, max_depth=max_depth, cur_depth=cur_depth+1)
else:
# add the last child's tail text, if any
children = list(el)
if children is not None and len(children) > 0:
if children[-1].tail is not None:
el_text += " "+strip_str(children[-1].tail)
# we skip the root element
if cur_depth > 0:
# in case there's any text at the end of the element
if el.tail is not None:
el_text += " "+strip_str(el.tail)
return strip_str(el_text) | python | def strip_el_text(el, max_depth=0, cur_depth=0):
"""Recursively strips the plain text out of the given XML etree element up to the desired depth.
Args:
el: The etree element to scan.
max_depth: The depth to which to recursively strip text (default: 0).
cur_depth: The current recursive depth to which we've scanned so far.
Returns:
The stripped, plain text from within the element.
"""
# text in front of any child elements
el_text = strip_str(el.text if el.text is not None else "")
if cur_depth < max_depth:
for child in el:
el_text += " "+strip_el_text(child, max_depth=max_depth, cur_depth=cur_depth+1)
else:
# add the last child's tail text, if any
children = list(el)
if children is not None and len(children) > 0:
if children[-1].tail is not None:
el_text += " "+strip_str(children[-1].tail)
# we skip the root element
if cur_depth > 0:
# in case there's any text at the end of the element
if el.tail is not None:
el_text += " "+strip_str(el.tail)
return strip_str(el_text) | [
"def",
"strip_el_text",
"(",
"el",
",",
"max_depth",
"=",
"0",
",",
"cur_depth",
"=",
"0",
")",
":",
"# text in front of any child elements",
"el_text",
"=",
"strip_str",
"(",
"el",
".",
"text",
"if",
"el",
".",
"text",
"is",
"not",
"None",
"else",
"\"\"",
")",
"if",
"cur_depth",
"<",
"max_depth",
":",
"for",
"child",
"in",
"el",
":",
"el_text",
"+=",
"\" \"",
"+",
"strip_el_text",
"(",
"child",
",",
"max_depth",
"=",
"max_depth",
",",
"cur_depth",
"=",
"cur_depth",
"+",
"1",
")",
"else",
":",
"# add the last child's tail text, if any",
"children",
"=",
"list",
"(",
"el",
")",
"if",
"children",
"is",
"not",
"None",
"and",
"len",
"(",
"children",
")",
">",
"0",
":",
"if",
"children",
"[",
"-",
"1",
"]",
".",
"tail",
"is",
"not",
"None",
":",
"el_text",
"+=",
"\" \"",
"+",
"strip_str",
"(",
"children",
"[",
"-",
"1",
"]",
".",
"tail",
")",
"# we skip the root element",
"if",
"cur_depth",
">",
"0",
":",
"# in case there's any text at the end of the element",
"if",
"el",
".",
"tail",
"is",
"not",
"None",
":",
"el_text",
"+=",
"\" \"",
"+",
"strip_str",
"(",
"el",
".",
"tail",
")",
"return",
"strip_str",
"(",
"el_text",
")"
] | Recursively strips the plain text out of the given XML etree element up to the desired depth.
Args:
el: The etree element to scan.
max_depth: The depth to which to recursively strip text (default: 0).
cur_depth: The current recursive depth to which we've scanned so far.
Returns:
The stripped, plain text from within the element. | [
"Recursively",
"strips",
"the",
"plain",
"text",
"out",
"of",
"the",
"given",
"XML",
"etree",
"element",
"up",
"to",
"the",
"desired",
"depth",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L303-L333 | train |
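A small sketch with an XML fragment (assumes the module's strip_str whitespace helper; note max_depth=0 keeps only the element's own text plus the tail of its last child):
from xml.etree import ElementTree as ET
el = ET.fromstring('<p>Hello <b>bold</b> world</p>')
strip_el_text(el, max_depth=1)  # -> 'Hello bold world'
strip_el_text(el)               # -> 'Hello world' (child text skipped at depth 0)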
thanethomson/statik | statik/utils.py | find_first_file_with_ext | def find_first_file_with_ext(base_paths, prefix, exts):
"""Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None).
"""
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, "%s%s" % (prefix, ext))
if os.path.exists(filename) and os.path.isfile(filename):
logger.debug("Found first file with relevant extension: %s", filename)
return base_path, ext
logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
return None, None | python | def find_first_file_with_ext(base_paths, prefix, exts):
"""Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None).
"""
for base_path in base_paths:
for ext in exts:
filename = os.path.join(base_path, "%s%s" % (prefix, ext))
if os.path.exists(filename) and os.path.isfile(filename):
logger.debug("Found first file with relevant extension: %s", filename)
return base_path, ext
logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
return None, None | [
"def",
"find_first_file_with_ext",
"(",
"base_paths",
",",
"prefix",
",",
"exts",
")",
":",
"for",
"base_path",
"in",
"base_paths",
":",
"for",
"ext",
"in",
"exts",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"ext",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"logger",
".",
"debug",
"(",
"\"Found first file with relevant extension: %s\"",
",",
"filename",
")",
"return",
"base_path",
",",
"ext",
"logger",
".",
"debug",
"(",
"\"No files found for prefix %s, extensions %s\"",
",",
"prefix",
",",
"\", \"",
".",
"join",
"(",
"exts",
")",
")",
"return",
"None",
",",
"None"
] | Runs through the given list of file extensions and returns the first file with the given base
path and extension combination that actually exists.
Args:
base_paths: The base paths in which to search for files.
prefix: The filename prefix of the file for which to search.
exts: An ordered list of file extensions for which to search.
Returns:
On success, a 2-tuple containing the base path in which the file was found, and the extension of the file.
On failure, returns (None, None). | [
"Runs",
"through",
"the",
"given",
"list",
"of",
"file",
"extensions",
"and",
"returns",
"the",
"first",
"file",
"with",
"the",
"given",
"base",
"path",
"and",
"extension",
"combination",
"that",
"actually",
"exists",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L336-L357 | train |
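An illustrative call: given ordered search paths and an ordered extension preference, the first existing combination wins (paths hypothetical):
base, ext = find_first_file_with_ext(
    ['templates', 'themes/default/templates'],  # search order
    'home',                                     # filename prefix
    ['.html', '.html.jinja2'],                  # extension preference
)
# e.g. ('templates', '.html') if templates/home.html exists; (None, None) otherwise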
thanethomson/statik | statik/utils.py | find_duplicates_in_array | def find_duplicates_in_array(array):
"""Runs through the array and returns the elements that contain
more than one duplicate
Args:
array: The array to check for duplicates.
Returns:
Array of the elements that are duplicates. Returns empty list if
there are no duplicates.
"""
duplicates = []
non_duplicates = []
if len(array) != len(set(array)):
for item in array:
if item not in non_duplicates:
non_duplicates.append(item)
elif item in non_duplicates and item not in duplicates:
duplicates.append(item)
return duplicates | python | def find_duplicates_in_array(array):
"""Runs through the array and returns the elements that contain
more than one duplicate
Args:
array: The array to check for duplicates.
Returns:
Array of the elements that are duplicates. Returns empty list if
there are no duplicates.
"""
duplicates = []
non_duplicates = []
if len(array) != len(set(array)):
for item in array:
if item not in non_duplicates:
non_duplicates.append(item)
elif item in non_duplicates and item not in duplicates:
duplicates.append(item)
return duplicates | [
"def",
"find_duplicates_in_array",
"(",
"array",
")",
":",
"duplicates",
"=",
"[",
"]",
"non_duplicates",
"=",
"[",
"]",
"if",
"len",
"(",
"array",
")",
"!=",
"len",
"(",
"set",
"(",
"array",
")",
")",
":",
"for",
"item",
"in",
"array",
":",
"if",
"item",
"not",
"in",
"non_duplicates",
":",
"non_duplicates",
".",
"append",
"(",
"item",
")",
"elif",
"item",
"in",
"non_duplicates",
"and",
"item",
"not",
"in",
"duplicates",
":",
"duplicates",
".",
"append",
"(",
"item",
")",
"return",
"duplicates"
] | Runs through the array and returns the elements that appear
more than once in the array.
Args:
array: The array to check for duplicates.
Returns:
Array of the elements that are duplicates. Returns empty list if
there are no duplicates. | [
"Runs",
"through",
"the",
"array",
"and",
"returns",
"the",
"elements",
"that",
"contain",
"more",
"than",
"one",
"duplicate"
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L365-L386 | train |
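A quick illustration (the result lists each duplicate once, in order of first repeat):
find_duplicates_in_array(['a', 'b', 'a', 'c', 'b', 'a'])  # -> ['a', 'b']
find_duplicates_in_array([1, 2, 3])                       # -> []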
thanethomson/statik | setup.py | read_requirements | def read_requirements(filename):
"""
Parse a requirements file.
Accepts vcs+ links, and places the URL into
`DEPENDENCY_LINKS`.
:return: list of str for each package
"""
data = []
for line in read_file(filename):
line = line.strip()
if not line or line.startswith('#'):
continue
if '+' in line[:4]:
repo_link, egg_name = line.split('#egg=')
if not egg_name:
raise ValueError('Unknown requirement: {0}'
.format(line))
DEPENDENCY_LINKS.append(line)
line = egg_name
data.append(line)
return data | python | def read_requirements(filename):
"""
Parse a requirements file.
Accepts vcs+ links, and places the URL into
`DEPENDENCY_LINKS`.
:return: list of str for each package
"""
data = []
for line in read_file(filename):
line = line.strip()
if not line or line.startswith('#'):
continue
if '+' in line[:4]:
repo_link, egg_name = line.split('#egg=')
if not egg_name:
raise ValueError('Unknown requirement: {0}'
.format(line))
DEPENDENCY_LINKS.append(line)
line = egg_name
data.append(line)
return data | [
"def",
"read_requirements",
"(",
"filename",
")",
":",
"data",
"=",
"[",
"]",
"for",
"line",
"in",
"read_file",
"(",
"filename",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"not",
"line",
"or",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"if",
"'+'",
"in",
"line",
"[",
":",
"4",
"]",
":",
"repo_link",
",",
"egg_name",
"=",
"line",
".",
"split",
"(",
"'#egg='",
")",
"if",
"not",
"egg_name",
":",
"raise",
"ValueError",
"(",
"'Unknown requirement: {0}'",
".",
"format",
"(",
"line",
")",
")",
"DEPENDENCY_LINKS",
".",
"append",
"(",
"line",
")",
"line",
"=",
"egg_name",
"data",
".",
"append",
"(",
"line",
")",
"return",
"data"
] | Parse a requirements file.
Accepts vcs+ links, and places the URL into
`DEPENDENCY_LINKS`.
:return: list of str for each package | [
"Parse",
"a",
"requirements",
"file",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/setup.py#L22-L49 | train |
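A hedged illustration of how the parser treats a mixed requirements file (DEPENDENCY_LINKS is the module-level list the setup script reads):
# requirements.txt:
#   Jinja2>=2.9
#   # a comment
#   git+https://github.com/example/pkg.git#egg=pkg
reqs = read_requirements('requirements.txt')
# reqs == ['Jinja2>=2.9', 'pkg']
# DEPENDENCY_LINKS now contains 'git+https://github.com/example/pkg.git#egg=pkg'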
thanethomson/statik | statik/models.py | StatikModel.find_additional_rels | def find_additional_rels(self, all_models):
"""Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships.
"""
for model_name, model in iteritems(all_models):
if model_name != self.name:
for field_name in model.field_names:
field = model.fields[field_name]
# if this field type references the current model
if field.field_type == self.name and field.back_populates is not None and \
(isinstance(field, StatikForeignKeyField) or isinstance(field, StatikManyToManyField)):
self.additional_rels[field.back_populates] = {
'to_model': model_name,
'back_populates': field_name,
'secondary': (model_name, field.field_type)
if isinstance(field, StatikManyToManyField) else None
}
logger.debug(
'Additional relationship %s.%s -> %s (%s)',
self.name,
field.back_populates,
model_name,
self.additional_rels[field.back_populates]
) | python | def find_additional_rels(self, all_models):
"""Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships.
"""
for model_name, model in iteritems(all_models):
if model_name != self.name:
for field_name in model.field_names:
field = model.fields[field_name]
# if this field type references the current model
if field.field_type == self.name and field.back_populates is not None and \
(isinstance(field, StatikForeignKeyField) or isinstance(field, StatikManyToManyField)):
self.additional_rels[field.back_populates] = {
'to_model': model_name,
'back_populates': field_name,
'secondary': (model_name, field.field_type)
if isinstance(field, StatikManyToManyField) else None
}
logger.debug(
'Additional relationship %s.%s -> %s (%s)',
self.name,
field.back_populates,
model_name,
self.additional_rels[field.back_populates]
) | [
"def",
"find_additional_rels",
"(",
"self",
",",
"all_models",
")",
":",
"for",
"model_name",
",",
"model",
"in",
"iteritems",
"(",
"all_models",
")",
":",
"if",
"model_name",
"!=",
"self",
".",
"name",
":",
"for",
"field_name",
"in",
"model",
".",
"field_names",
":",
"field",
"=",
"model",
".",
"fields",
"[",
"field_name",
"]",
"# if this field type references the current model",
"if",
"field",
".",
"field_type",
"==",
"self",
".",
"name",
"and",
"field",
".",
"back_populates",
"is",
"not",
"None",
"and",
"(",
"isinstance",
"(",
"field",
",",
"StatikForeignKeyField",
")",
"or",
"isinstance",
"(",
"field",
",",
"StatikManyToManyField",
")",
")",
":",
"self",
".",
"additional_rels",
"[",
"field",
".",
"back_populates",
"]",
"=",
"{",
"'to_model'",
":",
"model_name",
",",
"'back_populates'",
":",
"field_name",
",",
"'secondary'",
":",
"(",
"model_name",
",",
"field",
".",
"field_type",
")",
"if",
"isinstance",
"(",
"field",
",",
"StatikManyToManyField",
")",
"else",
"None",
"}",
"logger",
".",
"debug",
"(",
"'Additional relationship %s.%s -> %s (%s)'",
",",
"self",
".",
"name",
",",
"field",
".",
"back_populates",
",",
"model_name",
",",
"self",
".",
"additional_rels",
"[",
"field",
".",
"back_populates",
"]",
")"
] | Attempts to scan for additional relationship fields for this model based on all of the other models'
structures and relationships. | [
"Attempts",
"to",
"scan",
"for",
"additional",
"relationship",
"fields",
"for",
"this",
"model",
"based",
"on",
"all",
"of",
"the",
"other",
"models",
"structures",
"and",
"relationships",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/models.py#L73-L96 | train |
thanethomson/statik | statik/database.py | StatikDatabase.create_db | def create_db(self, models):
"""Creates the in-memory SQLite database from the model
configuration."""
# first create the table definitions
self.tables = dict(
[
(model_name, self.create_model_table(model))
for model_name, model in iteritems(models)
]
)
# now create the tables in memory
logger.debug("Creating %d database table(s)...", len(self.tables))
try:
self.Base.metadata.create_all(self.engine)
except Exception as exc:
raise StatikError(
message="Failed to create in-memory data model.",
orig_exc=exc
)
self.load_all_model_data(models) | python | def create_db(self, models):
"""Creates the in-memory SQLite database from the model
configuration."""
# first create the table definitions
self.tables = dict(
[
(model_name, self.create_model_table(model))
for model_name, model in iteritems(models)
]
)
# now create the tables in memory
logger.debug("Creating %d database table(s)...", len(self.tables))
try:
self.Base.metadata.create_all(self.engine)
except Exception as exc:
raise StatikError(
message="Failed to create in-memory data model.",
orig_exc=exc
)
self.load_all_model_data(models) | [
"def",
"create_db",
"(",
"self",
",",
"models",
")",
":",
"# first create the table definitions",
"self",
".",
"tables",
"=",
"dict",
"(",
"[",
"(",
"model_name",
",",
"self",
".",
"create_model_table",
"(",
"model",
")",
")",
"for",
"model_name",
",",
"model",
"in",
"iteritems",
"(",
"models",
")",
"]",
")",
"# now create the tables in memory",
"logger",
".",
"debug",
"(",
"\"Creating %d database table(s)...\"",
",",
"len",
"(",
"self",
".",
"tables",
")",
")",
"try",
":",
"self",
".",
"Base",
".",
"metadata",
".",
"create_all",
"(",
"self",
".",
"engine",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"StatikError",
"(",
"message",
"=",
"\"Failed to create in-memory data model.\"",
",",
"orig_exc",
"=",
"exc",
")",
"self",
".",
"load_all_model_data",
"(",
"models",
")"
] | Creates the in-memory SQLite database from the model
configuration. | [
"Creates",
"the",
"in",
"-",
"memory",
"SQLite",
"database",
"from",
"the",
"model",
"configuration",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/database.py#L106-L125 | train |
thanethomson/statik | statik/database.py | StatikDatabase.sort_models | def sort_models(self):
"""Sorts the database models appropriately based on their relationships so that we load our data
in the appropriate order.
Returns:
A sorted list containing the names of the models.
"""
model_names = [
table.name for table in self.Base.metadata.sorted_tables if table.name in self.models
]
logger.debug("Unsorted models: %s", model_names)
model_count = len(model_names)
swapped = True
sort_round = 0
while swapped:
sort_round += 1
logger.debug('Sorting round: %d (%s)', sort_round, model_names)
sorted_models = []
for i in range(model_count):
model = self.models[model_names[i]]
# check if this model has any dependencies which haven't been taken care of in this round
for foreign_model_name in model.foreign_models:
if foreign_model_name not in sorted_models:
sorted_models.append(foreign_model_name)
if model.name not in sorted_models:
sorted_models.append(model.name)
# we're done here (no changes after this sorting round)
if model_names == sorted_models:
swapped = False
model_names = sorted_models
logger.debug("Sorted models: %s (%d rounds)", model_names, sort_round)
return model_names | python | def sort_models(self):
"""Sorts the database models appropriately based on their relationships so that we load our data
in the appropriate order.
Returns:
A sorted list containing the names of the models.
"""
model_names = [
table.name for table in self.Base.metadata.sorted_tables if table.name in self.models
]
logger.debug("Unsorted models: %s", model_names)
model_count = len(model_names)
swapped = True
sort_round = 0
while swapped:
sort_round += 1
logger.debug('Sorting round: %d (%s)', sort_round, model_names)
sorted_models = []
for i in range(model_count):
model = self.models[model_names[i]]
# check if this model has any dependencies which haven't been taken care of in this round
for foreign_model_name in model.foreign_models:
if foreign_model_name not in sorted_models:
sorted_models.append(foreign_model_name)
if model.name not in sorted_models:
sorted_models.append(model.name)
# we're done here (no changes after this sorting round)
if model_names == sorted_models:
swapped = False
model_names = sorted_models
logger.debug("Sorted models: %s (%d rounds)", model_names, sort_round)
return model_names | [
"def",
"sort_models",
"(",
"self",
")",
":",
"model_names",
"=",
"[",
"table",
".",
"name",
"for",
"table",
"in",
"self",
".",
"Base",
".",
"metadata",
".",
"sorted_tables",
"if",
"table",
".",
"name",
"in",
"self",
".",
"models",
"]",
"logger",
".",
"debug",
"(",
"\"Unsorted models: %s\"",
",",
"model_names",
")",
"model_count",
"=",
"len",
"(",
"model_names",
")",
"swapped",
"=",
"True",
"sort_round",
"=",
"0",
"while",
"swapped",
":",
"sort_round",
"+=",
"1",
"logger",
".",
"debug",
"(",
"'Sorting round: %d (%s)'",
",",
"sort_round",
",",
"model_names",
")",
"sorted_models",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"model_count",
")",
":",
"model",
"=",
"self",
".",
"models",
"[",
"model_names",
"[",
"i",
"]",
"]",
"# check if this model has any dependencies which haven't been taken care of in this round",
"for",
"foreign_model_name",
"in",
"model",
".",
"foreign_models",
":",
"if",
"foreign_model_name",
"not",
"in",
"sorted_models",
":",
"sorted_models",
".",
"append",
"(",
"foreign_model_name",
")",
"if",
"model",
".",
"name",
"not",
"in",
"sorted_models",
":",
"sorted_models",
".",
"append",
"(",
"model",
".",
"name",
")",
"# we're done here (no changes after this sorting round)",
"if",
"model_names",
"==",
"sorted_models",
":",
"swapped",
"=",
"False",
"model_names",
"=",
"sorted_models",
"logger",
".",
"debug",
"(",
"\"Sorted models: %s (%d rounds)\"",
",",
"model_names",
",",
"sort_round",
")",
"return",
"model_names"
] | Sorts the database models appropriately based on their relationships so that we load our data
in the appropriate order.
Returns:
A sorted list containing the names of the models. | [
"Sorts",
"the",
"database",
"models",
"appropriately",
"based",
"on",
"their",
"relationships",
"so",
"that",
"we",
"load",
"our",
"data",
"in",
"the",
"appropriate",
"order",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/database.py#L141-L178 | train |
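sort_models is effectively a fixed-point pass over the model dependency graph; the same ordering can be reproduced on plain data. A self-contained sketch, with the model names and dependency map invented for illustration:

def sort_by_dependencies(names, deps):
    # Rebuild the list each round, hoisting a model's dependencies ahead
    # of it, until a round produces no change (the swapped flag above).
    while True:
        ordered = []
        for name in names:
            for dep in deps.get(name, []):
                if dep not in ordered:
                    ordered.append(dep)
            if name not in ordered:
                ordered.append(name)
        if ordered == names:
            return ordered
        names = ordered

print(sort_by_dependencies(['Post', 'Author'], {'Post': ['Author']}))
# -> ['Author', 'Post']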
thanethomson/statik | statik/database.py | StatikDatabase.create_model_table | def create_model_table(self, model):
"""Creates the table for the given model.
Args:
model: A StatikModel instance.
Returns:
A SQLAlchemy model instance for the table corresponding to this
particular model.
"""
try:
return db_model_factory(self.Base, model, self.models)
except Exception as exc:
raise ModelError(
model.name,
message="failed to create in-memory table.",
orig_exc=exc,
context=self.error_context
) | python | def create_model_table(self, model):
"""Creates the table for the given model.
Args:
model: A StatikModel instance.
Returns:
A SQLAlchemy model instance for the table corresponding to this
particular model.
"""
try:
return db_model_factory(self.Base, model, self.models)
except Exception as exc:
raise ModelError(
model.name,
message="failed to create in-memory table.",
orig_exc=exc,
context=self.error_context
) | [
"def",
"create_model_table",
"(",
"self",
",",
"model",
")",
":",
"try",
":",
"return",
"db_model_factory",
"(",
"self",
".",
"Base",
",",
"model",
",",
"self",
".",
"models",
")",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"ModelError",
"(",
"model",
".",
"name",
",",
"message",
"=",
"\"failed to create in-memory table.\"",
",",
"orig_exc",
"=",
"exc",
",",
"context",
"=",
"self",
".",
"error_context",
")"
] | Creates the table for the given model.
Args:
model: A StatikModel instance.
Returns:
A SQLAlchemy model instance for the table corresponding to this
particular model. | [
"Creates",
"the",
"table",
"for",
"the",
"given",
"model",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/database.py#L180-L198 | train |
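db_model_factory's body is not part of this file; the usual way to build such a declarative class at runtime, sketched under that assumption (field names invented, assuming SQLAlchemy 1.4+):

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

def simple_model_factory(base, name, columns):
    # Assemble class attributes and create the model with type(); statik's
    # real factory presumably also wires up relationships between models.
    attrs = {'__tablename__': name, 'pk': Column(Integer, primary_key=True)}
    attrs.update(columns)
    return type(name, (base,), attrs)

Post = simple_model_factory(Base, 'Post', {'title': Column(String)})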
thanethomson/statik | statik/database.py | StatikDatabase.load_model_data | def load_model_data(self, path, model):
"""Loads the data for the specified model from the given path.
"""
if os.path.isdir(path):
# try to find a model data collection
if os.path.isfile(os.path.join(path, '_all.yml')):
self.load_model_data_collection(path, model)
self.load_model_data_from_files(path, model)
self.session.commit() | python | def load_model_data(self, path, model):
"""Loads the data for the specified model from the given path.
"""
if os.path.isdir(path):
# try to find a model data collection
if os.path.isfile(os.path.join(path, '_all.yml')):
self.load_model_data_collection(path, model)
self.load_model_data_from_files(path, model)
self.session.commit() | [
"def",
"load_model_data",
"(",
"self",
",",
"path",
",",
"model",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"# try find a model data collection",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'_all.yml'",
")",
")",
":",
"self",
".",
"load_model_data_collection",
"(",
"path",
",",
"model",
")",
"self",
".",
"load_model_data_from_files",
"(",
"path",
",",
"model",
")",
"self",
".",
"session",
".",
"commit",
"(",
")"
] | Loads the data for the specified model from the given path. | [
"Loads",
"the",
"data",
"for",
"the",
"specified",
"model",
"from",
"the",
"given",
"path",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/database.py#L200-L208 | train |
thanethomson/statik | statik/database.py | StatikDatabase.query | def query(self, query, additional_locals=None, safe_mode=False):
"""Executes the given SQLAlchemy query string.
Args:
query: The SQLAlchemy ORM query (or Python code) to be executed.
additional_locals: Any additional local variables to inject into the execution context
when executing the query.
safe_mode: Boolean value indicating whether or not to execute queries in safe mode
only. If True, this only allows MLAlchemy-style queries. If False, this allows
both exec() and MLAlchemy-style queries. Default: False.
Returns:
The result of executing the query.
"""
logger.debug("Attempting to execute database query: %s", query)
if safe_mode and not isinstance(query, dict):
raise SafetyViolationError(
context=self.error_context
)
if isinstance(query, dict):
logger.debug("Executing query in safe mode (MLAlchemy)")
return mlalchemy.parse_query(query).to_sqlalchemy(self.session, self.tables).all()
else:
logger.debug("Executing unsafe query (Python exec())")
if additional_locals is not None:
for k, v in iteritems(additional_locals):
locals()[k] = v
exec(
compile(
'result = %s' % query.strip(),
'<string>',
'exec'
),
globals(),
locals()
)
return locals()['result'] | python | def query(self, query, additional_locals=None, safe_mode=False):
"""Executes the given SQLAlchemy query string.
Args:
query: The SQLAlchemy ORM query (or Python code) to be executed.
additional_locals: Any additional local variables to inject into the execution context
when executing the query.
safe_mode: Boolean value indicating whether or not to execute queries in safe mode
only. If True, this only allows MLAlchemy-style queries. If False, this allows
both exec() and MLAlchemy-style queries. Default: False.
Returns:
The result of executing the query.
"""
logger.debug("Attempting to execute database query: %s", query)
if safe_mode and not isinstance(query, dict):
raise SafetyViolationError(
context=self.error_context
)
if isinstance(query, dict):
logger.debug("Executing query in safe mode (MLAlchemy)")
return mlalchemy.parse_query(query).to_sqlalchemy(self.session, self.tables).all()
else:
logger.debug("Executing unsafe query (Python exec())")
if additional_locals is not None:
for k, v in iteritems(additional_locals):
locals()[k] = v
exec(
compile(
'result = %s' % query.strip(),
'<string>',
'exec'
),
globals(),
locals()
)
return locals()['result'] | [
"def",
"query",
"(",
"self",
",",
"query",
",",
"additional_locals",
"=",
"None",
",",
"safe_mode",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Attempting to execute database query: %s\"",
",",
"query",
")",
"if",
"safe_mode",
"and",
"not",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"raise",
"SafetyViolationError",
"(",
"context",
"=",
"self",
".",
"error_context",
")",
"if",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"logger",
".",
"debug",
"(",
"\"Executing query in safe mode (MLAlchemy)\"",
")",
"return",
"mlalchemy",
".",
"parse_query",
"(",
"query",
")",
".",
"to_sqlalchemy",
"(",
"self",
".",
"session",
",",
"self",
".",
"tables",
")",
".",
"all",
"(",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Executing unsafe query (Python exec())\"",
")",
"if",
"additional_locals",
"is",
"not",
"None",
":",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"additional_locals",
")",
":",
"locals",
"(",
")",
"[",
"k",
"]",
"=",
"v",
"exec",
"(",
"compile",
"(",
"'result = %s'",
"%",
"query",
".",
"strip",
"(",
")",
",",
"'<string>'",
",",
"'exec'",
")",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
")",
"return",
"locals",
"(",
")",
"[",
"'result'",
"]"
] | Executes the given SQLAlchemy query string.
Args:
query: The SQLAlchemy ORM query (or Python code) to be executed.
additional_locals: Any additional local variables to inject into the execution context
when executing the query.
safe_mode: Boolean value indicating whether or not to execute queries in safe mode
only. If True, this only allows MLAlchemy-style queries. If False, this allows
both exec() and MLAlchemy-style queries. Default: False.
Returns:
The result of executing the query. | [
"Executes",
"the",
"given",
"SQLAlchemy",
"query",
"string",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/database.py#L328-L367 | train |
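Both query paths can be exercised directly; a hypothetical call, assuming a StatikDatabase instance db whose project defines a Post model (all names here are invented for illustration):

# Safe mode: an MLAlchemy-style dict is parsed rather than exec()'d.
recent = db.query(
    {'from': 'Post', 'order-by': ['-published'], 'limit': 5},
    safe_mode=True,
)

# Unsafe mode: arbitrary ORM code via exec(); names the snippet needs are
# injected through additional_locals (statik's view layer presumably does
# this wiring itself).
first = db.query(
    "session.query(Post).filter(Post.title == wanted).first()",
    additional_locals={'session': db.session, 'Post': Post, 'wanted': 'Hello'},
)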
thanethomson/statik | statik/generator.py | generate | def generate(input_path, output_path=None, in_memory=False, safe_mode=False, error_context=None):
"""Executes the Statik site generator using the given parameters.
"""
project = StatikProject(input_path, safe_mode=safe_mode, error_context=error_context)
return project.generate(output_path=output_path, in_memory=in_memory) | python | def generate(input_path, output_path=None, in_memory=False, safe_mode=False, error_context=None):
"""Executes the Statik site generator using the given parameters.
"""
project = StatikProject(input_path, safe_mode=safe_mode, error_context=error_context)
return project.generate(output_path=output_path, in_memory=in_memory) | [
"def",
"generate",
"(",
"input_path",
",",
"output_path",
"=",
"None",
",",
"in_memory",
"=",
"False",
",",
"safe_mode",
"=",
"False",
",",
"error_context",
"=",
"None",
")",
":",
"project",
"=",
"StatikProject",
"(",
"input_path",
",",
"safe_mode",
"=",
"safe_mode",
",",
"error_context",
"=",
"error_context",
")",
"return",
"project",
".",
"generate",
"(",
"output_path",
"=",
"output_path",
",",
"in_memory",
"=",
"in_memory",
")"
] | Executes the Statik site generator using the given parameters. | [
"Executes",
"the",
"Statik",
"site",
"generator",
"using",
"the",
"given",
"parameters",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/generator.py#L11-L15 | train |
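Typical calls into this entry point, with the paths invented for illustration:

from statik.generator import generate

# Write the rendered site to disk; returns the number of files written.
file_count = generate('/path/to/project', output_path='/path/to/output')

# Or keep the whole build in memory as a nested dict of name -> content.
pages = generate('/path/to/project', in_memory=True)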
thanethomson/statik | statik/project.py | StatikProject.generate | def generate(self, output_path=None, in_memory=False):
"""Executes the Statik project generator.
Args:
output_path: The path to which to write output files.
in_memory: Whether or not to generate the results in memory. If True, this will
generate the output result as a dictionary. If False, this will write the output
to files in the output_path.
Returns:
If in_memory is True, this returns a dictionary containing the actual generated static
content. If in_memory is False, this returns an integer indicating the number of files
generated in the output path.
"""
result = dict() if in_memory else 0
logger.info("Generating Statik build...")
try:
if output_path is None and not in_memory:
raise InternalError(
"If project is not to be generated in-memory, an output path must be specified"
)
self.error_context.update(filename=self.config_file_path)
self.config = self.config or StatikConfig(self.config_file_path)
logger.debug("Using encoding: %s", self.config.encoding)
self.error_context.clear()
self.models = self.load_models()
self.template_engine = StatikTemplateEngine(self)
if self.config.external_database is not None:
self.config.external_database.write_files(output_path, self.models)
self.views = self.load_views()
if not self.views:
raise NoViewsError()
self.db = self.load_db_data(self.models)
self.project_context = self.load_project_context()
in_memory_result = self.process_views()
if in_memory:
result = in_memory_result
else:
# dump the in-memory output to files
file_count = self.dump_in_memory_result(in_memory_result, output_path)
logger.info('Wrote %d output file(s) to folder: %s', file_count, output_path)
# copy any assets across, recursively
self.copy_assets(output_path)
result = file_count
logger.info("Success!")
except StatikError as exc:
logger.debug(traceback.format_exc())
logger.error(exc.render())
# re-raise the error to stop execution
raise exc
except Exception as exc:
logger.debug(traceback.format_exc())
_exc = StatikError(
message="Failed to build project. Run Statik in verbose mode (-v) to see " +
"additional traceback information about this error.",
orig_exc=exc,
context=self.error_context
)
logger.error(_exc.render())
raise _exc
finally:
try:
# make sure to destroy the database engine (to provide for the possibility of
# database engine reloads when watching for changes)
if self.db is not None:
self.db.shutdown()
except Exception as e:
logger.exception("Unable to clean up properly: %s", e)
return result | python | def generate(self, output_path=None, in_memory=False):
"""Executes the Statik project generator.
Args:
output_path: The path to which to write output files.
in_memory: Whether or not to generate the results in memory. If True, this will
generate the output result as a dictionary. If False, this will write the output
to files in the output_path.
Returns:
If in_memory is True, this returns a dictionary containing the actual generated static
content. If in_memory is False, this returns an integer indicating the number of files
generated in the output path.
"""
result = dict() if in_memory else 0
logger.info("Generating Statik build...")
try:
if output_path is None and not in_memory:
raise InternalError(
"If project is not to be generated in-memory, an output path must be specified"
)
self.error_context.update(filename=self.config_file_path)
self.config = self.config or StatikConfig(self.config_file_path)
logger.debug("Using encoding: %s", self.config.encoding)
self.error_context.clear()
self.models = self.load_models()
self.template_engine = StatikTemplateEngine(self)
if self.config.external_database is not None:
self.config.external_database.write_files(output_path, self.models)
self.views = self.load_views()
if not self.views:
raise NoViewsError()
self.db = self.load_db_data(self.models)
self.project_context = self.load_project_context()
in_memory_result = self.process_views()
if in_memory:
result = in_memory_result
else:
# dump the in-memory output to files
file_count = self.dump_in_memory_result(in_memory_result, output_path)
logger.info('Wrote %d output file(s) to folder: %s', file_count, output_path)
# copy any assets across, recursively
self.copy_assets(output_path)
result = file_count
logger.info("Success!")
except StatikError as exc:
logger.debug(traceback.format_exc())
logger.error(exc.render())
# re-raise the error to stop execution
raise exc
except Exception as exc:
logger.debug(traceback.format_exc())
_exc = StatikError(
message="Failed to build project. Run Statik in verbose mode (-v) to see " +
"additional traceback information about this error.",
orig_exc=exc,
context=self.error_context
)
logger.error(_exc.render())
raise _exc
finally:
try:
# make sure to destroy the database engine (to provide for the possibility of
# database engine reloads when watching for changes)
if self.db is not None:
self.db.shutdown()
except Exception as e:
logger.exception("Unable to clean up properly: %s", e)
return result | [
"def",
"generate",
"(",
"self",
",",
"output_path",
"=",
"None",
",",
"in_memory",
"=",
"False",
")",
":",
"result",
"=",
"dict",
"(",
")",
"if",
"in_memory",
"else",
"0",
"logger",
".",
"info",
"(",
"\"Generating Statik build...\"",
")",
"try",
":",
"if",
"output_path",
"is",
"None",
"and",
"not",
"in_memory",
":",
"raise",
"InternalError",
"(",
"\"If project is not to be generated in-memory, an output path must be specified\"",
")",
"self",
".",
"error_context",
".",
"update",
"(",
"filename",
"=",
"self",
".",
"config_file_path",
")",
"self",
".",
"config",
"=",
"self",
".",
"config",
"or",
"StatikConfig",
"(",
"self",
".",
"config_file_path",
")",
"if",
"self",
".",
"config",
".",
"encoding",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Using encoding: %s\"",
",",
"self",
".",
"config",
".",
"encoding",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Using encoding: %s\"",
",",
"self",
".",
"config",
".",
"encoding",
")",
"self",
".",
"error_context",
".",
"clear",
"(",
")",
"self",
".",
"models",
"=",
"self",
".",
"load_models",
"(",
")",
"self",
".",
"template_engine",
"=",
"StatikTemplateEngine",
"(",
"self",
")",
"if",
"self",
".",
"config",
".",
"external_database",
"is",
"not",
"None",
":",
"self",
".",
"config",
".",
"external_database",
".",
"write_files",
"(",
"output_path",
",",
"self",
".",
"models",
")",
"self",
".",
"views",
"=",
"self",
".",
"load_views",
"(",
")",
"if",
"not",
"self",
".",
"views",
":",
"raise",
"NoViewsError",
"(",
")",
"self",
".",
"db",
"=",
"self",
".",
"load_db_data",
"(",
"self",
".",
"models",
")",
"self",
".",
"project_context",
"=",
"self",
".",
"load_project_context",
"(",
")",
"in_memory_result",
"=",
"self",
".",
"process_views",
"(",
")",
"if",
"in_memory",
":",
"result",
"=",
"in_memory_result",
"else",
":",
"# dump the in-memory output to files",
"file_count",
"=",
"self",
".",
"dump_in_memory_result",
"(",
"in_memory_result",
",",
"output_path",
")",
"logger",
".",
"info",
"(",
"'Wrote %d output file(s) to folder: %s'",
",",
"file_count",
",",
"output_path",
")",
"# copy any assets across, recursively",
"self",
".",
"copy_assets",
"(",
"output_path",
")",
"result",
"=",
"file_count",
"logger",
".",
"info",
"(",
"\"Success!\"",
")",
"except",
"StatikError",
"as",
"exc",
":",
"logger",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"logger",
".",
"error",
"(",
"exc",
".",
"render",
"(",
")",
")",
"# re-raise the error to stop execution",
"raise",
"exc",
"except",
"Exception",
"as",
"exc",
":",
"logger",
".",
"debug",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"_exc",
"=",
"StatikError",
"(",
"message",
"=",
"\"Failed to build project. Run Statik in verbose mode (-v) to see \"",
"+",
"\"additional traceback information about this error.\"",
",",
"orig_exc",
"=",
"exc",
",",
"context",
"=",
"self",
".",
"error_context",
")",
"logger",
".",
"error",
"(",
"_exc",
".",
"render",
"(",
")",
")",
"raise",
"_exc",
"finally",
":",
"try",
":",
"# make sure to destroy the database engine (to provide for the possibility of",
"# database engine reloads when watching for changes)",
"if",
"self",
".",
"db",
"is",
"not",
"None",
":",
"self",
".",
"db",
".",
"shutdown",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"\"Unable to clean up properly: %s\"",
",",
"e",
")",
"return",
"result"
] | Executes the Statik project generator.
Args:
output_path: The path to which to write output files.
in_memory: Whether or not to generate the results in memory. If True, this will
generate the output result as a dictionary. If False, this will write the output
to files in the output_path.
Returns:
If in_memory is True, this returns a dictionary containing the actual generated static
content. If in_memory is False, this returns an integer indicating the number of files
generated in the output path. | [
"Executes",
"the",
"Statik",
"project",
"generator",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/project.py#L71-L156 | train |
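Because generate() logs and then re-raises StatikError, a caller driving builds itself can catch it cleanly; a sketch (the statik.errors import path is an assumption, the rest follows the signatures shown in these records):

from statik.errors import StatikError
from statik.project import StatikProject

project = StatikProject('/path/to/project', safe_mode=True)
try:
    file_count = project.generate(output_path='/tmp/site')
except StatikError:
    # Already rendered to the error log by generate(); decide here whether
    # to retry, keep watching for changes, or exit non-zero.
    raise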
thanethomson/statik | statik/project.py | StatikProject.load_views | def load_views(self):
"""Loads the views for this project from the project directory
structure."""
view_path = os.path.join(self.path, StatikProject.VIEWS_DIR)
logger.debug("Loading views from: %s", view_path)
if not os.path.isdir(view_path):
raise MissingProjectFolderError(StatikProject.VIEWS_DIR)
view_files = list_files(view_path, ['yml', 'yaml'])
logger.debug("Found %d view(s) in project", len(view_files))
views = {}
for view_file in view_files:
view_name = extract_filename(view_file)
views[view_name] = StatikView(
filename=os.path.join(view_path, view_file),
encoding=self.config.encoding,
name=view_name,
models=self.models,
template_engine=self.template_engine,
error_context=self.error_context
)
return views | python | def load_views(self):
"""Loads the views for this project from the project directory
structure."""
view_path = os.path.join(self.path, StatikProject.VIEWS_DIR)
logger.debug("Loading views from: %s", view_path)
if not os.path.isdir(view_path):
raise MissingProjectFolderError(StatikProject.VIEWS_DIR)
view_files = list_files(view_path, ['yml', 'yaml'])
logger.debug("Found %d view(s) in project", len(view_files))
views = {}
for view_file in view_files:
view_name = extract_filename(view_file)
views[view_name] = StatikView(
filename=os.path.join(view_path, view_file),
encoding=self.config.encoding,
name=view_name,
models=self.models,
template_engine=self.template_engine,
error_context=self.error_context
)
return views | [
"def",
"load_views",
"(",
"self",
")",
":",
"view_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"StatikProject",
".",
"VIEWS_DIR",
")",
"logger",
".",
"debug",
"(",
"\"Loading views from: %s\"",
",",
"view_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"view_path",
")",
":",
"raise",
"MissingProjectFolderError",
"(",
"StatikProject",
".",
"VIEWS_DIR",
")",
"view_files",
"=",
"list_files",
"(",
"view_path",
",",
"[",
"'yml'",
",",
"'yaml'",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Found %d view(s) in project\"",
",",
"len",
"(",
"view_files",
")",
")",
"views",
"=",
"{",
"}",
"for",
"view_file",
"in",
"view_files",
":",
"view_name",
"=",
"extract_filename",
"(",
"view_file",
")",
"views",
"[",
"view_name",
"]",
"=",
"StatikView",
"(",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"view_path",
",",
"view_file",
")",
",",
"encoding",
"=",
"self",
".",
"config",
".",
"encoding",
",",
"name",
"=",
"view_name",
",",
"models",
"=",
"self",
".",
"models",
",",
"template_engine",
"=",
"self",
".",
"template_engine",
",",
"error_context",
"=",
"self",
".",
"error_context",
")",
"return",
"views"
] | Loads the views for this project from the project directory
structure. | [
"Loads",
"the",
"views",
"for",
"this",
"project",
"from",
"the",
"project",
"directory",
"structure",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/project.py#L181-L203 | train |
thanethomson/statik | statik/project.py | StatikProject.process_views | def process_views(self):
"""Processes the loaded views to generate the required output data."""
output = {}
logger.debug("Processing %d view(s)...", len(self.views))
for view_name, view in iteritems(self.views):
try:
output = deep_merge_dict(
output,
view.process(
self.db,
safe_mode=self.safe_mode,
extra_context=self.project_context
)
)
except StatikError as exc:
# just re-raise it
raise exc
except Exception as exc:
# for unhandled view-related exceptions, raise our own exception
raise ViewError(
message="Failed to render view \"%s\"." % view_name,
orig_exc=exc
)
return output | python | def process_views(self):
"""Processes the loaded views to generate the required output data."""
output = {}
logger.debug("Processing %d view(s)...", len(self.views))
for view_name, view in iteritems(self.views):
try:
output = deep_merge_dict(
output,
view.process(
self.db,
safe_mode=self.safe_mode,
extra_context=self.project_context
)
)
except StatikError as exc:
# just re-raise it
raise exc
except Exception as exc:
# for unhandled view-related exceptions, raise our own exception
raise ViewError(
message="Failed to render view \"%s\"." % view_name,
orig_exc=exc
)
return output | [
"def",
"process_views",
"(",
"self",
")",
":",
"output",
"=",
"{",
"}",
"logger",
".",
"debug",
"(",
"\"Processing %d view(s)...\"",
",",
"len",
"(",
"self",
".",
"views",
")",
")",
"for",
"view_name",
",",
"view",
"in",
"iteritems",
"(",
"self",
".",
"views",
")",
":",
"try",
":",
"output",
"=",
"deep_merge_dict",
"(",
"output",
",",
"view",
".",
"process",
"(",
"self",
".",
"db",
",",
"safe_mode",
"=",
"self",
".",
"safe_mode",
",",
"extra_context",
"=",
"self",
".",
"project_context",
")",
")",
"except",
"StatikError",
"as",
"exc",
":",
"# just re-raise it",
"raise",
"exc",
"except",
"Exception",
"as",
"exc",
":",
"# for unhandled view-related exceptions, raise our own exception",
"raise",
"ViewError",
"(",
"message",
"=",
"\"Failed to render view \\\"%s\\\".\"",
"%",
"view_name",
",",
"orig_exc",
"=",
"exc",
")",
"return",
"output"
] | Processes the loaded views to generate the required output data. | [
"Processes",
"the",
"loaded",
"views",
"to",
"generate",
"the",
"required",
"output",
"data",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/project.py#L243-L268 | train |
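deep_merge_dict is imported from elsewhere in the package; a minimal recursive merge with the semantics this loop relies on, sketched as an assumption about its behaviour (later views win on conflicting leaves):

def deep_merge_dict(a, b):
    # Return a new dict: nested dicts are merged key by key, and any
    # non-dict value from b replaces the corresponding value from a.
    merged = dict(a)
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge_dict(merged[key], value)
        else:
            merged[key] = value
    return merged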
thanethomson/statik | statik/project.py | StatikProject.dump_in_memory_result | def dump_in_memory_result(self, result, output_path):
"""Recursively dumps the result of our processing into files within the
given output path.
Args:
result: The in-memory result of our processing.
output_path: Full path to the folder into which to dump the files.
Returns:
The number of files generated (integer).
"""
file_count = 0
logger.debug("Dumping in-memory processing results to output folder: %s", output_path)
for k, v in iteritems(result):
cur_output_path = os.path.join(output_path, k)
if isinstance(v, dict):
file_count += self.dump_in_memory_result(v, cur_output_path)
else:
if not os.path.isdir(output_path):
os.makedirs(output_path)
filename = os.path.join(output_path, k)
logger.debug("Writing output file: %s", filename)
# dump the contents of the file
with open(filename, 'wt', encoding=self.config.encoding) as f:
f.write(v)
file_count += 1
return file_count | python | def dump_in_memory_result(self, result, output_path):
"""Recursively dumps the result of our processing into files within the
given output path.
Args:
result: The in-memory result of our processing.
output_path: Full path to the folder into which to dump the files.
Returns:
The number of files generated (integer).
"""
file_count = 0
logger.debug("Dumping in-memory processing results to output folder: %s", output_path)
for k, v in iteritems(result):
cur_output_path = os.path.join(output_path, k)
if isinstance(v, dict):
file_count += self.dump_in_memory_result(v, cur_output_path)
else:
if not os.path.isdir(output_path):
os.makedirs(output_path)
filename = os.path.join(output_path, k)
logger.debug("Writing output file: %s", filename)
# dump the contents of the file
with open(filename, 'wt', encoding=self.config.encoding) as f:
f.write(v)
file_count += 1
return file_count | [
"def",
"dump_in_memory_result",
"(",
"self",
",",
"result",
",",
"output_path",
")",
":",
"file_count",
"=",
"0",
"logger",
".",
"debug",
"(",
"\"Dumping in-memory processing results to output folder: %s\"",
",",
"output_path",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"result",
")",
":",
"cur_output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_path",
",",
"k",
")",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"file_count",
"+=",
"self",
".",
"dump_in_memory_result",
"(",
"v",
",",
"cur_output_path",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"output_path",
")",
":",
"os",
".",
"makedirs",
"(",
"output_path",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_path",
",",
"k",
")",
"logger",
".",
"debug",
"(",
"\"Writing output file: %s\"",
",",
"filename",
")",
"# dump the contents of the file",
"with",
"open",
"(",
"filename",
",",
"'wt'",
",",
"encoding",
"=",
"self",
".",
"config",
".",
"encoding",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"v",
")",
"file_count",
"+=",
"1",
"return",
"file_count"
] | Recursively dumps the result of our processing into files within the
given output path.
Args:
result: The in-memory result of our processing.
output_path: Full path to the folder into which to dump the files.
Returns:
The number of files generated (integer). | [
"Recursively",
"dumps",
"the",
"result",
"of",
"our",
"processing",
"into",
"files",
"within",
"the",
"given",
"output",
"path",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/project.py#L270-L300 | train |
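The recursion treats nested dicts as directories and everything else as file contents; a small illustration with made-up pages:

result = {
    'index.html': '<html>home</html>',
    'posts': {
        'hello-world': {'index.html': '<html>post</html>'},
    },
}
# dump_in_memory_result(result, '/tmp/site') would write two files,
#   /tmp/site/index.html
#   /tmp/site/posts/hello-world/index.html
# and return 2.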
thanethomson/statik | statik/project.py | StatikProject.copy_assets | def copy_assets(self, output_path):
"""Copies all asset files from the source path to the destination
path. If no such source path exists, no asset copying will be performed.
"""
src_paths = []
# if we have a theme
if self.config.theme is not None:
# assume it's in the folder: "themes/theme_name/assets"
src_paths.append(os.path.join(
self.path,
StatikProject.THEMES_DIR,
self.config.theme,
StatikProject.ASSETS_DIR
))
# NOTE: Adding the theme's assets directory *before* the project's internal assets
# directory always ensures that the project's own assets are copied *after* the
# theme's, thereby ensuring that the project's assets folder takes precedence
# over the theme's.
# always attempt to copy from our base assets folder
if os.path.isabs(self.config.assets_src_path):
src_paths.append(self.config.assets_src_path)
else:
src_paths.append(os.path.join(self.path, self.config.assets_src_path))
for src_path in src_paths:
if os.path.exists(src_path) and os.path.isdir(src_path):
dest_path = self.config.assets_dest_path
if not os.path.isabs(dest_path):
dest_path = os.path.join(output_path, dest_path)
asset_count = copy_tree(src_path, dest_path)
logger.info("Copied %s asset(s) from %s to %s", asset_count, src_path, dest_path)
else:
logger.info(
"Missing assets source path - skipping copying of assets: %s",
src_path
) | python | def copy_assets(self, output_path):
"""Copies all asset files from the source path to the destination
path. If no such source path exists, no asset copying will be performed.
"""
src_paths = []
# if we have a theme
if self.config.theme is not None:
# assume it's in the folder: "themes/theme_name/assets"
src_paths.append(os.path.join(
self.path,
StatikProject.THEMES_DIR,
self.config.theme,
StatikProject.ASSETS_DIR
))
# NOTE: Adding the theme's assets directory *before* the project's internal assets
# directory always ensures that the project's own assets are copied *after* the
# theme's, thereby ensuring that the project's assets folder takes precedence
# over the theme's.
# always attempt to copy from our base assets folder
if os.path.isabs(self.config.assets_src_path):
src_paths.append(self.config.assets_src_path)
else:
src_paths.append(os.path.join(self.path, self.config.assets_src_path))
for src_path in src_paths:
if os.path.exists(src_path) and os.path.isdir(src_path):
dest_path = self.config.assets_dest_path
if not os.path.isabs(dest_path):
dest_path = os.path.join(output_path, dest_path)
asset_count = copy_tree(src_path, dest_path)
logger.info("Copied %s asset(s) from %s to %s", asset_count, src_path, dest_path)
else:
logger.info(
"Missing assets source path - skipping copying of assets: %s",
src_path
) | [
"def",
"copy_assets",
"(",
"self",
",",
"output_path",
")",
":",
"src_paths",
"=",
"[",
"]",
"# if we have a theme",
"if",
"self",
".",
"config",
".",
"theme",
"is",
"not",
"None",
":",
"# assume it's in the folder: \"themes/theme_name/assets\"",
"src_paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"StatikProject",
".",
"THEMES_DIR",
",",
"self",
".",
"config",
".",
"theme",
",",
"StatikProject",
".",
"ASSETS_DIR",
")",
")",
"# NOTE: Adding the theme's assets directory *before* the project's internal assets",
"# directory always ensures that the project's own assets are copied *after* the",
"# theme's, thereby ensuring that the project's assets folder takes precedence",
"# over the theme's.",
"# always attempt to copy from our base assets folder",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"self",
".",
"config",
".",
"assets_src_path",
")",
":",
"src_paths",
".",
"append",
"(",
"self",
".",
"config",
".",
"assets_src_path",
")",
"else",
":",
"src_paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"self",
".",
"config",
".",
"assets_src_path",
")",
")",
"for",
"src_path",
"in",
"src_paths",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"src_path",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"src_path",
")",
":",
"dest_path",
"=",
"self",
".",
"config",
".",
"assets_dest_path",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"dest_path",
")",
":",
"dest_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_path",
",",
"dest_path",
")",
"asset_count",
"=",
"copy_tree",
"(",
"src_path",
",",
"dest_path",
")",
"logger",
".",
"info",
"(",
"\"Copied %s asset(s) from %s to %s\"",
",",
"asset_count",
",",
"src_path",
",",
"dest_path",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Missing assets source path - skipping copying of assets: %s\"",
",",
"src_path",
")"
] | Copies all asset files from the source path to the destination
path. If no such source path exists, no asset copying will be performed. | [
"Copies",
"all",
"asset",
"files",
"from",
"the",
"source",
"path",
"to",
"the",
"destination",
"path",
".",
"If",
"no",
"such",
"source",
"path",
"exists",
"no",
"asset",
"copying",
"will",
"be",
"performed",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/project.py#L302-L340 | train |
thanethomson/statik | statik/autogen.py | autogen | def autogen(project_path):
"""Autogenerates views and templates for all the models in the project."""
generate_quickstart(project_path)
project = StatikProject(project_path)
project.config = StatikConfig(project.config_file_path)
models = list(project.load_models().values())
logger.info('Creating view and template for home page (index.html).')
generate_yaml_file(os.path.join(project_path, StatikProject.VIEWS_DIR, 'index.yaml'),
{
'path': '/',
'template': 'index'
}
)
generate_index_file(os.path.join(project_path, StatikProject.TEMPLATES_DIR, 'index.jinja2'))
for model in models:
logger.info('Creating view and template for model: %s' % model.name)
generate_yaml_file(os.path.join(project_path, StatikProject.VIEWS_DIR, '%s.yaml' % model.name),
{
'path': {
'template': '/%s/{{ %s.pk }}' % (model.name, model.name),
'for-each': {
'%s' % model.name: 'session.query(%s).all()' % model.name
}
},
'template': ('%s' % model.name),
}
)
generate_model_file(os.path.join(project_path, StatikProject.TEMPLATES_DIR, '%s.jinja2' % model.name),
project,
model,
model.fields.values()) | python | def autogen(project_path):
"""Autogenerates views and templates for all the models in the project."""
generate_quickstart(project_path)
project = StatikProject(project_path)
project.config = StatikConfig(project.config_file_path)
models = list(project.load_models().values())
logger.info('Creating view and template for home page (index.html).')
generate_yaml_file(os.path.join(project_path, StatikProject.VIEWS_DIR, 'index.yaml'),
{
'path': '/',
'template': 'index'
}
)
generate_index_file(os.path.join(project_path, StatikProject.TEMPLATES_DIR, 'index.jinja2'))
for model in models:
logger.info('Creating view and template for model: %s' % model.name)
generate_yaml_file(os.path.join(project_path, StatikProject.VIEWS_DIR, '%s.yaml' % model.name),
{
'path': {
'template': '/%s/{{ %s.pk }}' % (model.name, model.name),
'for-each': {
'%s' % model.name: 'session.query(%s).all()' % model.name
}
},
'template': ('%s' % model.name),
}
)
generate_model_file(os.path.join(project_path, StatikProject.TEMPLATES_DIR, '%s.jinja2' % model.name),
project,
model,
model.fields.values()) | [
"def",
"autogen",
"(",
"project_path",
")",
":",
"generate_quickstart",
"(",
"project_path",
")",
"project",
"=",
"StatikProject",
"(",
"project_path",
")",
"project",
".",
"config",
"=",
"StatikConfig",
"(",
"project",
".",
"config_file_path",
")",
"models",
"=",
"list",
"(",
"project",
".",
"load_models",
"(",
")",
".",
"values",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'Creating view and template for home page (index.html).'",
")",
"generate_yaml_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"StatikProject",
".",
"VIEWS_DIR",
",",
"'index.yaml'",
")",
",",
"{",
"'path'",
":",
"'/'",
",",
"'template'",
":",
"'index'",
"}",
")",
"generate_index_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"StatikProject",
".",
"TEMPLATES_DIR",
",",
"'index.jinja2'",
")",
")",
"for",
"model",
"in",
"models",
":",
"logger",
".",
"info",
"(",
"'Creating view and template for model: %s'",
"%",
"model",
".",
"name",
")",
"generate_yaml_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"StatikProject",
".",
"VIEWS_DIR",
",",
"'%s.yaml'",
"%",
"model",
".",
"name",
")",
",",
"{",
"'path'",
":",
"{",
"'template'",
":",
"'/%s/{{ %s.pk }}'",
"%",
"(",
"model",
".",
"name",
",",
"model",
".",
"name",
")",
",",
"'for-each'",
":",
"{",
"'%s'",
"%",
"model",
".",
"name",
":",
"'session.query(%s).all()'",
"%",
"model",
".",
"name",
"}",
"}",
",",
"'template'",
":",
"(",
"'%s'",
"%",
"model",
".",
"name",
")",
",",
"}",
")",
"generate_model_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_path",
",",
"StatikProject",
".",
"TEMPLATES_DIR",
",",
"'%s.jinja2'",
"%",
"model",
".",
"name",
")",
",",
"project",
",",
"model",
",",
"model",
".",
"fields",
".",
"values",
"(",
")",
")"
] | Autogenerates views and templates for all the models in the project. | [
"Autogenerates",
"views",
"and",
"templates",
"for",
"all",
"the",
"models",
"in",
"the",
"project",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/autogen.py#L17-L51 | train |
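For a model named 'post', the view dict assembled above serialises through generate_yaml_file to YAML along these lines (output shown as comments; PyYAML sorts keys alphabetically by default):

import yaml

view = {
    'path': {
        'template': '/post/{{ post.pk }}',
        'for-each': {'post': 'session.query(post).all()'},
    },
    'template': 'post',
}
print(yaml.dump(view, default_flow_style=False))
# path:
#   for-each:
#     post: session.query(post).all()
#   template: /post/{{ post.pk }}
# template: post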
thanethomson/statik | statik/autogen.py | generate_yaml_file | def generate_yaml_file(filename, contents):
"""Creates a yaml file with the given content."""
with open(filename, 'w') as file:
file.write(yaml.dump(contents, default_flow_style=False)) | python | def generate_yaml_file(filename, contents):
"""Creates a yaml file with the given content."""
with open(filename, 'w') as file:
file.write(yaml.dump(contents, default_flow_style=False)) | [
"def",
"generate_yaml_file",
"(",
"filename",
",",
"contents",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"file",
":",
"file",
".",
"write",
"(",
"yaml",
".",
"dump",
"(",
"contents",
",",
"default_flow_style",
"=",
"False",
")",
")"
] | Creates a yaml file with the given content. | [
"Creates",
"a",
"yaml",
"file",
"with",
"the",
"given",
"content",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/autogen.py#L54-L57 | train |
thanethomson/statik | statik/autogen.py | generate_index_file | def generate_index_file(filename):
"""Constructs a default home page for the project."""
with open(filename, 'w') as file:
content = open(os.path.join(os.path.dirname(__file__), 'templates/index_page.html'), 'r').read()
file.write(content) | python | def generate_index_file(filename):
"""Constructs a default home page for the project."""
with open(filename, 'w') as file:
content = open(os.path.join(os.path.dirname(__file__), 'templates/index_page.html'), 'r').read()
file.write(content) | [
"def",
"generate_index_file",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"file",
":",
"content",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'templates/index_page.html'",
")",
",",
"'r'",
")",
".",
"read",
"(",
")",
"file",
".",
"write",
"(",
"content",
")"
] | Constructs a default home page for the project. | [
"Constructs",
"a",
"default",
"home",
"page",
"for",
"the",
"project",
"."
] | 56b1b5a2cb05a97afa81f428bfcefc833e935b8d | https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/autogen.py#L60-L64 | train |
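The read above leaves closing the template handle to CPython's reference counting; a variant that closes both files deterministically, kept as a sketch rather than the project's code:

import os

def generate_index_file_explicit(filename):
    src = os.path.join(os.path.dirname(__file__), 'templates/index_page.html')
    with open(src, 'r') as template, open(filename, 'w') as out:
        out.write(template.read())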