Code
stringlengths 103
85.9k
| Summary
listlengths 0
94
|
---|---|
def reverse_complement(self):
    """Return a new matrix that is the reverse complement of this one.

    Reversing both the rows and the columns swaps A with T and C with
    G, so the result probably only makes sense when the alphabet is
    that of DNA ('A', 'C', 'G', 'T').
    """
    complemented = copy(self)
    # Flipping the value matrix along both axes performs the base
    # complement and the positional reversal in a single step.
    complemented.values = self.values[::-1, ::-1].copy()
    return complemented
def to_logodds_scoring_matrix(self, background=None, correction=DEFAULT_CORRECTION):
    """Create a standard log-odds scoring matrix from this count matrix.

    :param background: per-symbol background frequencies; defaults to a
        uniform distribution over the alphabet.
    :param correction: lower bound applied before taking logs so that
        zero counts/frequencies do not produce -inf.
    :return: a new ScoringMatrix with float32 log-odds values.
    """
    if background is None:
        # Uniform background over every symbol in the alphabet.
        background = ones(len(self.alphabet), float32) / len(self.alphabet)
    # Per-row totals as a one-column array, used to normalise counts to
    # frequencies inside the log.
    row_totals = numpy.sum(self.values, 1)[:, newaxis]
    scores = (log2(maximum(self.values, correction))
              - log2(row_totals)
              - log2(maximum(background, correction)))
    return ScoringMatrix.create_from_other(self, scores.astype(float32))
def to_stormo_scoring_matrix(self, background=None):
    """Create a scoring matrix using the method of Hertz & Stormo.

    Hertz, G.Z. and G.D. Stormo (1999). Identifying DNA and protein
    patterns with statistically significant alignments of multiple
    sequences. Bioinformatics 15(7): 563-577.

    :param background: per-symbol background frequencies; defaults to a
        uniform distribution over the alphabet.
    :return: a new ScoringMatrix with float32 values.
    """
    if background is None:
        background = ones(len(self.alphabet), float32) / len(self.alphabet)
    # Per-row totals as a one-column array; the +1 below matches the
    # pseudocount implied by adding the background to each count.
    row_totals = numpy.sum(self.values, 1)[:, newaxis]
    scores = (log2(self.values + background)
              - log2(row_totals + 1)
              - log2(background))
    return ScoringMatrix.create_from_other(self, scores.astype(float32))
def score_string(self, string):
    """Score each valid position in `string` using this scoring matrix.

    :param string: the sequence to score.
    :return: float32 array with one entry per position; positions that
        were not scored are set to nan.
    """
    scores = zeros(len(string), float32)
    scores[:] = nan
    # The C extension fills in a score for every position it can handle
    # and leaves the remaining entries untouched (nan).
    _pwm.score_string(self.values, self.char_to_index, string, scores)
    return scores
def _get_exchange_key_ntlm_v1(negotiate_flags, session_base_key,
                              server_challenge, lm_challenge_response,
                              lm_hash):
    """[MS-NLMP] v28.0 3.4.5.1 KXKEY for NTLMv1 authentication.

    :param negotiate_flags: the negotiated NTLM flags
    :param session_base_key: session key derived from the user password
        challenge
    :param server_challenge: random 8-byte server challenge from the
        CHALLENGE_MESSAGE
    :param lm_challenge_response: the LmChallengeResponse computed in
        ComputeResponse
    :param lm_hash: the LMOWF computed in ComputeResponse
    :return: the Key Exchange Key (KXKEY) used to sign/seal messages and
        compute the ExportedSessionKey
    """
    if negotiate_flags & \
            NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
        # Extended session security mixes the server challenge and the
        # start of the LM response into the key via HMAC-MD5.
        return hmac.new(session_base_key,
                        server_challenge + lm_challenge_response[:8],
                        digestmod=hashlib.md5).digest()
    if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_LM_KEY:
        # LM key: DES-encrypt the first 8 response bytes under two keys
        # derived from the LM hash.
        first_des = DES(DES.key56_to_key64(lm_hash[:7])).encrypt(
            lm_challenge_response[:8])
        second_key = lm_hash[7:8] + b"\xbd\xbd\xbd\xbd\xbd\xbd"
        second_des = DES(DES.key56_to_key64(second_key)).encrypt(
            lm_challenge_response[:8])
        return first_des + second_des
    if negotiate_flags & NegotiateFlags.NTLMSSP_REQUEST_NON_NT_SESSION_KEY:
        return lm_hash[:8] + b'\0' * 8
    return session_base_key
def _get_seal_key_ntlm1(negotiate_flags, exported_session_key):
    """[MS-NLMP] 3.4.5.3 SEALKEY, non extended-session-security path.

    Weakens the key: 56-bit form when NTLMSSP_NEGOTIATE_56 was
    negotiated, otherwise the 40-bit form.

    :param negotiate_flags: the negotiate_flags structure sent by the
        server
    :param exported_session_key: 128-bit session key used to derive
        signing and sealing keys
    :return: the key used to seal (encrypt) messages
    """
    if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56:
        return exported_session_key[:7] + b"\xa0"
    return exported_session_key[:5] + b"\xe5\x38\xb0"
def get_nt_challenge_response(self, lm_challenge_response,
                              server_certificate_hash=None, cbt_data=None):
    """[MS-NLMP] v28.0 3.3.1 / 3.3.2 -- compute the NtChallengeResponse.

    Combines the NTLMv1, NTLM2 and NTLMv2 flows into one method and
    dispatches on the negotiated flags and the ntlm_compatibility level.

    :param lm_challenge_response: the LmChallengeResponse calculated
        beforehand, used to get the key_exchange_key value
    :param server_certificate_hash: deprecated, use cbt_data instead
    :param cbt_data: the GssChannelBindingsStruct to bind in the NTLM
        response
    :return: (response, key_exchange_key, target_info) where response is
        the NtChallengeResponse to the server challenge, key_exchange_key
        is used for signing/sealing, and target_info is the AV_PAIR
        structure used in the NTLMv2 calculation (None for the v1 paths)
    """
    if self._negotiate_flags & \
            NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY and \
            self._ntlm_compatibility < 3:
        # The compatibility level is less than 3 which means it doesn't
        # support NTLMv2 but we want extended security so use NTLM2 which
        # is different from NTLMv2
        # [MS-NLMP] - 3.3.1 NTLMv1 Authentication
        response, session_base_key = \
            self._get_NTLM2_response(self._password,
                                     self._server_challenge,
                                     self._client_challenge)
        lm_hash = comphash._lmowfv1(self._password)
        key_exchange_key = \
            compkeys._get_exchange_key_ntlm_v1(self._negotiate_flags,
                                               session_base_key,
                                               self._server_challenge,
                                               lm_challenge_response,
                                               lm_hash)
        target_info = None
    elif 0 <= self._ntlm_compatibility < 3:
        # Plain NTLMv1 without extended session security.
        response, session_base_key = \
            self._get_NTLMv1_response(self._password,
                                      self._server_challenge)
        lm_hash = comphash._lmowfv1(self._password)
        key_exchange_key = \
            compkeys._get_exchange_key_ntlm_v1(self._negotiate_flags,
                                               session_base_key,
                                               self._server_challenge,
                                               lm_challenge_response,
                                               lm_hash)
        target_info = None
    else:
        # NTLMv2: build or extend the AV_PAIR target info before
        # computing the response.
        if self._server_target_info is None:
            target_info = ntlm_auth.messages.TargetInfo()
        else:
            target_info = self._server_target_info
        if target_info[AvId.MSV_AV_TIMESTAMP] is None:
            timestamp = get_windows_timestamp()
        else:
            timestamp = target_info[AvId.MSV_AV_TIMESTAMP]
            # [MS-NLMP] If the CHALLENGE_MESSAGE TargetInfo field has an
            # MsvAvTimestamp present, the client SHOULD provide a MIC
            target_info[AvId.MSV_AV_FLAGS] = \
                struct.pack("<L", AvFlags.MIC_PROVIDED)
        if server_certificate_hash is not None and cbt_data is None:
            # Older method of creating CBT struct based on the cert hash.
            # This should be avoided in favour of an explicit
            # GssChannelBindingStruct being passed in.
            certificate_digest = base64.b16decode(server_certificate_hash)
            cbt_data = GssChannelBindingsStruct()
            cbt_data[cbt_data.APPLICATION_DATA] = \
                b'tls-server-end-point:' + certificate_digest
        if cbt_data is not None:
            # Channel bindings are transported as the MD5 of the packed
            # GssChannelBindingsStruct.
            cbt_bytes = cbt_data.get_data()
            cbt_hash = hashlib.md5(cbt_bytes).digest()
            target_info[AvId.MSV_AV_CHANNEL_BINDINGS] = cbt_hash
        response, session_base_key = \
            self._get_NTLMv2_response(self._user_name, self._password,
                                      self._domain_name,
                                      self._server_challenge,
                                      self._client_challenge,
                                      timestamp, target_info)
        key_exchange_key = \
            compkeys._get_exchange_key_ntlm_v2(session_base_key)
    return response, key_exchange_key, target_info
def _get_LMv2_response(user_name, password, domain_name, server_challenge,
                       client_challenge):
    """[MS-NLMP] 2.2.2.4 LMv2_RESPONSE -- the NTLMv2 LmChallengeResponse.

    :param user_name: the user name being authenticated
    :param password: the user's password
    :param domain_name: the domain of the user account
    :param server_challenge: random 8-byte server challenge from the
        CHALLENGE_MESSAGE
    :param client_challenge: random 8-byte client challenge for the
        AUTHENTICATE_MESSAGE
    :return: the LmChallengeResponse to the server challenge
    """
    key = comphash._ntowfv2(user_name, password, domain_name)
    # HMAC-MD5 over both challenges, keyed with the NTOWFv2 hash, with
    # the client challenge appended.
    digest = hmac.new(key, server_challenge + client_challenge,
                      digestmod=hashlib.md5).digest()
    return digest + client_challenge
def _get_NTLM2_response(password, server_challenge, client_challenge):
    """[MS-NLMP] 3.3.1 -- the NTLM2 (extended session security) response.

    Despite the name this is not NTLMv2: it is used when the
    ntlm_compatibility level is < 3 but
    NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY is negotiated.

    :param password: the user's password
    :param server_challenge: random 8-byte server challenge from the
        CHALLENGE_MESSAGE
    :param client_challenge: random 8-byte client challenge for the
        AUTHENTICATE_MESSAGE
    :return: (response, session_base_key)
    """
    ntlm_hash = comphash._ntowfv1(password)
    # The DES "challenge" is the first 8 bytes of MD5(server + client).
    session_hash = hashlib.md5(server_challenge + client_challenge).digest()[:8]
    response = ComputeResponse._calc_resp(ntlm_hash, session_hash[0:8])
    session_base_key = hashlib.new('md4', ntlm_hash).digest()
    return response, session_base_key
def _get_NTLMv2_response(user_name, password, domain_name,
                         server_challenge, client_challenge, timestamp,
                         target_info):
    """[MS-NLMP] 2.2.2.8 / 3.3.2 -- the NTLMv2 NtChallengeResponse.

    :param user_name: the user name being authenticated
    :param password: the user's password
    :param domain_name: the domain of the user account
    :param server_challenge: random 8-byte server challenge from the
        CHALLENGE_MESSAGE
    :param client_challenge: random 8-byte client challenge for the
        AUTHENTICATE_MESSAGE
    :param timestamp: 8-byte Windows timestamp (100ns since 1601-01-01)
    :param target_info: target_info structure from the CHALLENGE_MESSAGE,
        with the CBT attached if required
    :return: (response, session_base_key)
    """
    key = comphash._ntowfv2(user_name, password, domain_name)
    temp = ComputeResponse._get_NTLMv2_temp(timestamp, client_challenge,
                                            target_info)
    # NTProofStr = HMAC-MD5(key, server_challenge + temp)
    nt_proof_str = hmac.new(key, server_challenge + temp,
                            digestmod=hashlib.md5).digest()
    session_base_key = hmac.new(key, nt_proof_str,
                                digestmod=hashlib.md5).digest()
    return nt_proof_str + temp, session_base_key
Please provide a description of the function:def _get_NTLMv2_temp(timestamp, client_challenge, target_info):
resp_type = b'\x01'
hi_resp_type = b'\x01'
reserved1 = b'\x00' * 2
reserved2 = b'\x00' * 4
reserved3 = b'\x00' * 4
# This byte is not in the structure defined in 2.2.2.7 but is in the
# computation guide, works with it present
reserved4 = b'\x00' * 4
temp = resp_type
temp += hi_resp_type
temp += reserved1
temp += reserved2
temp += timestamp
temp += client_challenge
temp += reserved3
temp += target_info.pack()
temp += reserved4
return temp | [
"\n [MS-NLMP] v28.0 2016-07-14\n\n 2.2.2.7 NTLMv2_CLIENT_CHALLENGE - variable length\n The NTLMv2_CLIENT_CHALLENGE structure defines the client challenge in\n the AUTHENTICATE_MESSAGE. This structure is used only when NTLM v2\n authentication is configured and is transported in the NTLMv2_RESPONSE\n structure.\n\n The method to create this structure is defined in 3.3.2 NTLMv2\n Authentication. In this method this variable is known as the temp\n value. The target_info variable corresponds to the ServerName variable\n used in that documentation. This is in reality a lot more than just the\n ServerName and contains the AV_PAIRS structure we need to transport\n with the message like Channel Binding tokens and others. By default\n this will be the target_info returned from the CHALLENGE_MESSAGE plus\n MSV_AV_CHANNEL_BINDINGS if specified otherwise it is a new target_info\n set with MSV_AV_TIMESTAMP to the current time.\n\n :param timestamp: An 8-byte timestamp in windows format, 100\n nanoseconds since 1601-01-01\n :param client_challenge: A random 8-byte response generated by the\n `client for the AUTHENTICATE_MESSAGE\n :param target_info: The target_info structure from the\n CHALLENGE_MESSAGE with the CBT attached if required\n :return temp: The CLIENT_CHALLENGE structure that will be added to the\n NtChallengeResponse structure\n "
] |
def _calc_resp(password_hash, server_challenge):
    """Generate a 24-byte LM/NT response from a 16-byte password hash.

    The hash is zero-padded to 21 bytes and split into three 7-byte DES
    keys; each key encrypts the 8-byte server challenge and the three
    ciphertexts are concatenated.

    :param password_hash: a 16-byte password hash
    :param server_challenge: random 8-byte server challenge from the
        CHALLENGE_MESSAGE
    :return: the 24-byte response
    """
    padded = password_hash + b'\x00' * (21 - len(password_hash))
    response = b''
    for start in (0, 7, 14):
        cipher = DES(DES.key56_to_key64(padded[start:start + 7]))
        response += cipher.encrypt(server_challenge[0:8])
    return response
def encrypt(self, data, pad=True):
    """DES-encrypt `data` with the key this instance was initialised with.

    :param data: the bytes string to encrypt
    :param pad: right-pad a short final block with \\x00 up to 8 bytes;
        when False, a short block raises ValueError instead
    :return: the encrypted bytes string
    :raises ValueError: if pad is False and len(data) is not a multiple
        of 8
    """
    out = b""
    for start in range(0, len(data), 8):
        block = data[start:start + 8]
        if len(block) != 8:
            if not pad:
                raise ValueError("DES encryption must be a multiple of 8 "
                                 "bytes")
            block += b"\x00" * (8 - len(block))
        out += self._encode_block(block)
    return out
def decrypt(self, data):
    """DES-decrypt `data` with the key this instance was initialised with.

    :param data: the encrypted bytes string; length must be a multiple
        of 8
    :return: the decrypted bytes string
    :raises ValueError: if len(data) is not a multiple of 8
    """
    out = b""
    for start in range(0, len(data), 8):
        block = data[start:start + 8]
        if len(block) != 8:
            raise ValueError("DES decryption must be a multiple of 8 "
                             "bytes")
        out += self._decode_block(block)
    return out
def key56_to_key64(key):
    """Expand a 7-byte (56-bit) DES key to 8 bytes with odd parity.

    The 56 key bits are split into eight 7-bit groups; each group is
    shifted left one position and its least-significant (parity) bit is
    set so that every output byte contains an odd number of set bits.

    For example b"\\x01\\x02\\x03\\x04\\x05\\x06\\x07"
    (00000001 00000010 00000011 00000100 00000101 00000110 00000111)
    becomes b"\\x01\\x80\\x80\\x61\\x40\\x29\\x19\\x0E"
    (00000001 10000000 10000000 01100001 01000000 00101001 00011001
    00001110).

    https://crypto.stackexchange.com/questions/15799/des-with-actual-7-byte-key

    :param key: 7-byte string sized key
    :return: 8-byte key with the parity bits set
    :raises ValueError: if `key` is not exactly 7 bytes long
    """
    if len(key) != 7:
        raise ValueError("DES 7-byte key is not 7 bytes in length, "
                         "actual: %d" % len(key))
    # Assemble the 56 key bits into a single integer so each 7-bit group
    # can be extracted directly, instead of stitching together adjacent
    # bytes with per-index shift arithmetic.
    bits = 0
    for i in range(7):
        bits = (bits << 8) | struct.unpack("B", key[i:i + 1])[0]
    new_key = b""
    for i in range(8):
        value = ((bits >> (49 - 7 * i)) & 0x7F) << 1
        # Odd parity: set the low bit when the 7 data bits contain an
        # even number of ones (inlines the former DES.bit_count helper,
        # removing the dependency on the enclosing class).
        if bin(value).count("1") % 2 == 0:
            value |= 1
        new_key += struct.pack("B", value)
    return new_key
Please provide a description of the function:def _lmowfv1(password):
# if the password is a hash, return the LM hash
if re.match(r'^[a-fA-F\d]{32}:[a-fA-F\d]{32}$', password):
lm_hash = binascii.unhexlify(password.split(':')[0])
return lm_hash
# fix the password to upper case and length to 14 bytes
password = password.upper()
lm_pw = password.encode('utf-8')
padding_size = 0 if len(lm_pw) >= 14 else (14 - len(lm_pw))
lm_pw += b"\x00" * padding_size
# do hash
magic_str = b"KGS!@#$%" # page 56 in [MS-NLMP v28.0]
res = b""
dobj = DES(DES.key56_to_key64(lm_pw[0:7]))
res += dobj.encrypt(magic_str)
dobj = DES(DES.key56_to_key64(lm_pw[7:14]))
res += dobj.encrypt(magic_str)
return res | [
"\n [MS-NLMP] v28.0 2016-07-14\n\n 3.3.1 NTLM v1 Authentication\n Same function as LMOWFv1 in document to create a one way hash of the\n password. Only used in NTLMv1 auth without session security\n\n :param password: The password or hash of the user we are trying to\n authenticate with\n :return res: A Lan Manager hash of the password supplied\n "
] |
Please provide a description of the function:def _ntowfv1(password):
# if the password is a hash, return the NT hash
if re.match(r'^[a-fA-F\d]{32}:[a-fA-F\d]{32}$', password):
nt_hash = binascii.unhexlify(password.split(':')[1])
return nt_hash
digest = hashlib.new('md4', password.encode('utf-16-le')).digest()
return digest | [
"\n [MS-NLMP] v28.0 2016-07-14\n\n 3.3.1 NTLM v1 Authentication\n Same function as NTOWFv1 in document to create a one way hash of the\n password. Only used in NTLMv1 auth without session security\n\n :param password: The password or hash of the user we are trying to\n authenticate with\n :return digest: An NT hash of the password supplied\n "
] |
def _ntowfv2(user_name, password, domain_name):
    """[MS-NLMP] 3.3.2 NTOWFv2 (and LMOWFv2) -- keyed NT hash.

    HMAC-MD5 of the upper-cased user name concatenated with the domain
    (UTF-16-LE), keyed with the NTOWFv1 hash of the password.

    :param user_name: the user name being authenticated
    :param password: the user's password (or hash pair)
    :param domain_name: the domain of the user account
    :return: the 16-byte NTLMv2 hash
    """
    key = _ntowfv1(password)
    identity = (user_name.upper() + domain_name).encode('utf-16-le')
    return hmac.new(key, identity, digestmod=hashlib.md5).digest()
def visit_Method(self, method):
    """Ensure `method` has the same signature as any same-named method
    declared on the parent interfaces of its class, appending a message
    to self.errors on mismatch.

    :param method: L{quarkc.ast.Method} instance.
    """
    resolved_method = method.resolved.type
    def get_params(method, extra_bindings):
        # The Method should already be the resolved version.
        result = []
        for param in method.params:
            resolved_param = texpr(param.resolved.type, param.resolved.bindings, extra_bindings)
            result.append(resolved_param.id)
        return result
    def get_return_type(method, extra_bindings):
        # The Method should already be the resolved version.
        return texpr(method.type.resolved.type, method.type.resolved.bindings,
                     extra_bindings).id
    def signature(method, return_type, params):
        # Human-readable "ret name(params)" form used in error messages.
        return "%s %s(%s)" % (return_type, method.name.text, ", ".join(params))
    # Ensure the method has the same signature as matching methods on parent
    # interfaces:
    interfaces = list(t for t in method.clazz.bases if isinstance(t.resolved.type, Interface))
    for interface in interfaces:
        interfaceTypeExpr = interface.resolved
        for definition in interfaceTypeExpr.type.definitions:
            if definition.name.text == method.name.text:
                resolved_definition = definition.resolved.type
                # Compare parameter and return types after substituting
                # the class's bindings vs. the interface's bindings.
                method_params = get_params(resolved_method, method.clazz.resolved.bindings)
                definition_params = get_params(resolved_definition, interfaceTypeExpr.bindings)
                method_return = get_return_type(resolved_method, method.clazz.resolved.bindings)
                definition_return = get_return_type(resolved_definition, interfaceTypeExpr.bindings)
                if method_params != definition_params or method_return != definition_return:
                    self.errors.append(
                        "%s: method signature '%s' on %s does not match method '%s' on interface %s" % (
                            lineinfo(method), signature(resolved_method, method_return, method_params),
                            method.clazz.resolved.type.id,
                            signature(resolved_definition, definition_return, definition_params),
                            interface.resolved.type.id))
def urlparse(self, url, top=True, text=None, include=False, recurse=True):
    """Parse a quark file and, optionally, its recursive dependencies.

    A quark file loaded via urlparse() can have two kinds of
    dependencies: `use a.q` adds each file as a separate top-level root
    in self.roots, while `include b.q` adds the file to the *current*
    root (as a child of self.root).

    Two caching layers exist: the shared CACHE dict of parsed roots, and
    pickled .qc archive files. Both store roots, so they apply only to
    top-level files and `use`d files; `include`d files bypass caching as
    they must be loaded as children of the parent root.

    (NOTE(review): this is Python 2 code -- `except IOError, e` syntax.)
    """
    if os.path.exists(url):
        url = os.path.abspath(url)
    urlc = compiled_quark(url)
    # Fast path 1: already parsed and cached in memory.
    if not include and url in self.CACHE:
        self.log.debug("loading from cache: %s", url)
        root = self.CACHE[url]
        self.roots.add(root)
        if recurse:
            for u in root.uses:
                assert u in self.CACHE, (url, u, self.CACHE.keys())
                self.roots.add(self.CACHE[u])
        if not include: self.entries[url] = root.files[0]
        return root.files[0]
    # Fast path 2: a pickled .qc archive newer than its sources.
    elif not include and recurse and os.path.exists(url) and is_newer(urlc, url, __file__):
        self.log.debug("loading from: %sc", url)
        with open(urlc) as fd:
            try:
                unp = pickle.Unpickler(fd)
                deps = unp.load()
                if is_newer(urlc, *deps):
                    roots = unp.load()
                    # Check for the end record in case we
                    # encounter a partially written file.
                    end = unp.load()
                    if end == ARCHIVE_END:
                        for root in roots:
                            self.CACHE[root.url] = root
                            self.roots.add(root)
                        if not include: self.entries[url] = roots[0].files[0]
                        return roots[0].files[0]
            except EOFError:
                # Truncated archive: fall through to a full parse.
                pass
    # Slow path: parse from source. For non-includes, push a fresh Root
    # and restore the previous one afterwards.
    old = None
    if not include and url not in self.roots:
        old = self.root
        self.root = Root(url)
        self.roots.add(self.root)
    try:
        if text is None:
            try:
                text = self.read(url)
            except IOError, e:
                if top:
                    raise CompileError(e)
                else:
                    raise
        self.log.debug("parsing %s", url)
        file = self.parse(url, text)
        if recurse:
            for u in file.uses.values():
                qurl = join(url, u.url)
                self.perform_use(qurl, u)
                assert qurl in self.CACHE, (url, qurl, self.CACHE.keys())
            for inc in file.includes.values():
                qurl = join(url, inc.url)
                if qurl.endswith(".q"):
                    self.perform_quark_include(qurl, inc)
                else:
                    self.perform_native_include(qurl, inc)
        if not include:
            self.CACHE[url] = self.root
        if not include: self.entries[url] = file
        return file
    finally:
        if old: self.root = old
def get_doc(node):
    """Return `node`'s documentation as a single string.

    Pulls from doc annotations, or falls back to a simple placeholder
    such as "(classname)" when none exist.
    """
    doc = " ".join(get_doc_annotations(node))
    return doc if doc else "(%s)" % node.__class__.__name__.lower()
def get_code(node, coder=Coder()):
    """Return `node`'s source code, HTML-escaped (including quotes).

    NOTE(review): cgi.escape was removed in Python 3.8; html.escape is
    the replacement if this code ever moves off Python 2.
    """
    rendered = str(coder.code(node))
    return cgi.escape(rendered, quote=True)
def setup_environ(self):
    """Set up the environ dictionary and add the 'ws4py.socket' key.

    Its associated value is the real socket underlying the WSGI input
    stream; the HTTP version is also recorded from SERVER_PROTOCOL.
    """
    SimpleHandler.setup_environ(self)
    environ = self.environ
    environ['ws4py.socket'] = get_connection(environ['wsgi.input'])
    self.http_version = environ['SERVER_PROTOCOL'].rsplit('/')[-1]
def finish_response(self):
    """Complete the response and hand off any upgraded websocket.

    Removes the 'ws4py.socket' and 'ws4py.websocket' environ keys, then
    attaches the returned websocket (if any) to the WSGI server via its
    ``link_websocket_to_server`` method. On failure the websocket is
    closed with code 1011 and the error re-raised.
    """
    # Force the result iterator up to its first actual content so any
    # environ mutation done during iteration has happened before we
    # inspect environ below.
    rest = iter(self.result)
    first = list(itertools.islice(rest, 1))
    self.result = itertools.chain(first, rest)
    websocket = None
    if self.environ:
        self.environ.pop('ws4py.socket', None)
        websocket = self.environ.pop('ws4py.websocket', None)
    try:
        SimpleHandler.finish_response(self)
    except:
        if websocket:
            websocket.close(1011, reason='Something broke')
        raise
    else:
        if websocket:
            self.request_handler.server.link_websocket_to_server(websocket)
def handle(self):
    """Handle one request with our own WSGI handler.

    Unfortunately the base class forces overriding the whole method
    just to swap in the WebSocketWSGIHandler.
    """
    self.raw_requestline = self.rfile.readline()
    # parse_request() has already sent an error code on failure.
    if not self.parse_request():
        return
    # next line is where we'd have expect a configuration key somehow
    wsgi_handler = self.WebSocketWSGIHandler(self.rfile, self.wfile,
                                             self.get_stderr(),
                                             self.get_environ())
    wsgi_handler.request_handler = self  # backpointer for logging
    wsgi_handler.run(self.server.get_app())
def right_associative_infix_rule(operator, grammar_rule):
    """Semantic action for rules like 'A = B (C B)*'.

    Folds the parsed (op, rhs) pairs in `remaining` onto `result`,
    building nested `operator` calls; each op token is mapped through
    self.aliases to a method name.

    NOTE(review): despite the name, the front-to-back fold here looks
    left-associative -- confirm against the grammar's intent.
    (Python 2 only: uses a tuple-unpacking parameter.)

    :param operator: AST node factory applied at each fold step
    :param grammar_rule: the grammar rule string passed to g.rule
    :return: the decorated semantic action
    """
    def semantic_action(self, node, (result, remaining)):
        while remaining:
            op, rhs = remaining.pop(0)
            result = operator(Attr(result, Name(self.aliases[op])), [rhs], op)
        return result
    return g.rule(grammar_rule)(semantic_action)
def configure(self, voltage_range=RANGE_32V, gain=GAIN_AUTO,
              bus_adc=ADC_12BIT, shunt_adc=ADC_12BIT):
    """Configure and calibrate how the INA219 will take measurements.

    voltage_range -- full scale bus voltage: RANGE_16V or RANGE_32V (default).
    gain -- shunt voltage gain constant (GAIN_1_40MV .. GAIN_8_320MV), or
            GAIN_AUTO (default) to choose/adjust automatically.
    bus_adc -- bus ADC resolution or sample-averaging constant.
    shunt_adc -- shunt ADC resolution or sample-averaging constant.
    """
    self.__validate_voltage_range(voltage_range)
    self._voltage_range = voltage_range

    # Resolve GAIN_AUTO: derive the gain from the expected current when it
    # is known, otherwise start at the smallest range and let auto-gain
    # step up later.
    if gain == self.GAIN_AUTO:
        self._auto_gain_enabled = True
        if self._max_expected_amps is not None:
            self._gain = self._determine_gain(self._max_expected_amps)
        else:
            self._gain = self.GAIN_1_40MV
    else:
        self._gain = gain

    logging.info('gain set to %.2fV' % self.__GAIN_VOLTS[self._gain])
    logging.debug(
        self.__LOG_MSG_1 %
        (self._shunt_ohms, self.__BUS_RANGE[voltage_range],
         self.__GAIN_VOLTS[self._gain],
         self.__max_expected_amps_to_string(self._max_expected_amps),
         bus_adc, shunt_adc))
    self._calibrate(
        self.__BUS_RANGE[voltage_range], self.__GAIN_VOLTS[self._gain],
        self._max_expected_amps)
    self._configure(voltage_range, self._gain, bus_adc, shunt_adc)
" Configures and calibrates how the INA219 will take measurements.\n\n Arguments:\n voltage_range -- The full scale voltage range, this is either 16V\n or 32V represented by one of the following constants;\n RANGE_16V, RANGE_32V (default).\n gain -- The gain which controls the maximum range of the shunt\n voltage represented by one of the following constants;\n GAIN_1_40MV, GAIN_2_80MV, GAIN_4_160MV,\n GAIN_8_320MV, GAIN_AUTO (default).\n bus_adc -- The bus ADC resolution (9, 10, 11, or 12-bit) or\n set the number of samples used when averaging results\n represent by one of the following constants; ADC_9BIT,\n ADC_10BIT, ADC_11BIT, ADC_12BIT (default),\n ADC_2SAMP, ADC_4SAMP, ADC_8SAMP, ADC_16SAMP,\n ADC_32SAMP, ADC_64SAMP, ADC_128SAMP\n shunt_adc -- The shunt ADC resolution (9, 10, 11, or 12-bit) or\n set the number of samples used when averaging results\n represent by one of the following constants; ADC_9BIT,\n ADC_10BIT, ADC_11BIT, ADC_12BIT (default),\n ADC_2SAMP, ADC_4SAMP, ADC_8SAMP, ADC_16SAMP,\n ADC_32SAMP, ADC_64SAMP, ADC_128SAMP\n "
] |
def wake(self):
    """Wake the INA219 from power down mode."""
    cfg = self._read_configuration()
    # Setting the MODE bits (D2-D0) to 0b111 restores continuous operation.
    self._configuration_register(cfg | 0x0007)
    # 40us delay to recover from powerdown (p14 of spec)
    time.sleep(0.00004)
" Wake the INA219 from power down mode "
] |
def _return_response_and_status_code(response, json_results=True):
    """Package a requests response as a dict with its HTTP status code.

    response -- requests response object.
    json_results -- when True, decode the body as JSON; otherwise return raw content.
    Returns dict with 'results' on success, 'error' for known failure codes,
    and always 'response_code'.
    """
    status = response.status_code
    known_errors = {
        400: 'package sent is either malformed or not within the past 24 hours.',
        204: 'You exceeded the public API request rate limit (4 requests of any nature per minute)',
        403: 'You tried to perform calls to functions for which you require a Private API key.',
        404: 'File not found.',
    }
    if status == requests.codes.ok:
        payload = response.json() if json_results else response.content
        return dict(results=payload, response_code=status)
    if status in known_errors:
        return dict(error=known_errors[status], response_code=status)
    return dict(response_code=status)
" Output the requests response content or content as json and status code\n\n :rtype : dict\n :param response: requests response object\n :param json_results: Should return JSON or raw content\n :return: dict containing the response content and/or the status code with error string.\n "
] |
def rescan_file(self, this_hash, timeout=None):
    """Re-queue an already-submitted file for scanning.

    this_hash -- md5/sha1/sha256 (or a CSV list of up to 25) of files already
                 present in the VirusTotal store.
    timeout -- optional requests timeout in seconds.
    Returns JSON with scan_id and permalink, or dict(error=...) on failure.
    """
    payload = dict(apikey=self.api_key, resource=this_hash)
    try:
        resp = requests.post(self.base + 'file/rescan', params=payload,
                             proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Rescan a previously submitted filed or schedule an scan to be performed in the future.\n\n :param this_hash: a md5/sha1/sha256 hash. You can also specify a CSV list made up of a combination of any of\n the three allowed hashes (up to 25 items), this allows you to perform a batch request with\n one single call. Note that the file must already be present in our file store.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response that contains scan_id and permalink.\n "
] |
def put_comments(self, resource, comment, timeout=None):
    """Post a community comment on a file or URL.

    resource -- md5/sha1/sha256 of the file, or the URL itself.
    comment -- the review text; supports #hashtags and @user references.
    timeout -- optional requests timeout in seconds.
    Returns response_code 1 when the comment was posted, 0 otherwise.
    """
    payload = dict(apikey=self.api_key, resource=resource, comment=comment)
    try:
        resp = requests.post(self.base + 'comments/put', params=payload,
                             proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Post a comment on a file or URL.\n\n The initial idea of VirusTotal Community was that users should be able to make comments on files and URLs,\n the comments may be malware analyses, false positive flags, disinfection instructions, etc.\n\n Imagine you have some automatic setup that can produce interesting results related to a given sample or URL\n that you submit to VirusTotal for antivirus characterization, you might want to give visibility to your setup\n by automatically reviewing samples and URLs with the output of your automation.\n\n :param resource: either a md5/sha1/sha256 hash of the file you want to review or the URL itself that you want\n to comment on.\n :param comment: the actual review, you can tag it using the \"#\" twitter-like syntax (e.g. #disinfection #zbot)\n and reference users using the \"@\" syntax (e.g. @VirusTotalTeam).\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: If the comment was successfully posted the response code will be 1, 0 otherwise.\n "
] |
def get_ip_report(self, this_ip, timeout=None):
    """Get the report for an IP address.

    this_ip -- a valid IPv4 address in dotted quad notation (IPv6 unsupported).
    timeout -- optional requests timeout in seconds.
    Returns the JSON report, or dict(error=...) on failure.
    """
    payload = dict(apikey=self.api_key, ip=this_ip)
    try:
        resp = requests.get(self.base + 'ip-address/report', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Get IP address reports.\n\n :param this_ip: a valid IPv4 address in dotted quad notation, for the time being only IPv4 addresses are\n supported.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response\n "
] |
def get_domain_report(self, this_domain, timeout=None):
    """Get information about a given domain name.

    this_domain -- the domain to look up.
    timeout -- optional requests timeout in seconds.
    Returns the JSON report, or dict(error=...) on failure.
    """
    payload = dict(apikey=self.api_key, domain=this_domain)
    try:
        resp = requests.get(self.base + 'domain/report', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Get information about a given domain.\n\n :param this_domain: a domain name.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response\n "
] |
def scan_file(self,
              this_file,
              notify_url=None,
              notify_changes_only=None,
              from_disk=True,
              filename=None,
              timeout=None):
    """Submit a file to be scanned by VirusTotal (32MB limit on this endpoint).

    this_file -- a filesystem path (from_disk=True) or a file object/bytes
                 (from_disk=False).
    notify_url -- URL to POST a notification to when the scan finishes.
    notify_changes_only -- with notify_url, notify only when results changed.
    from_disk -- read this_file's contents from disk when True.
    filename -- explicit filename; overrides the basename derived from disk.
    timeout -- optional requests timeout in seconds.
    Returns JSON with scan_id and permalink, or dict(error=...) on failure.

    NOTE(review): notify_url/notify_changes_only are accepted but never sent
    to the API — confirm whether they should be added to params.
    """
    params = {'apikey': self.api_key}
    if from_disk:
        if not filename:
            filename = os.path.basename(this_file)
        # 'with' closes the handle; the original leaked the open file object.
        with open(this_file, 'rb') as file_handle:
            files = {'file': (filename, file_handle.read())}
    elif filename:
        files = {'file': (filename, this_file)}
    else:
        files = {'file': this_file}
    try:
        response = requests.post(
            self.base + 'file/scan', files=files, params=params,
            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as e:
        return dict(error=str(e))
    return _return_response_and_status_code(response)
" Submit a file to be scanned by VirusTotal.\n\n Allows you to send a file for scanning with VirusTotal. Before performing your submissions we encourage you to\n retrieve the latest report on the files, if it is recent enough you might want to save time and bandwidth by\n making use of it. File size limit is 32MB, in order to submmit files up to 200MB in size you must request a\n special upload URL.\n\n :param this_file: The file to be uploaded.\n :param notify_url: A URL to which a POST notification should be sent when the scan finishes.\n :param notify_changes_only: Used in conjunction with notify_url. Indicates if POST notifications should be\n sent only if the scan results differ from the previous analysis.\n :param from_disk: If True we read the file contents from disk using this_file as filepath. If False this_file\n is the actual file object.\n :param filename: Specify the filename, this overwrites the filename if we read a file from disk.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response that contains scan_id and permalink.\n "
] |
def get_upload_url(self, timeout=None):
    """Get a special upload URL for files bigger than 32MB (up to 200MB).

    timeout -- optional requests timeout in seconds.
    Returns the upload URL string on success, otherwise a dict with either
    'response_code' or 'error'.
    """
    try:
        response = requests.get(self.base + 'file/scan/upload_url',
                                params=dict(apikey=self.api_key),
                                proxies=self.proxies,
                                timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    if response.status_code != requests.codes.ok:
        return dict(response_code=response.status_code)
    return response.json().get('upload_url')
" Get a special URL for submitted files bigger than 32MB.\n\n In order to submit files bigger than 32MB you need to obtain a special upload URL to which you\n can POST files up to 200MB in size. This API generates such a URL.\n\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON special upload URL to which you can POST files up to 200MB in size.\n "
] |
def get_file_report(self, resource, allinfo=1, timeout=None):
    """Get the (private API) scan report for one or more files.

    resource -- hash or scan_id, or a CSV list of up to 25 of them.
    allinfo -- 1 to include extended tool output and VT metadata.
    timeout -- optional requests timeout in seconds.
    Returns the JSON report, or dict(error=...) on failure.
    """
    payload = dict(apikey=self.api_key, resource=resource, allinfo=allinfo)
    try:
        resp = requests.get(self.base + 'file/report', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Get the scan results for a file.\n\n Retrieves a concluded file scan report for a given file. Unlike the public API, this call allows you to also\n access all the information we have on a particular file (VirusTotal metadata, signature information, structural\n information, etc.) by using the allinfo parameter described later on.\n\n :param resource: An md5/sha1/sha256 hash of a file for which you want to retrieve the most recent antivirus\n report. You may also specify a scan_id (sha256-timestamp as returned by the scan API) to access a specific\n report. You can also specify a CSV list made up of a combination of hashes and scan_ids (up to 25 items),\n this allows you to perform a batch request with just one single call.\n :param allinfo: (optional) If specified and set to one, the call will return additional info, other than the\n antivirus results, on the file being queried. This additional info includes the output of several tools acting\n on the file (PDFiD, ExifTool, sigcheck, TrID, etc.), metadata regarding VirusTotal submissions (number of\n unique sources that have sent the file in the past, first seen date, last seen date, etc.), the output of\n in-house technologies such as a behavioural sandbox, etc.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response\n "
] |
def file_search(self, query, offset=None, timeout=None):
    """Run an Intelligence-style reverse search for samples.

    query -- a search-modifier-compliant file search query.
    offset -- pagination token returned by a previous identical query.
    timeout -- optional requests timeout in seconds.
    Returns at most 300 hashes ordered by last submission date (descending).
    """
    payload = {'apikey': self.api_key, 'query': query, 'offset': offset}
    try:
        resp = requests.get(self.base + 'file/search', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Search for samples.\n\n In addition to retrieving all information on a particular file, VirusTotal allows you to perform what we\n call \"advanced reverse searches\". Reverse searches take you from a file property to a list of files that\n match that property. For example, this functionality enables you to retrieve all those files marked by at\n least one antivirus vendor as Zbot, or all those files that have a size under 90KB and are detected by at\n least 10 antivirus solutions, or all those PDF files that have an invalid XREF section, etc.\n\n This API is equivalent to VirusTotal Intelligence advanced searches. A very wide variety of search modifiers\n are available, including: file size, file type, first submission date to VirusTotal, last submission date to\n VirusTotal, number of positives, dynamic behavioural properties, binary content, submission file name, and a\n very long etcetera. The full list of search modifiers allowed for file search queries is documented at:\n https://www.virustotal.com/intelligence/help/file-search/#search-modifiers\n\n NOTE:\n Daily limited! No matter what API step you have licensed, this API call is limited to 50K requests per day.\n If you need any more, chances are you are approaching your engineering problem erroneously and you can\n probably solve it using the file distribution call. Do not hesitate to contact us with your particular\n use case.\n\n EXAMPLE:\n search_options = 'type:peexe size:90kb+ positives:5+ behaviour:\"taskkill\"'\n\n :param query: A search modifier compliant file search query.\n :param offset: (optional) The offset value returned by a previously issued identical query, allows you to\n paginate over the results. 
If not specified the first 300 matching files sorted according to last submission\n date to VirusTotal in a descending fashion will be returned.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response - By default the list returned contains at most 300 hashes, ordered according to\n last submission date to VirusTotal in a descending fashion.\n "
] |
def get_file_clusters(self, this_date, timeout=None):
    """Get file similarity clusters for a given day.

    this_date -- the day to query, e.g. '2013-09-10'.
    timeout -- optional requests timeout in seconds.
    Returns JSON with num_candidates, num_clusters, size_top200 and the 200
    largest clusters, or dict(error=...) on failure.
    """
    payload = dict(apikey=self.api_key, date=this_date)
    try:
        resp = requests.get(self.base + 'file/clusters', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" File similarity clusters for a given time frame.\n\n VirusTotal has built its own in-house file similarity clustering functionality. At present, this clustering\n works only on PE, PDF, DOC and RTF files and is based on a very simple structural feature hash. This hash\n can very often be confused by certain compression and packing strategies, in other words, this clustering\n logic is no holly grail, yet it has proven itself very useful in the past.\n\n This API offers a programmatic access to the clustering section of VirusTotal Intelligence:\n https://www.virustotal.com/intelligence/clustering/\n\n NOTE:\n Please note that you must be logged in with a valid VirusTotal Community user account with access to\n VirusTotal Intelligence in order to be able to view the clustering listing.\n\n :param this_date: A specific day for which we want to access the clustering details, example: 2013-09-10.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON object contains several properties\n num_candidates - Total number of files submitted during the given time frame for which a feature hash could\n be calculated.\n num_clusters - Total number of clusters generated for the given time period under consideration, a cluster\n can be as small as an individual file, meaning that no other feature-wise similar file was\n found.\n size_top200\t - The sum of the number of files in the 200 largest clusters identified.\n clusters - List of JSON objects that contain details about the 200 largest clusters identified. These\n objects contain 4 properties: id, label, size and avg_positives.. The id field can be used\n to then query the search API call for files contained in the given cluster. The label\n property is a verbose human-intelligible name for the cluster. The size field is the number\n of files that make up the cluster. 
Finally, avg_positives represents the average number of\n antivirus detections that the files in the cluster exhibit.\n "
] |
def get_url_distribution(self, after=None, reports='true', limit=1000, timeout=None):
    """Get a live feed of the latest URLs submitted to VirusTotal.

    after -- only return URLs received after this timestamp (ascending order).
    reports -- 'true' to embed the full scan report for each URL.
    limit -- maximum number of items to retrieve (default 1000).
    timeout -- optional requests timeout in seconds.
    """
    payload = dict(apikey=self.api_key, after=after, reports=reports, limit=limit)
    try:
        resp = requests.get(self.base + 'url/distribution', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Get a live feed with the lastest URLs submitted to VirusTotal.\n\n Allows you to retrieve a live feed of URLs submitted to VirusTotal, along with their scan reports. This\n call enables you to stay synced with VirusTotal URL submissions and replicate our dataset.\n\n :param after: (optional) Retrieve URLs received after the given timestamp, in timestamp ascending order.\n :param reports: (optional) When set to \"true\" each item retrieved will include the results for each particular\n URL scan (in exactly the same format as the URL scan retrieving API). If the parameter is not specified, each\n item returned will only contain the scanned URL and its detection ratio.\n :param limit: (optional) Retrieve limit file items at most (default: 1000).\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: JSON response\n "
] |
def get_url_feed(self, package=None, timeout=None):
    """Download a bzip2 window of the URL feed (raw bytes, not JSON).

    package -- time window in %Y%m%dT%H%M (per-minute) or %Y%m%dT%H (hourly),
               UTC. When omitted, the last fully-closed 5-minute window is used.
    timeout -- optional requests timeout in seconds.
    """
    if package is None:
        # Round down to the previous 5-minute boundary, then back one more
        # window so the requested package is guaranteed to be complete.
        now = datetime.utcnow()
        window_start = now - timedelta(minutes=now.minute % 5 + 5,
                                       seconds=now.second,
                                       microseconds=now.microsecond)
        package = window_start.strftime('%Y%m%dT%H%M')
    payload = {'apikey': self.api_key, 'package': package}
    try:
        resp = requests.get(self.base + 'url/feed', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    # Raw bzip2 tarball content — do not JSON-decode.
    return _return_response_and_status_code(resp, json_results=False)
" Get a live file feed with the latest files submitted to VirusTotal.\n\n Allows you to retrieve a live feed of reports on absolutely all URLs scanned by VirusTotal. This API requires\n you to stay relatively synced with the live submissions as only a backlog of 24 hours is provided at any given\n point in time.\n\n This API returns a bzip2 compressed tarball. For per-minute packages the compressed package contains a unique\n file, the file contains a json per line, this json is a full report on a given URL processed by VirusTotal\n during the given time window. The URL report follows the exact same format as the response of the URL report\n API if the allinfo=1 parameter is provided. For hourly packages, the tarball contains 60 files, one per each\n minute of the window.\n\n :param package: Indicates a time window to pull reports on all items received during such window.\n Only per-minute and hourly windows are allowed, the format is %Y%m%dT%H%M (e.g. 20160304T0900)\n or %Y%m%dT%H (e.g. 20160304T09). Time is expressed in UTC.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n :return: BZIP2 response: please see https://www.virustotal.com/en/documentation/private-api/#file-feed\n "
] |
def get_hashes_from_search(self, query, page=None, timeout=None):
    """Run a VirusTotal Intelligence search and return matching hashes.

    query -- a VirusTotal Intelligence search string.
    page -- the next_page token from a previous call, or None for page one.
    timeout -- optional requests timeout in seconds.
    """
    payload = dict(query=query, apikey=self.api_key, page=page)
    try:
        resp = requests.get(self.base + 'search/programmatic/', params=payload,
                            proxies=self.proxies, timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(resp)
" Get the scan results for a file.\n\n Even if you do not have a Private Mass API key that you can use, you can still automate VirusTotal Intelligence\n searches pretty much in the same way that the searching for files api call works.\n\n :param query: a VirusTotal Intelligence search string in accordance with the file search documentation .\n <https://www.virustotal.com/intelligence/help/file-search/>\n :param page: the next_page property of the results of a previously issued query to this API. This parameter\n should not be provided if it is the very first query to the API, i.e. if we are retrieving the\n first page of results.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n apikey: the API key associated to a VirusTotal Community account with VirusTotal Intelligence privileges.\n "
] |
def get_file(self, file_hash, save_file_at, timeout=None):
    """Download a sample from VirusTotal storage (deducts Intelligence quota).

    file_hash -- md5, sha1 or sha256 of the file to download.
    save_file_at -- directory/path where the file should be written.
    timeout -- optional requests timeout in seconds.
    """
    payload = dict(hash=file_hash, apikey=self.api_key)
    try:
        response = requests.get(self.base + 'download/', params=payload,
                                proxies=self.proxies, stream=True,
                                timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    if response.status_code == requests.codes.ok:
        self.save_downloaded_file(file_hash, save_file_at, response.content)
    # Binary sample content — return raw bytes, not JSON.
    return _return_response_and_status_code(response, json_results=False)
" Get the scan results for a file.\n\n Even if you do not have a Private Mass API key that you can use, you can still download files from the\n VirusTotal storage making use of your VirusTotal Intelligence quota, i.e. programmatic downloads will\n also deduct quota.\n\n :param file_hash: You may use either the md5, sha1 or sha256 hash of the file in order to download it.\n :param save_file_at: Path of where to save the file.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n\n "
] |
def get_all_file_report_pages(self, query):
    """Run an Intelligence search and follow next_page until exhausted.

    query -- a VirusTotal Intelligence search string.
    Returns dict(results=[...]) with every page's raw response appended,
    in order. The original duplicated the fetch/extract logic before and
    inside the loop; this folds it into one loop.
    """
    responses = []
    next_page = None
    while True:
        page_response = self.get_hashes_from_search(query, next_page)
        responses.append(page_response)
        results = page_response.get('results')
        # Error responses carry no 'results' dict; stop paginating then.
        next_page = results.get('next_page') if isinstance(results, dict) else None
        if not next_page:
            break
    return dict(results=responses)
" Get File Report (All Pages).\n\n :param query: a VirusTotal Intelligence search string in accordance with the file search documentation.\n :return: All JSON responses appended together.\n "
] |
def get_intel_notifications_feed(self, page=None, timeout=None):
    """Fetch the hunting notifications feed as JSON.

    page -- the 'next' token from a previous call, or None for the first page.
    timeout -- optional requests timeout in seconds.
    """
    payload = {'apikey': self.api_key, 'next': page}
    try:
        response = requests.get(self.base + 'hunting/notifications-feed/',
                                params=payload,
                                proxies=self.proxies,
                                timeout=timeout)
        # VT returns an empty result, len(content)==0, and status OK if there are no pending notifications.
        # To keep the API consistent we generate an empty object instead.
        # This might not be necessary with a later release of the VTI API. (bug has been submitted)
        if not response.content:
            response.__dict__['_content'] = \
                b'{"notifications":[],"verbose_msg":"No pending notification","result":0,"next":null}'
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(response)
" Get notification feed in JSON for further processing.\n\n :param page: the next_page property of the results of a previously issued query to this API. This parameter\n should not be provided if it is the very first query to the API, i.e. if we are retrieving the\n first page of results.\n :param timeout: The amount of time in seconds the request should wait before timing out.\n :returns: The next page identifier, The results (JSON is possible with .json())\n "
] |
def delete_intel_notifications(self, ids, timeout=None):
    """Programmatically delete hunting notifications.

    ids -- list of notification ids to delete from the feed.
    timeout -- optional requests timeout in seconds.
    Raises TypeError when ids is not a list.
    """
    if not isinstance(ids, list):
        raise TypeError("ids must be a list")
    try:
        response = requests.post(
            self.base + 'hunting/delete-notifications/programmatic/?key=' + self.api_key,
            data=json.dumps(ids),  # VirusTotal needs ids as a stringified array
            proxies=self.proxies,
            timeout=timeout)
    except requests.RequestException as exc:
        return dict(error=str(exc))
    return _return_response_and_status_code(response)
" Programmatically delete notifications via the Intel API.\n\n :param ids: A list of IDs to delete from the notification feed.\n :returns: The post response.\n "
] |
def get_credentials(self):
    """Return a botocore.credential.Credentials object for this session's keys."""
    return Credentials(
        access_key=self.aws_access_key_id,
        secret_key=self.aws_secret_access_key,
        token=self.aws_session_token,
    )
"\n Returns botocore.credential.Credential object.\n "
] |
def check_membership(self, group):
    """Return True if the request's user belongs to the required group(s).

    group -- a single group name, or a list/tuple of acceptable group names
             (membership in any one suffices).
    Adds an error message to the request when the check fails.

    Fixes the original list branch, which on no-match fell through to
    evaluating `group in user_groups` with the whole list as the element.
    """
    user_groups = self.request.user.groups.values_list("name", flat=True)
    if isinstance(group, (list, tuple)):
        is_member = any(required in user_groups for required in group)
    else:
        is_member = group in user_groups
    if not is_member:
        messages.add_message(self.request, messages.ERROR,
                             'You do not have sufficient permissions to do that.')
    return is_member
" Check required group(s) "
] |
def form_valid(self, form):
    """After the form validates, save via the parent and notify the user."""
    result = super(HookCreate, self).form_valid(form)
    # Good to make note of that
    messages.add_message(self.request, messages.SUCCESS,
                         'Hook %s created' % self.object.url)
    return result
"After the form is valid lets let people know"
] |
def form_valid(self, form):
    """Save via the parent's form_valid, then flash a success message."""
    parent_result = super(HostCreate, self).form_valid(form)
    messages.success(self.request,
                     'Host {} Successfully Created'.format(self.object))
    return parent_result
"First call the parent's form valid then let the user know it worked."
] |
def post(self, *args, **kwargs):
    """Create the SSH config (only if none exists) then render the GET view."""
    if models.SSHConfig.objects.all().exists():
        # Already configured — nothing to create.
        return self.get_view()
    create_ssh_config(remote_user=self.request.POST.get('remote_user', 'root'))
    return self.get_view()
"Create the SSH file & then return the normal get method..."
] |
def update_sandbox_site(comment_text):
    """Render a small status page and upload it to the remote web root.

    comment_text -- untrusted deploy comment; HTML-escaped before embedding.
    """
    import html  # replacement for cgi.escape, removed in Python 3.13
    # mode='w': we write str; NamedTemporaryFile's binary default rejects
    # str on Python 3. 'with' guarantees the handle is flushed and closed.
    with NamedTemporaryFile('w', delete=False) as page:
        page.write("Deployed at: {} <br /> Comment: {}".format(
            datetime.datetime.now().strftime('%c'),
            html.escape(comment_text, quote=False)))
    put(page.name, '/var/www/html/index.html', use_sudo=True)
"put's a text file on the server"
] |
def web_hooks(self, include_global=True):
    """Get all web hooks for this project. Includes global hooks by default."""
    from fabric_bolt.web_hooks.models import Hook
    criteria = Q(project=self)
    if include_global:
        # Global hooks have no project assigned.
        criteria = criteria | Q(project=None)
    return Hook.objects.filter(criteria)
"Get all web hooks for this project. Includes global hooks."
] |
def get_deployment_count(self):
    """Utility function to get the number of deployments a given project has."""
    aggregated = self.stage_set.annotate(
        num_deployments=Count('deployment')
    ).aggregate(total_deployments=Sum('num_deployments'))
    return aggregated['total_deployments']
def get_queryset_configurations(self, **kwargs):
    """Merge stage- and project-level configurations (stage entries win).

    Returns a list containing every matching stage-level configuration plus
    any project-level configuration whose key is not already overridden at
    the stage level.

    Fixes: ``not x in y`` anti-idiom replaced with ``x not in y``, and the
    key-membership list replaced with a set (O(1) lookups instead of O(n)).
    """
    merged = []
    seen_keys = set()
    # Stage-specific configurations take precedence.
    for stage_conf in self.stage_configurations().filter(**kwargs):
        merged.append(stage_conf)
        seen_keys.add(stage_conf.key)
    # Fill in project-level configurations not overridden by the stage.
    for project_conf in self.project.project_configurations().filter(**kwargs):
        if project_conf.key not in seen_keys:
            merged.append(project_conf)
            seen_keys.add(project_conf.key)
    return merged
def get_configurations(self):
    """Dictionary of the project's configurations keyed by configuration key;
    stage-level entries override project-level entries with the same key."""
    merged = dict(
        (conf.key, conf) for conf in self.project.project_configurations()
    )
    # Stage-specific values win over project-wide ones.
    for stage_conf in self.stage_configurations():
        merged[stage_conf.key] = stage_conf
    return merged
def get_absolute_url(self):
    """Determine where this configuration came from and link back there."""
    if self.stage:
        # Stage-specific configurations go back to the stage view.
        return reverse('projects_stage_view', args=(self.project.pk, self.stage.pk))
    # Project-specific configurations go back to the project page.
    return self.project.get_absolute_url()
def get_value(self):
    """Return the stored value from the field that matches ``data_type``."""
    if self.data_type == self.BOOLEAN_TYPE:
        return self.value_boolean
    if self.data_type == self.NUMBER_TYPE:
        return self.value_number
    if self.data_type == self.SSH_KEY_TYPE:
        return self.value_ssh_key.private_key_file._get_path()
    return self.value
def set_value(self, value):
    """Store ``value`` into the field that matches this row's ``data_type``."""
    if self.data_type == self.BOOLEAN_TYPE:
        self.value_boolean = bool(value)
        return
    if self.data_type == self.NUMBER_TYPE:
        self.value_number = float(value)
        return
    self.value = value
def add_output(self, line):
    """Append ``line`` to this deployment's output (hits the database directly)."""
    row = Deployment.objects.filter(pk=self.id)
    row.update(output=CF('output') + line)
def add_input(self, line):
    """Append ``line`` to this deployment's input (hits the database directly)."""
    row = Deployment.objects.filter(pk=self.id)
    row.update(input=CF('input') + line)
def get_next_input(self):
    """Pop and return the next buffered input line (None when empty).

    NOTE(review): a concurrent writer between the read and the update could
    lose input -- same caveat as the original TODO.
    """
    buffered = Deployment.objects.get(pk=self.id).input or ''
    lines = buffered.splitlines()
    if not lines:
        Deployment.objects.filter(pk=self.id).update(input='')
        return None
    Deployment.objects.filter(pk=self.id).update(input='\n'.join(lines[1:]))
    return lines[0]
def gravatar(self, size=20):
    """Construct a gravatar image address for the user."""
    digest = hashlib.md5(self.email.lower()).hexdigest()
    params = urllib.urlencode({'d': "mm", 's': str(size)})
    return "//www.gravatar.com/avatar/" + digest + "?" + params
def save(self, commit=True):
    """Save the instance and, when committing, apply the user_level permissions."""
    user = super(UserChangeForm, self).save(commit=commit)
    if commit:
        self.set_permissions(user)
    return user
def save(self, commit=True):
    """Save the instance with the correct Auth Group based on user_level.

    Sets a throw-away random password and emails the new user a
    password-reset ("welcome") link so they can pick their own.

    Fix: the throw-away password is now generated with
    ``random.SystemRandom`` (os.urandom-backed) instead of the default
    predictable Mersenne Twister PRNG.
    """
    instance = super(UserCreationForm, self).save(commit=commit)
    rng = random.SystemRandom()  # cryptographically secure source
    alphabet = string.ascii_uppercase + string.digits
    instance.set_password(''.join(rng.choice(alphabet) for _ in range(32)))
    instance.save()
    email_form = PasswordResetForm({'email': self.cleaned_data['email']})
    email_form.is_valid()
    email_form.save(email_template_name='accounts/welcome_email.html')
    return instance
def hooks(self, project):
    """Look up the hook URLs we need to post to: global plus project-specific."""
    criteria = Q(project=None) | Q(project=project)
    return self.get_queryset().filter(criteria).distinct('url')
def web_hook_receiver(sender, **kwargs):
    """Generic receiver for the web-hook firing piece: deliver to every hook."""
    deployment = Deployment.objects.get(pk=kwargs.get('deployment_id'))
    hook_list = deployment.web_hooks
    if not hook_list:
        return
    for hook in hook_list:
        payload = payload_generator(deployment)
        deliver_hook(deployment, hook.url, payload)
def full_domain_validator(hostname):
    """Fully validate a domain name against the standard rules.

    Rules enforced:
      - whole name (including dots) at most 255 characters;
      - each dot-separated label at most 63 characters;
      - labels contain only letters, digits and hyphens (case-insensitive);
      - labels neither start nor end with a hyphen.

    Returns None for a valid (or empty) hostname; raises ValidationError
    otherwise. A single trailing dot (FQDN form) is permitted.

    Fix: the label pattern is now a raw string so the ``\\d`` escape is
    passed to the regex engine verbatim instead of relying on Python
    leaving unknown string escapes untouched.
    """
    HOSTNAME_LABEL_PATTERN = re.compile(r"(?!-)[A-Z\d-]+(?<!-)$", re.IGNORECASE)
    if not hostname:
        return
    if len(hostname) > 255:
        raise ValidationError(_("The domain name cannot be composed of more than 255 characters."))
    if hostname[-1:] == ".":
        hostname = hostname[:-1]  # strip exactly one dot from the right, if present
    for label in hostname.split("."):
        if len(label) > 63:
            raise ValidationError(
                _("The label '%(label)s' is too long (maximum is 63 characters).") % {'label': label})
        if not HOSTNAME_LABEL_PATTERN.match(label):
            raise ValidationError(_("Unallowed characters in label '%(label)s'.") % {'label': label})
def serialize_hook(instance):
    """Serialize the object down to Python primitives.

    Preference order: the instance's own ``serialize_hook`` method, a
    project-configured ``HOOK_SERIALIZER``, then Django's built-in serializer.
    """
    custom = getattr(instance, 'serialize_hook', None)
    if custom and callable(custom):
        return instance.serialize_hook(hook=instance)
    if getattr(settings, 'HOOK_SERIALIZER', None):
        serializer = get_module(settings.HOOK_SERIALIZER)
        return serializer(instance, hook=instance)
    # No user-defined serializers: fall back to the Django built-in.
    return {
        'hook': instance.dict(),
        'data': serializers.serialize('python', [instance])[0]
    }
def deliver_hook(instance, target, payload_override=None):
    """Deliver the payload to the target URL.

    Uses a project-configured ``HOOK_DELIVERER`` when available; otherwise
    serializes to JSON and POSTs directly.
    """
    payload = payload_override if payload_override else serialize_hook(instance)
    if hasattr(settings, 'HOOK_DELIVERER'):
        deliverer = get_module(settings.HOOK_DELIVERER)
        deliverer(target, payload, instance=instance)
        return None
    client.post(
        url=target,
        data=json.dumps(payload, cls=serializers.json.DjangoJSONEncoder),
        headers={'Content-Type': 'application/json'}
    )
    return None
def paginate(self, klass=Paginator, per_page=None, page=1, *args, **kwargs):
    """Paginate the table and compute the page-number window for templates.

    ``klass`` is the paginator class, ``per_page`` the page size (falls back
    to the table Meta), ``page`` the page to show. Extra arguments are
    forwarded to the paginator; pagination exceptions propagate to the caller.
    """
    self.per_page_options = [25, 50, 100, 200]  # TODO: should be a passed-in option
    self.per_page = per_page = per_page or self._meta.per_page
    self.paginator = klass(self.rows, per_page, *args, **kwargs)
    self.page = self.paginator.page(page)

    # Window of page links shown around the current page.
    window = 1  # TODO: should be a passed-in option
    first_shown = max(self.page.number - window, 1)
    if first_shown <= 3:
        first_shown = 1  # close enough to the start: no leading ellipsis
    last_shown = self.page.number + window + 1
    if last_shown >= self.paginator.num_pages - 1:
        last_shown = self.paginator.num_pages + 1  # no trailing ellipsis

    self.page_numbers = [
        n for n in range(first_shown, last_shown)
        if 0 < n <= self.paginator.num_pages
    ]
    self.show_first = 1 not in self.page_numbers
    self.show_last = self.paginator.num_pages not in self.page_numbers
def get_fabric_tasks(self, project):
    """Generate the list of available fabric tasks for ``project`` (cached)."""
    cache_key = 'project_{}_fabfile_tasks'.format(project.pk)
    cached = cache.get(cache_key)
    if cached:
        return cached
    try:
        fabfile_path, activate_loc = self.get_fabfile_path(project)
        # List task names; source the virtualenv first when one exists.
        if activate_loc:
            listing = self.check_output(
                'source {};fab --list --list-format=short --fabfile={}'.format(activate_loc, fabfile_path),
                shell=True
            )
        else:
            listing = self.check_output(
                'fab --list --list-format=short --fabfile={}'.format(fabfile_path),
                shell=True
            )
        tasks = []
        for raw_line in listing.splitlines():
            task_name = raw_line.strip()
            # Fetch the per-task detail text for parsing.
            if activate_loc:
                details = self.check_output(
                    'source {};fab --display={} --fabfile={}'.format(activate_loc, task_name, fabfile_path),
                    shell=True
                )
            else:
                details = self.check_output(
                    ['fab', '--display={}'.format(task_name), '--fabfile={}'.format(fabfile_path)]
                )
            tasks.append(self.parse_task_details(task_name, details))
        cache.set(cache_key, tasks, settings.FABRIC_TASK_CACHE_TIMEOUT)
    except Exception:
        # Best effort: a broken fabfile simply yields an empty task list.
        tasks = []
    return tasks
def form_valid(self, form):
    """After the form is valid, flash a success message naming the project."""
    response = super(ProjectCreate, self).form_valid(form)
    messages.add_message(self.request, messages.SUCCESS, 'Project %s created' % self.object.name)
    return response
def get_initial(self):
    """Return the initial form data, pre-filled from the project being copied."""
    initial = super(ProjectCopy, self).get_initial()
    source = self.copy_object
    if source:
        initial.update({
            'name': '%s copy' % source.name,
            'description': source.description,
            'use_repo_fabfile': source.use_repo_fabfile,
            'fabfile_requirements': source.fabfile_requirements,
            'repo_url': source.repo_url,
        })
    return initial
def copy_configurations(self, stages=None):
    """Copy configurations onto the new project.

    With ``stages`` given as (old_stage, new_stage), the old stage's
    configurations are cloned onto the new stage; otherwise the source
    project's project-level configurations are cloned.
    """
    if stages:
        source_confs = stages[0].stage_configurations()
        target_stage = stages[1]
    else:
        source_confs = self.copy_object.project_configurations()
        target_stage = None
    for original in source_confs:
        duplicate = deepcopy(original)
        duplicate.id = None  # force an INSERT on save
        duplicate.project = self.object
        duplicate.stage = target_stage
        duplicate.save()
def form_valid(self, form):
    """After the form is valid, copy relations and flash a success message."""
    response = super(ProjectCopy, self).form_valid(form)
    self.copy_relations()
    messages.add_message(self.request, messages.SUCCESS, 'Project %s copied' % self.object.name)
    return response
def form_valid(self, form):
    """Set the project (and stage, when given) on this configuration, then save."""
    self.object = form.save(commit=False)
    self.object.project = self.project
    stage_id = self.kwargs.get('stage_id', None)
    if stage_id:
        self.object.stage = models.Stage.objects.get(pk=stage_id)
    self.object.save()
    messages.add_message(self.request, messages.SUCCESS, 'Configuration %s created' % self.object.key)
    return super(ProjectConfigurationCreate, self).form_valid(form)
def get_success_url(self):
    """Pick the redirect target based on what kind of configuration was deleted."""
    if self.stage_id:
        return reverse('projects_stage_view', args=(self.project_id, self.stage_id))
    return reverse('projects_project_view', args=(self.project_id,))
def form_valid(self, form):
    """Set the project on this stage after validation, then save and notify."""
    self.object = form.save(commit=False)
    self.object.project = self.project
    self.object.save()
    messages.add_message(self.request, messages.SUCCESS, 'Stage %s created' % self.object.name)
    return super(ProjectStageCreate, self).form_valid(form)
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
    """Deliver ``payload`` to ``target`` (delegates to ``post_data``).

    ``instance`` is a possibly-null trigger instance; ``hook_id`` identifies
    the defining Hook object (useful for removing).
    """
    self.post_data(target, payload, hook_id)
def create_ssh_config(remote_user='root', name='Auto Generated SSH Key',
                      file_name='fabricbolt_private.key', email='[email protected]', public_key_text=None,
                      private_key_text=None):
    """Create and persist an SSH key configuration.

    When neither key text is supplied, a fresh 2048-bit RSA key pair is
    generated. NOTE(review): supplying only one of the two key texts leaves
    the other one empty -- confirm callers never do that.
    """
    if not private_key_text and not public_key_text:
        generated = RSA.generate(2048)
        private_key_text = generated.exportKey('PEM')
        public_key_text = generated.publickey().exportKey('OpenSSH')
    config = models.SSHConfig()
    config.name = name
    config.private_key_file.save(file_name, ContentFile(private_key_text))
    config.public_key = '{} {}'.format(public_key_text, email)
    config.remote_user = remote_user
    config.save()
    return config
def convert(self, json="", table_attributes='border="1"', clubbing=True, encode=False, escape=True):
    """Convert JSON to HTML Table format.

    ``table_attributes`` is injected verbatim into the <table> tag,
    e.g. 'class = "table table-bordered sortable"'.
    """
    self.table_init_markup = "<table %s>" % table_attributes
    self.clubbing = clubbing
    self.escape = escape
    if not json:
        json_input = {}
    elif type(json) in text_types:
        try:
            json_input = json_parser.loads(json, object_pairs_hook=OrderedDict)
        except ValueError as e:
            # The string is not valid JSON -- decide whether to surface the
            # error or treat the string as a plain text node.
            if u"Expecting property name" in text(e):
                # This specific error usually means the caller *meant* to
                # pass JSON but made a mistake, so re-raise.
                raise e
            json_input = json
    else:
        json_input = json
    converted = self.convert_json_node(json_input)
    if encode:
        return converted.encode('ascii', 'xmlcharrefreplace')
    return converted
def column_headers_from_list_of_dicts(self, json_input):
    """Derive the shared column headers needed for clubbing.

    Returns the first entry's keys when every entry is dict-like with
    exactly the same key set; returns None otherwise.
    """
    if not json_input:
        return None
    if not hasattr(json_input, '__getitem__'):
        return None
    first = json_input[0]
    if not hasattr(first, 'keys'):
        return None
    headers = first.keys()
    for entry in json_input:
        dict_like = hasattr(entry, 'keys') and hasattr(entry, '__iter__')
        if not dict_like or len(entry.keys()) != len(headers):
            return None
        if any(header not in entry for header in headers):
            return None
    return headers
def convert_json_node(self, json_input):
    """Dispatch on the outermost type of ``json_input`` and render HTML.

    Duck-typed on purpose: anything with ``items`` renders as a table,
    anything indexable-and-iterable as a list, everything else as text.
    """
    if type(json_input) in text_types:
        return cgi.escape(text(json_input)) if self.escape else text(json_input)
    if hasattr(json_input, 'items'):
        return self.convert_object(json_input)
    if hasattr(json_input, '__iter__') and hasattr(json_input, '__getitem__'):
        return self.convert_list(json_input)
    return text(json_input)
def convert_list(self, list_input):
    """Render a JSON list as a clubbed table or, failing that, a <ul> list.

    When clubbing is enabled and every entry shares the same key set, one
    table with a row per entry is produced instead of a nested table per
    entry -- much more readable.
    """
    if not list_input:
        return ""
    if self.clubbing:
        headers = self.column_headers_from_list_of_dicts(list_input)
        if headers is not None:
            pieces = [self.table_init_markup]
            pieces.append('<thead>')
            pieces.append('<tr><th>' + '</th><th>'.join(headers) + '</th></tr>')
            pieces.append('</thead>')
            pieces.append('<tbody>')
            for entry in list_input:
                cells = [self.convert_json_node(entry[header]) for header in headers]
                pieces.append('<tr><td>' + '</td><td>'.join(cells) + '</td></tr>')
            pieces.append('</tbody>')
            pieces.append('</table>')
            return ''.join(pieces)
    # Clubbing unwanted or impossible: fall back to a basic list.
    items = [self.convert_json_node(child) for child in list_input]
    return '<ul><li>' + '</li><li>'.join(items) + '</li></ul>'
def convert_object(self, json_input):
    """Render a JSON object as a two-column (key/value) HTML table."""
    if not json_input:
        return ""  # avoid empty tables
    rows = [
        "<th>%s</th><td>%s</td>" % (
            self.convert_json_node(key),
            self.convert_json_node(value)
        )
        for key, value in json_input.items()
    ]
    return self.table_init_markup + "<tr>" + "</tr><tr>".join(rows) + '</tr></table>'
def cameraUrls(self, camera=None, home=None, cid=None):
    """Return the (vpn_url, local_url) pair for a camera's live feed.

    ``local_url`` is probed with a ping and comes back None when the camera
    is not reachable locally. The ``is_local`` flag cannot be trusted here:
    it is often stale after an operator dynamic-IP change.
    """
    local_url = None
    vpn_url = None
    if cid:
        camera_data = self.cameraById(cid)
    else:
        camera_data = self.cameraByName(camera=camera, home=home)
    if camera_data:
        vpn_url = camera_data['vpn_url']
        resp = postRequest(vpn_url + '/command/ping')
        candidate = resp['local_url']
        try:
            resp = postRequest(candidate + '/command/ping', timeout=1)
            if resp and candidate == resp['local_url']:
                local_url = candidate
        except:  # timeout here simply means "not reachable on the LAN"
            local_url = None
    return vpn_url, local_url
def personsAtHome(self, home=None):
    """Return the list of known persons who are currently at home."""
    home_data = self.homeByName(home or self.default_home)
    # Only known persons carry a 'pseudo'; unknown faces are skipped.
    return [
        person['pseudo']
        for person in home_data['persons']
        if 'pseudo' in person and not person["out_of_sight"]
    ]
def getCameraPicture(self, image_id, key):
    """Download a specific image (event snapshot or user face) from the camera.

    Returns a (raw_bytes, detected_image_type) tuple.
    """
    payload = {
        "access_token": self.getAuthToken,
        "image_id": image_id,
        "key": key
    }
    resp = postRequest(_GETCAMERAPICTURE_REQ, payload)
    return resp, imghdr.what('NONE.FILE', resp)
def getProfileImage(self, name):
    """Retrieve the face image of a given person; (None, None) when unknown."""
    for person_id in self.persons:
        person = self.persons[person_id]
        if 'pseudo' in person and person['pseudo'] == name:
            face = person['face']
            return self.getCameraPicture(face['id'], face['key'])
    return None, None
def updateEvent(self, event=None, home=None):
    """Update the stored per-camera event lists with the latest events."""
    if not home:
        home = self.default_home
    if not event:
        # No event given: start from the oldest of the per-camera last
        # events so nothing is missed.
        last_by_time = dict()
        for cam_id in self.lastEvent:
            last_by_time[self.lastEvent[cam_id]['time']] = self.lastEvent[cam_id]
        event = last_by_time[sorted(last_by_time)[0]]
    home_data = self.homeByName(home)
    postParams = {
        "access_token": self.getAuthToken,
        "home_id": home_data['id'],
        "event_id": event['id']
    }
    resp = postRequest(_GETEVENTSUNTIL_REQ, postParams)
    for e in resp['body']['events_list']:
        self.events[e['camera_id']][e['time']] = e
    # Refresh each camera's most recent event.
    for camera in self.events:
        self.lastEvent[camera] = self.events[camera][sorted(self.events[camera])[-1]]
def personSeenByCamera(self, name, home=None, camera=None):
    """Return True if the named person appears in the camera's last event."""
    try:
        cam_id = self.cameraByName(camera=camera, home=home)['id']
    except TypeError:
        logger.warning("personSeenByCamera: Camera name or home is unknown")
        return False
    last = self.lastEvent[cam_id]
    if last['type'] == 'person':
        person = self.persons[last['person_id']]
        if 'pseudo' in person and person['pseudo'] == name:
            return True
    return False
def someoneKnownSeen(self, home=None, camera=None):
    """Return True if the camera's last event shows a known person."""
    try:
        cam_id = self.cameraByName(camera=camera, home=home)['id']
    except TypeError:
        logger.warning("personSeenByCamera: Camera name or home is unknown")
        return False
    last = self.lastEvent[cam_id]
    return last['type'] == 'person' and last['person_id'] in self._knownPersons()
def motionDetected(self, home=None, camera=None):
    """Return True if movement is the camera's last recorded event."""
    try:
        cam_id = self.cameraByName(camera=camera, home=home)['id']
    except TypeError:
        logger.warning("personSeenByCamera: Camera name or home is unknown")
        return False
    return self.lastEvent[cam_id]['type'] == 'movement'
def batch(sequence, callback, size=100, **kwargs):
    """Split ``sequence`` into chunks of ``size`` and yield ``callback(chunk)``.

    Useful for bulk-update endpoints that cap the number of resources per
    request (typically 100). Any extra keyword arguments are forwarded to
    ``callback`` on every chunk.
    """
    for offset in range(0, len(sequence), size):
        yield callback(sequence[offset:offset + size], **kwargs)
Please provide a description of the function:def call(self, path, query=None, method='GET', data=None,
files=None, get_all_pages=False, complete_response=False,
retry_on=None, max_retries=0, raw_query=None, retval=None,
**kwargs):
# Rather obscure way to support retry_on per single API call
if retry_on and max_retries:
try:
_retry_on = self._retry_on
_max_retries = self._max_retries
self.retry_on = retry_on
self.max_retries = max_retries
return self.call(path=path,
query=query,
method=method,
data=data,
files=files,
get_all_pages=get_all_pages,
complete_response=complete_response)
finally:
self._retry_on = _retry_on
self._max_retries = _max_retries
# Support specifying a mime-type other than application/json
mime_type = kwargs.pop('mime_type', 'application/json')
for key in kwargs.keys():
value = kwargs[key]
if hasattr(value, '__iter__') and not isinstance(value, str):
kwargs[key] = ','.join(map(str, value))
if query:
if kwargs:
kwargs.update(query)
else:
kwargs = query
if raw_query:
path = path + raw_query
kwargs = None
url = self.zdesk_url + path
if files:
# Sending multipart file. data contains parameters.
json = None
self.headers.pop('Content-Type', None)
elif (mime_type == 'application/json' and
(method == 'POST' or method == 'PUT')):
# Sending JSON data.
json = data
data = {}
self.headers.pop('Content-Type', None)
elif (mime_type != 'application/json' and
(method == 'POST' or method == 'PUT')):
# Uploading an attachment, probably.
# Specifying the MIME type is required.
json = None
self.headers['Content-Type'] = mime_type
else:
# Probably a GET or DELETE. Not sending JSON or files.
json = None
self.headers.pop('Content-Type', None)
results = []
all_requests_complete = False
request_count = 0
while not all_requests_complete:
# Make an http request
# counts request attempts in order to fetch this specific one
request_count += 1
try:
response = self.client.request(method,
url,
params=kwargs,
json=json,
data=data,
headers=self.headers,
files=files,
**self.client_args)
except requests.RequestException:
if request_count <= self.max_retries:
# we have to bind response to None in case
# self.client.request raises an exception and
# response holds old requests.Response
# (and possibly its Retry-After header)
response = None
self._handle_retry(response)
continue
else:
raise
# If the response status is not in the 200 range then assume an
# error and raise proper exception
code = response.status_code
try:
if not 200 <= code < 300 and code != 422:
if code == 401:
raise AuthenticationError(
response.content, code, response)
elif code == 429:
raise RateLimitError(
response.content, code, response)
else:
raise ZendeskError(
response.content, code, response)
except ZendeskError:
if request_count <= self.max_retries:
self._handle_retry(response)
continue
else:
raise
# Deserialize json content if content exists.
# In some cases Zendesk returns ' ' strings.
# Also return false non strings (0, [], (), {})
if response.content.strip() and 'json' in response.headers['content-type']:
content = response.json()
# set url to the next page if that was returned in the response
url = content.get('next_page', None)
# url we get above already has the start_time appended to it,
# specific to incremental exports
kwargs = {}
elif response.content.strip() and 'text' in response.headers['content-type']:
try:
content = response.json()
# set url to the next page if that was returned in the response
url = content.get('next_page', None)
# url we get above already has the start_time appended to it,
# specific to incremental exports
kwargs = {}
except ValueError:
content = response.content
else:
content = response.content
url = None
if complete_response:
results.append({
'response': response,
'content': content,
'status': response.status_code
})
else:
if retval == 'content':
results.append(content)
elif retval == 'code':
results.append(response.status_code)
elif retval == 'location':
results.append(response.headers.get('location'))
elif retval == 'headers':
results.append(response.headers)
else:
# Attempt to automatically determine the value of
# most interest to return.
if response.headers.get('location'):
# Zendesk's response is sometimes the url of a newly
# created user/ticket/group/etc and they pass this through
# 'location'. Otherwise, the body of 'content'
# has our response.
results.append(response.headers.get('location'))
elif content:
results.append(content)
else:
results.append(responses[response.status_code])
# if there is a next_page, and we are getting pages, then continue
# making requests
# deal with how incremental export results are returned
# there could be two cases
# response code == 422 returned when end_time < five minutes recent
# or count < 1000
# this is an ugly check, and we have to check this just for incremental export end-points
# non-incremental load end-points have a 100 item/page limit and return next_page = null for last page
# also note that incremental/ticket_metric_events end-point has a 10,000 items per page limit
url = None if (url is not None and
'incremental' in url and
content.get('count') < 1000) else url
all_requests_complete = not (get_all_pages and url)
request_count = 0
if get_all_pages and complete_response:
# Return the list of results from all calls made.
# This way even if only one page was present the caller will
# always receive back an iterable value, since multiple pages
# were requested/expected. This also provides the information for
# every call, and saves us from having to try to combine all of
# that ourselves in a sensible way.
return results
if len(results) == 1:
# regardless as to whether all pages were requested, there was
# only one call and set of results, so just send it back.
return results[0]
# Now we need to try to combine or reduce the results:
hashable = True
try:
if len(set(results)) == 1:
# all responses were the same, so return just the first one.
# may have a list of locations or response statuses
return results[0]
except TypeError:
# probably we have a list of content dictionaries.
hashable = False
if hashable:
# we have a list of simple objects like strings, but they are not
# all the same so send them all back.
return results
# may have a sequence of response contents
# (dicts, possibly lists in the future as that is valid json also)
combined_dict_results = {}
combined_list_results = []
for result in results:
if isinstance(result, list):
# the result of this call returned a list.
# extend the combined list with these results.
combined_list_results.extend(result)
elif isinstance(result, dict):
# the result of this call returned a dict. the dict probably
# has both simple attributes (strings) and complex attributes
# (lists). if the attribute is a list, we will extend the
# combined attribute, otherwise we will just take the last
# attribute value from the last call.
# the end result is a response that looks like one giant call,
# to e.g. list tickets, but was actually made by multiple API
# calls.
for k in result.keys():
v = result[k]
if isinstance(v, list):
try:
combined_dict_results[k].extend(v)
except KeyError:
combined_dict_results[k] = v
else:
combined_dict_results[k] = v
else:
# returned result is not a dict or a list. don't know how to
# deal with this, so just send everything back.
return results
if combined_list_results and combined_dict_results:
# there was a mix of list and dict results from the sequence
# of calls. this case seems very odd to me if it ever happens.
# at any rate, send everything back uncombined
return results
if combined_dict_results:
return combined_dict_results
if combined_list_results:
return combined_list_results
# I don't expect to make it here, but I suppose it could happen if,
# perhaps, a sequence of empty dicts were returned or some such.
# Send everything back.
return results | [
"Make a REST call to the Zendesk web service.\n\n Parameters:\n path - Path portion of the Zendesk REST endpoint URL.\n query - Query parameters in dict form.\n method - HTTP method to use in making the request.\n data - POST data or multi-part form data to include.\n files - Requests style dict of files for multi-part file uploads.\n get_all_pages - Make multiple requests and follow next_page.\n complete_response - Return raw request results.\n retry_on - Specify any exceptions from ACCEPT_RETRIES or non-2xx\n HTTP codes on which you want to retry request.\n Note that calling Zendesk.call with get_all_pages=True can make\n up to (max_retries + 1) * pages.\n Defaults to empty set, but can be any iterable, exception or int,\n which will become set with same values you provided.\n max_retries - How many additional connections to make when\n first one fails. No effect when retry_on evaluates to False.\n Defaults to 0.\n raw_query - Raw query string, starting with '?', that will be\n appended to the URL path and will completely override / discard\n any other query parameters. Enables use cases where query\n parameters need to be repeated in the query string.\n retval - Request a specific part of the returned response. Valid\n values are 'content', 'code', 'location', and 'headers'.\n JSON content is still automatically deserialized if possible.\n If retval is not specified, then the old behavior of trying\n to determine an appropriate value to return is used.\n "
] |
def _handle_retry(self, resp):
    """Decide whether a failed request should be retried.

    Must be invoked from inside an ``except`` block. Re-raises the
    active exception unless it matches one of the exception classes or
    integer status codes configured in ``self._retry_on``; when a retry
    is warranted, honours any Retry-After header on *resp* first.

    Parameters:
        resp: the response object from the failed call (or None when
            no response was obtained at all).

    Returns: True when the caller should retry the request; otherwise
    the original exception is re-raised.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if exc_type is None:
        raise TypeError('Must be called in except block.')
    # Split the configured retry triggers into exception classes and
    # integer HTTP status codes.
    exc_classes = tuple(x for x in self._retry_on if inspect.isclass(x))
    status_codes = tuple(x for x in self._retry_on if isinstance(x, int))
    if issubclass(exc_type, ZendeskError):
        # A Zendesk error may match either by class or by its code.
        should_retry = (exc_type in exc_classes or
                        exc_value.error_code in status_codes)
    else:
        should_retry = issubclass(exc_type, exc_classes)
    if not should_retry:
        six.reraise(exc_type, exc_value, exc_traceback)
    if resp is not None:
        # Best effort: respect the server's Retry-After hint; ignore
        # malformed or missing header values.
        try:
            time.sleep(float(resp.headers.get('Retry-After', 0)))
        except (TypeError, ValueError):
            pass
    return True
"Handle any exceptions during API request or\n parsing its response status code.\n\n Parameters:\n resp: requests.Response instance obtained during concerning request\n or None, when request failed\n\n Returns: True if should retry our request or raises original Exception\n "
] |
def account_settings_update(self, data, **kwargs):
    """Update the account's settings.

    See: https://developer.zendesk.com/rest_api/docs/core/account_settings#update-account-settings
    """
    return self.call("/api/v2/account/settings.json",
                     method="PUT", data=data, **kwargs)
def account_update(self, data, **kwargs):
    """Update the (chat) account.

    See: https://developer.zendesk.com/rest_api/docs/chat/accounts#update-account
    """
    return self.call("/api/v2/account", method="PUT", data=data, **kwargs)
def activities_list(self, since=None, **kwargs):
    """List activities, optionally restricted to those after *since*.

    See: https://developer.zendesk.com/rest_api/docs/core/activity_stream#list-activities
    """
    # Fold any caller-supplied query parameters into our own.
    params = dict(kwargs.pop("query", {}))
    if since:
        params["since"] = since
    return self.call("/api/v2/activities.json", query=params, **kwargs)
def activity_show(self, activity_id, **kwargs):
    """Show a single activity-stream event.

    See: https://developer.zendesk.com/rest_api/docs/core/activity_stream#show-activity
    """
    endpoint = "/api/v2/activities/{activity_id}.json".format(
        activity_id=activity_id)
    return self.call(endpoint, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.