sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class: entailment)
---|---|---|
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = filter(self.server_key_dict.keys().__contains__,
self._preferred_keys)
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(chr(MSG_KEXINIT))
m.add_bytes(rng.read(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string('')
m.add_string('')
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = str(m)
self._send_message(m) | announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support. | entailment |
def _generate_prime(bits, rng):
"primtive attempt at prime generation"
hbyte_mask = pow(2, bits % 8) - 1
while True:
# loop catches the case where we increment n into a higher bit-range
x = rng.read((bits+7) // 8)
if hbyte_mask > 0:
x = chr(ord(x[0]) & hbyte_mask) + x[1:]
n = util.inflate_long(x, 1)
n |= 1
n |= (1 << (bits - 1))
while not number.isPrime(n):
n += 2
if util.bit_length(n) == bits:
break
return n | primitive attempt at prime generation | entailment |
def _roll_random(rng, n):
"returns a random # from 0 to N-1"
bits = util.bit_length(n-1)
bytes = (bits + 7) // 8
hbyte_mask = pow(2, bits % 8) - 1
# so here's the plan:
# we fetch as many random bits as we'd need to fit N-1, and if the
# generated number is >= N, we try again. in the worst case (N-1 is a
# power of 2), we have slightly better than 50% odds of getting one that
# fits, so i can't guarantee that this loop will ever finish, but the odds
# of it looping forever should be infinitesimal.
while True:
x = rng.read(bytes)
if hbyte_mask > 0:
x = chr(ord(x[0]) & hbyte_mask) + x[1:]
num = util.inflate_long(x, 1)
if num < n:
break
return num | returns a random # from 0 to N-1 | entailment |
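A minimal standalone sketch of the same rejection-sampling idea, written for Python 3 with os.urandom in place of the library's rng object (the helper name roll_random is hypothetical, not part of the library):

```python
import os

def roll_random(n):
    """Uniform random integer in [0, n-1] via rejection sampling."""
    if n <= 1:
        return 0
    bits = (n - 1).bit_length()
    nbytes = (bits + 7) // 8
    while True:
        x = bytearray(os.urandom(nbytes))
        if bits % 8:
            x[0] &= (1 << (bits % 8)) - 1   # drop excess high bits so the draw fits in `bits`
        num = int.from_bytes(bytes(x), 'big')
        if num < n:                          # reject out-of-range draws and retry
            return num
```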
def _write_private_key_file(self, tag, filename, data, password=None):
"""
Write an SSH2-format private key file in a form that can be read by
ssh or openssh. If no password is given, the key is written in
a trivially-encoded format (base64) which is completely insecure. If
a password is given, DES-EDE3-CBC is used.
@param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block.
@type tag: str
@param filename: name of the file to write.
@type filename: str
@param data: data blob that makes up the private key.
@type data: str
@param password: an optional password to use to encrypt the file.
@type password: str
@raise IOError: if there was an error writing the file.
"""
f = open(filename, 'w', 0600)
# grrr... the mode doesn't always take hold
os.chmod(filename, 0600)
self._write_private_key(tag, f, data, password)
f.close() | Write an SSH2-format private key file in a form that can be read by
ssh or openssh. If no password is given, the key is written in
a trivially-encoded format (base64) which is completely insecure. If
a password is given, DES-EDE3-CBC is used.
@param tag: C{"RSA"} or C{"DSA"}, the tag used to mark the data block.
@type tag: str
@param filename: name of the file to write.
@type filename: str
@param data: data blob that makes up the private key.
@type data: str
@param password: an optional password to use to encrypt the file.
@type password: str
@raise IOError: if there was an error writing the file. | entailment |
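The method above is the internal helper; the usual entry point is the public write_private_key_file() on a key object. A hedged sketch of typical usage (the output filename and passphrase are placeholders):

```python
import paramiko

key = paramiko.RSAKey.generate(2048)                  # generate a fresh RSA key pair
key.write_private_key_file('test_rsa.key',            # file is created with mode 0600
                           password='my-passphrase')  # passphrase enables encryption per the docstring above
print(key.get_base64())                               # public part, e.g. for authorized_keys
```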
def set_event(self, event):
"""
Set an event on this buffer. When data is ready to be read (or the
buffer has been closed), the event will be set. When no data is
ready, the event will be cleared.
@param event: the event to set/clear
@type event: Event
"""
self._event = event
if len(self._buffer) > 0:
event.set()
else:
event.clear() | Set an event on this buffer. When data is ready to be read (or the
buffer has been closed), the event will be set. When no data is
ready, the event will be cleared.
@param event: the event to set/clear
@type event: Event | entailment |
def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
@param data: the data to add
@type data: str
"""
self._lock.acquire()
try:
if self._event is not None:
self._event.set()
self._buffer.fromstring(data)
self._cv.notifyAll()
finally:
self._lock.release() | Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
@param data: the data to add
@type data: str | entailment |
def read(self, offset, length):
"""
Read up to C{length} bytes from this file, starting at position
C{offset}. The offset may be a python long, since SFTP allows it
to be 64 bits.
If the end of the file has been reached, this method may return an
empty string to signify EOF, or it may also return L{SFTP_EOF}.
The default implementation checks for an attribute on C{self} named
C{readfile}, and if present, performs the read operation on the python
file-like object found there. (This is meant as a time saver for the
common case where you are wrapping a python file object.)
@param offset: position in the file to start reading from.
@type offset: int or long
@param length: number of bytes to attempt to read.
@type length: int
@return: data read from the file, or an SFTP error code.
@rtype: str
"""
readfile = getattr(self, 'readfile', None)
if readfile is None:
return SFTP_OP_UNSUPPORTED
try:
if self.__tell is None:
self.__tell = readfile.tell()
if offset != self.__tell:
readfile.seek(offset)
self.__tell = offset
data = readfile.read(length)
except IOError, e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
self.__tell += len(data)
return data | Read up to C{length} bytes from this file, starting at position
C{offset}. The offset may be a python long, since SFTP allows it
to be 64 bits.
If the end of the file has been reached, this method may return an
empty string to signify EOF, or it may also return L{SFTP_EOF}.
The default implementation checks for an attribute on C{self} named
C{readfile}, and if present, performs the read operation on the python
file-like object found there. (This is meant as a time saver for the
common case where you are wrapping a python file object.)
@param offset: position in the file to start reading from.
@type offset: int or long
@param length: number of bytes to attempt to read.
@type length: int
@return: data read from the file, or an SFTP error code.
@rtype: str | entailment |
def write(self, offset, data):
"""
Write C{data} into this file at position C{offset}. Extending the
file past its original end is expected. Unlike python's normal
C{write()} methods, this method cannot do a partial write: it must
write all of C{data} or else return an error.
The default implementation checks for an attribute on C{self} named
C{writefile}, and if present, performs the write operation on the
python file-like object found there. The attribute is named
differently from C{readfile} to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
@param offset: position in the file to start reading from.
@type offset: int or long
@param data: data to write into the file.
@type data: str
@return: an SFTP error code like L{SFTP_OK}.
"""
writefile = getattr(self, 'writefile', None)
if writefile is None:
return SFTP_OP_UNSUPPORTED
try:
# in append mode, don't care about seeking
if (self.__flags & os.O_APPEND) == 0:
if self.__tell is None:
self.__tell = writefile.tell()
if offset != self.__tell:
writefile.seek(offset)
self.__tell = offset
writefile.write(data)
writefile.flush()
except IOError, e:
self.__tell = None
return SFTPServer.convert_errno(e.errno)
if self.__tell is not None:
self.__tell += len(data)
return SFTP_OK | Write C{data} into this file at position C{offset}. Extending the
file past its original end is expected. Unlike python's normal
C{write()} methods, this method cannot do a partial write: it must
write all of C{data} or else return an error.
The default implementation checks for an attribute on C{self} named
C{writefile}, and if present, performs the write operation on the
python file-like object found there. The attribute is named
differently from C{readfile} to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
@param offset: position in the file to start reading from.
@type offset: int or long
@param data: data to write into the file.
@type data: str
@return: an SFTP error code like L{SFTP_OK}. | entailment |
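A minimal sketch of how a server-side handle can rely on the default read()/write() shown above simply by exposing readfile/writefile attributes (the class name and path are hypothetical; assumes the paramiko-style SFTPHandle base class and an existing local file):

```python
import paramiko

class LocalFileHandle(paramiko.SFTPHandle):
    def __init__(self, path, flags=0):
        paramiko.SFTPHandle.__init__(self, flags)
        f = open(path, 'rb+')
        # the default read()/write() implementations look for these attributes:
        self.readfile = f
        self.writefile = f   # same object, so read and write offsets stay consistent
```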
def canonicalize(self, path):
"""
Return the canonical form of a path on the server. For example,
if the server's home folder is C{/home/foo}, the path
C{"../betty"} would be canonicalized to C{"/home/betty"}. Note
the obvious security issues: if you're serving files only from a
specific folder, you probably don't want this method to reveal path
names outside that folder.
You may find the python methods in C{os.path} useful, especially
C{os.path.normpath} and C{os.path.realpath}.
The default implementation returns C{os.path.normpath('/' + path)}.
"""
if os.path.isabs(path):
out = os.path.normpath(path)
else:
out = os.path.normpath('/' + path)
if sys.platform == 'win32':
# on windows, normalize backslashes to sftp/posix format
out = out.replace('\\', '/')
return out | Return the canonical form of a path on the server. For example,
if the server's home folder is C{/home/foo}, the path
C{"../betty"} would be canonicalized to C{"/home/betty"}. Note
the obvious security issues: if you're serving files only from a
specific folder, you probably don't want this method to reveal path
names outside that folder.
You may find the python methods in C{os.path} useful, especially
C{os.path.normpath} and C{os.path.realpath}.
The default implementation returns C{os.path.normpath('/' + path)}. | entailment |
def connect(self):
"""
Method automatically called by the run() method of the AgentProxyThread
"""
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
elif sys.platform == 'win32':
import win_pageant
if win_pageant.can_talk_to_agent():
conn = win_pageant.PageantConnection()
else:
return
else:
# no agent support
return
self._conn = conn | Method automatically called by the run() method of the AgentProxyThread | entailment |
def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written
"""
f = open(filename, 'w')
f.write('# SSH host keys collected by ssh\n')
for hostname, keys in self._host_keys.iteritems():
for keytype, key in keys.iteritems():
f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
f.close() | Save the host keys back to a file. Only the host keys loaded with
L{load_host_keys} (plus any added directly) will be saved -- not any
host keys loaded with L{load_system_host_keys}.
@param filename: the filename to save to
@type filename: str
@raise IOError: if the file could not be written | entailment |
def exec_command(self, command, bufsize=-1):
"""
Execute a command on the SSH server. A new L{Channel} is opened and
the requested command is executed. The command's input and output
streams are returned as python C{file}-like objects representing
stdin, stdout, and stderr.
@param command: the command to execute
@type command: str
@param bufsize: interpreted the same way as by the built-in C{file()} function in python
@type bufsize: int
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})
@raise SSHException: if the server fails to execute the command
"""
chan = self._transport.open_session()
chan.exec_command(command)
stdin = chan.makefile('wb', bufsize)
stdout = chan.makefile('rb', bufsize)
stderr = chan.makefile_stderr('rb', bufsize)
return stdin, stdout, stderr | Execute a command on the SSH server. A new L{Channel} is opened and
the requested command is executed. The command's input and output
streams are returned as python C{file}-like objects representing
stdin, stdout, and stderr.
@param command: the command to execute
@type command: str
@param bufsize: interpreted the same way as by the built-in C{file()} function in python
@type bufsize: int
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})
@raise SSHException: if the server fails to execute the command | entailment |
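Typical client-side usage of exec_command(), as a hedged sketch (hostname and credentials are placeholders):

```python
import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('ssh.example.com', username='user', password='secret')
stdin, stdout, stderr = client.exec_command('uname -a')
print(stdout.read())      # blocks until the command's stdout is closed
client.close()
```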
def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys):
"""
Try, in order:
- The key passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key.)
The password is required for two-factor authentication.
"""
saved_exception = None
two_factor = False
allowed_types = []
if pkey is not None:
try:
self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
allowed_types = self._transport.auth_publickey(username, pkey)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
except SSHException, e:
saved_exception = e
if not two_factor:
for key_filename in key_filenames:
for pkey_class in (RSAKey, DSSKey):
try:
key = pkey_class.from_private_key_file(key_filename, password)
self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
if not two_factor and allow_agent:
if self._agent == None:
self._agent = Agent()
for key in self._agent.get_keys():
try:
self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
if not two_factor:
keyfiles = []
rsa_key = os.path.expanduser('~/.ssh/id_rsa')
dsa_key = os.path.expanduser('~/.ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
# look in ~/ssh/ for windows users:
rsa_key = os.path.expanduser('~/ssh/id_rsa')
dsa_key = os.path.expanduser('~/ssh/id_dsa')
if os.path.isfile(rsa_key):
keyfiles.append((RSAKey, rsa_key))
if os.path.isfile(dsa_key):
keyfiles.append((DSSKey, dsa_key))
if not look_for_keys:
keyfiles = []
for pkey_class, filename in keyfiles:
try:
key = pkey_class.from_private_key_file(filename, password)
self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
# for 2-factor auth a successfully auth'd key will result in ['password']
allowed_types = self._transport.auth_publickey(username, key)
two_factor = (allowed_types == ['password'])
if not two_factor:
return
break
except SSHException, e:
saved_exception = e
except IOError, e:
saved_exception = e
if password is not None:
try:
self._transport.auth_password(username, password)
return
except SSHException, e:
saved_exception = e
elif two_factor:
raise SSHException('Two-factor authentication requires a password')
# if we got an auth-failed exception earlier, re-raise it
if saved_exception is not None:
raise saved_exception
raise SSHException('No authentication methods available') | Try, in order:
- The key passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.
(The password might be needed to unlock a private key.)
The password is required for two-factor authentication. | entailment |
def get_bytes(self, n):
"""
Return the next C{n} bytes of the Message, without decomposing into
an int, string, etc. Just the raw bytes are returned.
@return: a string of the next C{n} bytes of the Message, or a string
of C{n} zero bytes, if there aren't C{n} bytes remaining.
@rtype: string
"""
b = self.packet.read(n)
if len(b) < n:
return b + '\x00' * (n - len(b))
return b | Return the next C{n} bytes of the Message, without decomposing into
an int, string, etc. Just the raw bytes are returned.
@return: a string of the next C{n} bytes of the Message, or a string
of C{n} zero bytes, if there aren't C{n} bytes remaining.
@rtype: string | entailment |
def add_int(self, n):
"""
Add an integer to the stream.
@param n: integer to add
@type n: int
"""
self.packet.write(struct.pack('>I', n))
return self | Add an integer to the stream.
@param n: integer to add
@type n: int | entailment |
def add_string(self, s):
"""
Add a string to the stream.
@param s: string to add
@type s: str
"""
self.add_int(len(s))
self.packet.write(s)
return self | Add a string to the stream.
@param s: string to add
@type s: str | entailment |
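A round-trip sketch of the Message builder shown above, assuming the matching get_int()/get_string() readers on the same class (str(m) is used for serialization, as in the key-exchange code earlier in this file):

```python
from paramiko.message import Message

m = Message()
m.add_int(42)
m.add_string('window-change')

# re-parse the serialized bytes with a fresh Message
m2 = Message(str(m))
print(m2.get_int())       # 42
print(m2.get_string())    # 'window-change'
```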
def resize_pty(self, width=80, height=24):
"""
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('window-change')
m.add_boolean(True)
m.add_int(width)
m.add_int(height)
m.add_int(0).add_int(0)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event() | Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@raise SSHException: if the request was rejected or the channel was
closed | entailment |
def recv_exit_status(self):
"""
Return the exit status from the process on the server. This is
mostly useful for retrieving the results of an L{exec_command}.
If the command hasn't finished yet, this method will wait until
it does, or until the channel is closed. If no exit status is
provided by the server, -1 is returned.
@return: the exit code of the process on the server.
@rtype: int
@since: 1.2
"""
self.status_event.wait()
assert self.status_event.isSet()
return self.exit_status | Return the exit status from the process on the server. This is
mostly useful for retrieving the results of an L{exec_command}.
If the command hasn't finished yet, this method will wait until
it does, or until the channel is closed. If no exit status is
provided by the server, -1 is returned.
@return: the exit code of the process on the server.
@rtype: int
@since: 1.2 | entailment |
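A hedged sketch showing recv_exit_status() together with exec_command() on a raw channel (assumes an already-authenticated Transport named t; the command is a placeholder):

```python
chan = t.open_session()
chan.exec_command('ls /tmp')
print(chan.recv(4096))            # read some of the command's output
status = chan.recv_exit_status()  # blocks until the command finishes or the channel closes
print('exit status: %d' % status)
```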
def send_exit_status(self, status):
"""
Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
@param status: the exit code of the process
@type status: int
@since: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exit-status')
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m) | Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
@param status: the exit code of the process
@type status: int
@since: 1.2 | entailment |
def recv(self, nbytes):
"""
Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
received at once is specified by C{nbytes}. If a string of length zero
is returned, the channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}.
"""
try:
out = self.in_buffer.read(nbytes, self.timeout)
except PipeTimeout, e:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out | Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
received at once is specified by C{nbytes}. If a string of length zero
is returned, the channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}. | entailment |
def sendall(self, s):
"""
Send data to the channel, without allowing partial results. Unlike
L{send}, this method continues to send data from the given string until
either all data has been sent or an error occurs. Nothing is returned.
@param s: data to send.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@note: If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
This is irritating, but identically follows python's API.
"""
while s:
if self.closed:
# this doesn't seem useful, but it is the documented behavior of Socket
raise socket.error('Socket is closed')
sent = self.send(s)
s = s[sent:]
return None | Send data to the channel, without allowing partial results. Unlike
L{send}, this method continues to send data from the given string until
either all data has been sent or an error occurs. Nothing is returned.
@param s: data to send.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@note: If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
This is irritating, but identically follows python's API. | entailment |
def sendall_stderr(self, s):
"""
Send data to the channel's "stderr" stream, without allowing partial
results. Unlike L{send_stderr}, this method continues to send data
from the given string until all data has been sent or an error occurs.
Nothing is returned.
@param s: data to send to the client as "stderr" output.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@since: 1.1
"""
while s:
if self.closed:
raise socket.error('Socket is closed')
sent = self.send_stderr(s)
s = s[sent:]
return None | Send data to the channel's "stderr" stream, without allowing partial
results. Unlike L{send_stderr}, this method continues to send data
from the given string until all data has been sent or an error occurs.
Nothing is returned.
@param s: data to send to the client as "stderr" output.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@since: 1.1 | entailment |
def agent_auth(transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
"""
agent = ssh.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
return
for key in agent_keys:
print 'Trying ssh-agent key %s' % hexlify(key.get_fingerprint()),
try:
transport.auth_publickey(username, key)
print '... success!'
return
except ssh.SSHException:
print '... nope.' | Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent. | entailment |
def _read_prefetch(self, size):
"""
read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read.
"""
# while not closed, and haven't fetched past the current position, and haven't reached EOF...
while True:
offset = self._data_in_prefetch_buffers(self._realpos)
if offset is not None:
break
if self._prefetch_done or self._closed:
break
self.sftp._read_response()
self._check_exception()
if offset is None:
self._prefetching = False
return None
prefetch = self._prefetch_data[offset]
del self._prefetch_data[offset]
buf_offset = self._realpos - offset
if buf_offset > 0:
self._prefetch_data[offset] = prefetch[:buf_offset]
prefetch = prefetch[buf_offset:]
if size < len(prefetch):
self._prefetch_data[self._realpos + size] = prefetch[size:]
prefetch = prefetch[:size]
return prefetch | read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read. | entailment |
def chmod(self, mode):
"""
Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param mode: new permissions
@type mode: int
"""
self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
attr = SFTPAttributes()
attr.st_mode = mode
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param mode: new permissions
@type mode: int | entailment |
def chown(self, uid, gid):
"""
Change the owner (C{uid}) and group (C{gid}) of this file. As with
python's C{os.chown} function, you must pass both arguments, so if you
only want to change one, use L{stat} first to retrieve the current
owner and group.
@param uid: new owner's uid
@type uid: int
@param gid: new group id
@type gid: int
"""
self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | Change the owner (C{uid}) and group (C{gid}) of this file. As with
python's C{os.chown} function, you must pass both arguments, so if you
only want to change one, use L{stat} first to retrieve the current
owner and group.
@param uid: new owner's uid
@type uid: int
@param gid: new group id
@type gid: int | entailment |
def utime(self, times):
"""
Set the access and modified times of this file. If
C{times} is C{None}, then the file's access and modified times are set
to the current time. Otherwise, C{times} must be a 2-tuple of numbers,
of the form C{(atime, mtime)}, which is used to set the access and
modified times, respectively. This bizarre API is mimicked from python
for the sake of consistency -- I apologize.
@param times: C{None} or a tuple of (access time, modified time) in
standard internet epoch time (seconds since 01 January 1970 GMT)
@type times: tuple(int)
"""
if times is None:
times = (time.time(), time.time())
self.sftp._log(DEBUG, 'utime(%s, %r)' % (hexlify(self.handle), times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self.sftp._request(CMD_FSETSTAT, self.handle, attr) | Set the access and modified times of this file. If
C{times} is C{None}, then the file's access and modified times are set
to the current time. Otherwise, C{times} must be a 2-tuple of numbers,
of the form C{(atime, mtime)}, which is used to set the access and
modified times, respectively. This bizarre API is mimicked from python
for the sake of consistency -- I apologize.
@param times: C{None} or a tuple of (access time, modified time) in
standard internet epoch time (seconds since 01 January 1970 GMT)
@type times: tuple(int) | entailment |
def flush(self):
"""
Write out any data in the write buffer. This may do nothing if write
buffering is not turned on.
"""
self._write_all(self._wbuffer.getvalue())
self._wbuffer = StringIO()
return | Write out any data in the write buffer. This may do nothing if write
buffering is not turned on. | entailment |
def readline(self, size=None):
"""
Read one entire line from the file. A trailing newline character is
kept in the string (but may be absent when a file ends with an
incomplete line). If the size argument is present and non-negative, it
is a maximum byte count (including the trailing newline) and an
incomplete line may be returned. An empty string is returned only when
EOF is encountered immediately.
@note: Unlike stdio's C{fgets()}, the returned string contains null
characters (C{'\\0'}) if they occurred in the input.
@param size: maximum length of returned string.
@type size: int
@return: next line of the file, or an empty string if the end of the
file has been reached.
@rtype: str
"""
# it's almost silly how complex this function is.
if self._closed:
raise IOError('File is closed')
if not (self._flags & self.FLAG_READ):
raise IOError('File not open for reading')
line = self._rbuffer
while True:
if self._at_trailing_cr and (self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0):
# edge case: the newline may be '\r\n' and we may have read
# only the first '\r' last time.
if line[0] == '\n':
line = line[1:]
self._record_newline('\r\n')
else:
self._record_newline('\r')
self._at_trailing_cr = False
# check size before looking for a linefeed, in case we already have
# enough.
if (size is not None) and (size >= 0):
if len(line) >= size:
# truncate line and return
self._rbuffer = line[size:]
line = line[:size]
self._pos += len(line)
return line
n = size - len(line)
else:
n = self._bufsize
if ('\n' in line) or ((self._flags & self.FLAG_UNIVERSAL_NEWLINE) and ('\r' in line)):
break
try:
new_data = self._read(n)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
self._rbuffer = ''
self._pos += len(line)
return line
line += new_data
self._realpos += len(new_data)
# find the newline
pos = line.find('\n')
if self._flags & self.FLAG_UNIVERSAL_NEWLINE:
rpos = line.find('\r')
if (rpos >= 0) and ((rpos < pos) or (pos < 0)):
pos = rpos
xpos = pos + 1
if (line[pos] == '\r') and (xpos < len(line)) and (line[xpos] == '\n'):
xpos += 1
self._rbuffer = line[xpos:]
lf = line[pos:xpos]
line = line[:pos] + '\n'
if (len(self._rbuffer) == 0) and (lf == '\r'):
# we could read the line up to a '\r' and there could still be a
# '\n' following that we read next time. note that and eat it.
self._at_trailing_cr = True
else:
self._record_newline(lf)
self._pos += len(line)
return line | Read one entire line from the file. A trailing newline character is
kept in the string (but may be absent when a file ends with an
incomplete line). If the size argument is present and non-negative, it
is a maximum byte count (including the trailing newline) and an
incomplete line may be returned. An empty string is returned only when
EOF is encountered immediately.
@note: Unlike stdio's C{fgets()}, the returned string contains null
characters (C{'\\0'}) if they occurred in the input.
@param size: maximum length of returned string.
@type size: int
@return: next line of the file, or an empty string if the end of the
file has been reached.
@rtype: str | entailment |
def _set_mode(self, mode='r', bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
"""
# set bufsize in any event, because it's used for readline().
self._bufsize = self._DEFAULT_BUFSIZE
if bufsize < 0:
# do no buffering by default, because otherwise writes will get
# buffered in a way that will probably confuse people.
bufsize = 0
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= self.FLAG_BUFFERED
self._flags &= ~self.FLAG_LINE_BUFFERED
elif bufsize == 0:
# unbuffered
self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
if ('r' in mode) or ('+' in mode):
self._flags |= self.FLAG_READ
if ('w' in mode) or ('+' in mode):
self._flags |= self.FLAG_WRITE
if ('a' in mode):
self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if ('b' in mode):
self._flags |= self.FLAG_BINARY
if ('U' in mode):
self._flags |= self.FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None | Subclasses call this method to initialize the BufferedFile. | entailment |
def from_transport(cls, t):
"""
Create an SFTP client channel from an open L{Transport}.
@param t: an open L{Transport} which is already authenticated
@type t: L{Transport}
@return: a new L{SFTPClient} object, referring to an sftp session
(channel) across the transport
@rtype: L{SFTPClient}
"""
chan = t.open_session()
if chan is None:
return None
chan.invoke_subsystem('sftp')
return cls(chan) | Create an SFTP client channel from an open L{Transport}.
@param t: an open L{Transport} which is already authenticated
@type t: L{Transport}
@return: a new L{SFTPClient} object, referring to an sftp session
(channel) across the transport
@rtype: L{SFTPClient} | entailment |
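Typical construction of an SFTPClient from an existing Transport, as a hedged sketch (host and credentials are placeholders):

```python
import paramiko

t = paramiko.Transport(('sftp.example.com', 22))
t.connect(username='user', password='secret')
sftp = paramiko.SFTPClient.from_transport(t)
print(sftp.listdir('.'))   # list the remote "current folder"
sftp.close()
t.close()
```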
def listdir_attr(self, path='.'):
"""
Return a list containing L{SFTPAttributes} objects corresponding to
files in the given C{path}. The list is in arbitrary order. It does
not include the special entries C{'.'} and C{'..'} even if they are
present in the folder.
The returned L{SFTPAttributes} objects will each have an additional
field: C{longname}, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will probably
depend on the SFTP server implementation.
@param path: path to list (defaults to C{'.'})
@type path: str
@return: list of attributes
@rtype: list of L{SFTPAttributes}
@since: 1.2
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'listdir(%r)' % path)
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_string()
filelist = []
while True:
try:
t, msg = self._request(CMD_READDIR, handle)
except EOFError, e:
# done with handle
break
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
for i in range(count):
filename = _to_unicode(msg.get_string())
longname = _to_unicode(msg.get_string())
attr = SFTPAttributes._from_msg(msg, filename, longname)
if (filename != '.') and (filename != '..'):
filelist.append(attr)
self._request(CMD_CLOSE, handle)
return filelist | Return a list containing L{SFTPAttributes} objects corresponding to
files in the given C{path}. The list is in arbitrary order. It does
not include the special entries C{'.'} and C{'..'} even if they are
present in the folder.
The returned L{SFTPAttributes} objects will each have an additional
field: C{longname}, which may contain a formatted string of the file's
attributes, in unix format. The content of this string will probably
depend on the SFTP server implementation.
@param path: path to list (defaults to C{'.'})
@type path: str
@return: list of attributes
@rtype: list of L{SFTPAttributes}
@since: 1.2 | entailment |
def remove(self, path):
"""
Remove the file at the given path. This only works on files; for
removing folders (directories), use L{rmdir}.
@param path: path (absolute or relative) of the file to remove
@type path: str
@raise IOError: if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'remove(%r)' % path)
self._request(CMD_REMOVE, path) | Remove the file at the given path. This only works on files; for
removing folders (directories), use L{rmdir}.
@param path: path (absolute or relative) of the file to remove
@type path: str
@raise IOError: if the path refers to a folder (directory) | entailment |
def rmdir(self, path):
"""
Remove the folder named C{path}.
@param path: name of the folder to remove
@type path: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'rmdir(%r)' % path)
self._request(CMD_RMDIR, path) | Remove the folder named C{path}.
@param path: name of the folder to remove
@type path: str | entailment |
def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat(%r)' % path)
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg) | Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes | entailment |
def lstat(self, path):
"""
Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as L{stat}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'lstat(%r)' % path)
t, msg = self._request(CMD_LSTAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg) | Retrieve information about a file on the remote system, without
following symbolic links (shortcuts). This otherwise behaves exactly
the same as L{stat}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes | entailment |
def symlink(self, source, dest):
"""
Create a symbolic link (shortcut) of the C{source} path at
C{destination}.
@param source: path of the original file
@type source: str
@param dest: path of the newly created symlink
@type dest: str
"""
dest = self._adjust_cwd(dest)
self._log(DEBUG, 'symlink(%r, %r)' % (source, dest))
if type(source) is unicode:
source = source.encode('utf-8')
self._request(CMD_SYMLINK, source, dest) | Create a symbolic link (shortcut) of the C{source} path at
C{destination}.
@param source: path of the original file
@type source: str
@param dest: path of the newly created symlink
@type dest: str | entailment |
def chmod(self, path, mode):
"""
Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'chmod(%r, %r)' % (path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_SETSTAT, path, attr) | Change the mode (permissions) of a file. The permissions are
unix-style and identical to those used by python's C{os.chmod}
function.
@param path: path of the file to change the permissions of
@type path: str
@param mode: new permissions
@type mode: int | entailment |
def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string()) | Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str | entailment |
def normalize(self, path):
"""
Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing C{'.'}
as C{path}).
@param path: path to be normalized
@type path: str
@return: normalized form of the given path
@rtype: str
@raise IOError: if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'normalize(%r)' % path)
t, msg = self._request(CMD_REALPATH, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count != 1:
raise SFTPError('Realpath returned %d results' % count)
return _to_unicode(msg.get_string()) | Return the normalized path (on the server) of a given path. This
can be used to quickly resolve symbolic links or determine what the
server is considering to be the "current folder" (by passing C{'.'}
as C{path}).
@param path: path to be normalized
@type path: str
@return: normalized form of the given path
@rtype: str
@raise IOError: if the path can't be resolved on the server | entailment |
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4
"""
fr = self.file(remotepath, 'rb')
file_size = self.stat(remotepath).st_size
fr.prefetch()
try:
fl = file(localpath, 'wb')
try:
size = 0
while True:
data = fr.read(32768)
if len(data) == 0:
break
fl.write(data)
size += len(data)
if callback is not None:
callback(size, file_size)
finally:
fl.close()
finally:
fr.close()
s = os.stat(localpath)
if s.st_size != size:
raise IOError('size mismatch in get! %d != %d' % (s.st_size, size)) | Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4 | entailment |
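A short hedged sketch of get() with a progress callback (the remote and local paths are placeholders; assumes an open SFTPClient named sftp):

```python
def progress(transferred, total):
    # called after each 32 KiB chunk with running byte counts
    print('%d of %d bytes copied' % (transferred, total))

sftp.get('/remote/logs/app.log', 'app.log', callback=progress)
```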
def _adjust_cwd(self, path):
"""
Return an adjusted path if we're emulating a "current working
directory" for the server.
"""
if type(path) is unicode:
path = path.encode('utf-8')
if self._cwd is None:
return path
if (len(path) > 0) and (path[0] == '/'):
# absolute path
return path
if self._cwd == '/':
return self._cwd + path
return self._cwd + '/' + path | Return an adjusted path if we're emulating a "current working
directory" for the server. | entailment |
def register(self, obj, resource):
"""
Register a resource to be closed when an object is collected.
When the given C{obj} is garbage-collected by the python interpreter,
the C{resource} will be closed by having its C{close()} method called.
Any exceptions are ignored.
@param obj: the object to track
@type obj: object
@param resource: the resource to close when the object is collected
@type resource: object
"""
def callback(ref):
try:
resource.close()
except:
pass
del self._table[id(resource)]
# keep the weakref in a table so it sticks around long enough to get
# its callback called. :)
self._table[id(resource)] = weakref.ref(obj, callback) | Register a resource to be closed when an object is collected.
When the given C{obj} is garbage-collected by the python interpreter,
the C{resource} will be closed by having its C{close()} method called.
Any exceptions are ignored.
@param obj: the object to track
@type obj: object
@param resource: the resource to close when the object is collected
@type resource: object | entailment |
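The same weakref-callback pattern as a standalone sketch (the ResourceTracker class is hypothetical, not part of the library): a weak reference to the tracked object is kept alive in a table so its callback can close the resource when the object is garbage-collected.

```python
import weakref

class ResourceTracker(object):
    def __init__(self):
        self._table = {}

    def register(self, obj, resource):
        def callback(ref):
            try:
                resource.close()   # best-effort close when obj is collected
            except Exception:
                pass
            self._table.pop(id(resource), None)
        # keep the weakref in the table so it survives long enough to fire
        self._table[id(resource)] = weakref.ref(obj, callback)
```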
def _request(self, method, path, data=None, reestablish_session=True):
"""Perform HTTP request for REST API."""
if path.startswith("http"):
url = path # For cases where URL of different form is needed.
else:
url = self._format_path(path)
headers = {"Content-Type": "application/json"}
if self._user_agent:
headers['User-Agent'] = self._user_agent
body = json.dumps(data).encode("utf-8")
try:
response = requests.request(method, url, data=body, headers=headers,
cookies=self._cookies, **self._request_kwargs)
except requests.exceptions.RequestException as err:
# error outside scope of HTTP status codes
# e.g. unable to resolve domain name
raise PureError(err.message)
if response.status_code == 200:
if "application/json" in response.headers.get("Content-Type", ""):
if response.cookies:
self._cookies.update(response.cookies)
else:
self._cookies.clear()
content = response.json()
if isinstance(content, list):
content = ResponseList(content)
elif isinstance(content, dict):
content = ResponseDict(content)
content.headers = response.headers
return content
raise PureError("Response not in JSON: " + response.text)
elif response.status_code == 401 and reestablish_session:
self._start_session()
return self._request(method, path, data, False)
elif response.status_code == 450 and self._renegotiate_rest_version:
# Purity REST API version is incompatible.
old_version = self._rest_version
self._rest_version = self._choose_rest_version()
if old_version == self._rest_version:
# Got 450 error, but the rest version was supported
# Something really unexpected happened.
raise PureHTTPError(self._target, str(self._rest_version), response)
return self._request(method, path, data, reestablish_session)
else:
raise PureHTTPError(self._target, str(self._rest_version), response) | Perform HTTP request for REST API. | entailment |
def _check_rest_version(self, version):
"""Validate a REST API version is supported by the library and target array."""
version = str(version)
if version not in self.supported_rest_versions:
msg = "Library is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
array_rest_versions = self._list_available_rest_versions()
if version not in array_rest_versions:
msg = "Array is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
return LooseVersion(version) | Validate a REST API version is supported by the library and target array. | entailment |
def _choose_rest_version(self):
"""Return the newest REST API version supported by target array."""
versions = self._list_available_rest_versions()
versions = [LooseVersion(x) for x in versions if x in self.supported_rest_versions]
if versions:
return max(versions)
else:
raise PureError(
"Library is incompatible with all REST API versions supported"
"by the target array.") | Return the newest REST API version supported by target array. | entailment |
def _list_available_rest_versions(self):
"""Return a list of the REST API versions supported by the array"""
url = "https://{0}/api/api_version".format(self._target)
data = self._request("GET", url, reestablish_session=False)
return data["version"] | Return a list of the REST API versions supported by the array | entailment |
def _obtain_api_token(self, username, password):
"""Use username and password to obtain and return an API token."""
data = self._request("POST", "auth/apitoken",
{"username": username, "password": password},
reestablish_session=False)
return data["api_token"] | Use username and password to obtain and return an API token. | entailment |
def create_snapshots(self, volumes, **kwargs):
"""Create snapshots of the listed volumes.
:param volumes: List of names of the volumes to snapshot.
:type volumes: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the new snapshots.
:rtype: ResponseDict
"""
data = {"source": volumes, "snap": True}
data.update(kwargs)
return self._request("POST", "volume", data) | Create snapshots of the listed volumes.
:param volumes: List of names of the volumes to snapshot.
:type volumes: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the new snapshots.
:rtype: ResponseDict | entailment |
def create_volume(self, volume, size, **kwargs):
"""Create a volume and return a dictionary describing it.
:param volume: Name of the volume to be created.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the created volume.
:rtype: ResponseDict
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ======
"""
data = {"size": size}
data.update(kwargs)
return self._request("POST", "volume/{0}".format(volume), data) | Create a volume and return a dictionary describing it.
:param volume: Name of the volume to be created.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the created volume.
:rtype: ResponseDict
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ====== | entailment |
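Hedged usage sketch of create_volume() against a FlashArray client (the hostname and API token are placeholders; the size uses a suffix from the table above):

```python
import purestorage

array = purestorage.FlashArray('array.example.com', api_token='<api-token>')
vol = array.create_volume('vol1', '10G')   # dictionary describing the new volume
print(vol)
```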
def extend_volume(self, volume, size):
"""Extend a volume to a new, larger size.
:param volume: Name of the volume to be extended.
:type volume: str
:type size: int or str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:returns: A dictionary mapping "name" to volume and "size" to the volume's
new size in bytes.
:rtype: ResponseDict
.. note::
The new size must be larger than the volume's old size.
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ======
"""
return self.set_volume(volume, size=size, truncate=False) | Extend a volume to a new, larger size.
:param volume: Name of the volume to be extended.
:type volume: str
:type size: int or str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:returns: A dictionary mapping "name" to volume and "size" to the volume's
new size in bytes.
:rtype: ResponseDict
.. note::
The new size must be larger than the volume's old size.
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ====== | entailment |
def truncate_volume(self, volume, size):
"""Truncate a volume to a new, smaller size.
:param volume: Name of the volume to truncate.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:returns: A dictionary mapping "name" to volume and "size" to the
volume's new size in bytes.
:rtype: ResponseDict
.. warning::
Data may be irretrievably lost in this operation.
.. note::
A snapshot of the volume in its previous state is taken and
immediately destroyed, but it is available for recovery for
the 24 hours following the truncation.
"""
return self.set_volume(volume, size=size, truncate=True) | Truncate a volume to a new, smaller size.
:param volume: Name of the volume to truncate.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:returns: A dictionary mapping "name" to volume and "size" to the
volume's new size in bytes.
:rtype: ResponseDict
.. warning::
Data may be irretrievably lost in this operation.
.. note::
A snapshot of the volume in its previous state is taken and
immediately destroyed, but it is available for recovery for
the 24 hours following the truncation. | entailment |
def connect_host(self, host, volume, **kwargs):
"""Create a connection between a host and a volume.
:param host: Name of host to connect to volume.
:type host: str
:param volume: Name of volume to connect to host.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST host/:host/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the host and volume.
:rtype: ResponseDict
"""
return self._request(
"POST", "host/{0}/volume/{1}".format(host, volume), kwargs) | Create a connection between a host and a volume.
:param host: Name of host to connect to volume.
:type host: str
:param volume: Name of volume to connect to host.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST host/:host/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the host and volume.
:rtype: ResponseDict | entailment |
def connect_hgroup(self, hgroup, volume, **kwargs):
"""Create a shared connection between a host group and a volume.
:param hgroup: Name of hgroup to connect to volume.
:type hgroup: str
:param volume: Name of volume to connect to hgroup.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST hgroup/:hgroup/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the hgroup and volume.
:rtype: ResponseDict
"""
return self._request(
"POST", "hgroup/{0}/volume/{1}".format(hgroup, volume), kwargs) | Create a shared connection between a host group and a volume.
:param hgroup: Name of hgroup to connect to volume.
:type hgroup: str
:param volume: Name of volume to connect to hgroup.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST hgroup/:hgroup/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the hgroup and volume.
:rtype: ResponseDict | entailment |
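A minimal sketch of the two connection calls above. Here `array` is assumed to be an authenticated FlashArray-style client (constructed as in the earlier sketch), and the host, host group, and volume names are invented.

# Expose "vol1" to a single host, then share "shared-vol" with a host group.
array.connect_host("host01", "vol1")
array.connect_hgroup("prod-hgroup", "shared-vol")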
def get_offload(self, name, **kwargs):
"""Return a dictionary describing the connected offload target.
:param name: Name of the offload target to get information about.
:type name: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET offload/:offload**
:type \*\*kwargs: optional
:returns: A dictionary describing the offload connection.
:rtype: ResponseDict
"""
# Unbox if a list to accommodate a bug in REST 1.14
result = self._request("GET", "offload/{0}".format(name), kwargs)
if isinstance(result, list):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result | Return a dictionary describing the connected offload target.
:param name: Name of the offload target to get information about.
:type name: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET offload/:offload**
:type \*\*kwargs: optional
:returns: A dictionary describing the offload connection.
:rtype: ResponseDict | entailment |
def create_subnet(self, subnet, prefix, **kwargs):
"""Create a subnet.
:param subnet: Name of subnet to be created.
:type subnet: str
:param prefix: Routing prefix of subnet to be created.
:type prefix: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST subnet/:subnet**
:type \*\*kwargs: optional
:returns: A dictionary describing the created subnet.
:rtype: ResponseDict
.. note::
prefix should be specified as an IPv4 CIDR address.
("xxx.xxx.xxx.xxx/nn", representing prefix and prefix length)
.. note::
Requires use of REST API 1.5 or later.
"""
data = {"prefix": prefix}
data.update(kwargs)
return self._request("POST", "subnet/{0}".format(subnet), data) | Create a subnet.
:param subnet: Name of subnet to be created.
:type subnet: str
:param prefix: Routing prefix of subnet to be created.
:type prefix: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST subnet/:subnet**
:type \*\*kwargs: optional
:returns: A dictionary describing the created subnet.
:rtype: ResponseDict
.. note::
prefix should be specified as an IPv4 CIDR address.
("xxx.xxx.xxx.xxx/nn", representing prefix and prefix length)
.. note::
Requires use of REST API 1.5 or later. | entailment |
def create_vlan_interface(self, interface, subnet, **kwargs):
"""Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
"""
data = {"subnet": subnet}
data.update(kwargs)
return self._request("POST", "network/vif/{0}".format(interface), data) | Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later. | entailment |
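Sketch of the subnet and VLAN-interface calls above, again assuming the hypothetical `array` client from the earlier sketch; the subnet name, CIDR prefix, and interface name are placeholders (REST API 1.5 or later).

# Create a subnet from a CIDR prefix, then bind a VLAN interface to it.
array.create_subnet("repl-net", "192.168.10.0/24")
array.create_vlan_interface("ct0.eth2.100", "repl-net")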
def set_password(self, admin, new_password, old_password):
"""Set an admin's password.
:param admin: Name of admin whose password is to be set.
:type admin: str
:param new_password: New password for admin.
:type new_password: str
:param old_password: Current password of admin.
:type old_password: str
:returns: A dictionary mapping "name" to admin.
:rtype: ResponseDict
"""
return self.set_admin(admin, password=new_password,
old_password=old_password) | Set an admin's password.
:param admin: Name of admin whose password is to be set.
:type admin: str
:param new_password: New password for admin.
:type new_password: str
:param old_password: Current password of admin.
:type old_password: str
:returns: A dictionary mapping "name" to admin.
:rtype: ResponseDict | entailment |
def disable_directory_service(self, check_peer=False):
"""Disable the directory service.
:param check_peer: If True, disables server authenticity
enforcement. If False, disables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
if check_peer:
return self.set_directory_service(check_peer=False)
return self.set_directory_service(enabled=False) | Disable the directory service.
:param check_peer: If True, disables server authenticity
enforcement. If False, disables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict | entailment |
def enable_directory_service(self, check_peer=False):
"""Enable the directory service.
:param check_peer: If True, enables server authenticity
enforcement. If False, enables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
"""
if check_peer:
return self.set_directory_service(check_peer=True)
return self.set_directory_service(enabled=True) | Enable the directory service.
:param check_peer: If True, enables server authenticity
enforcement. If False, enables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict | entailment |
def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
"""
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data) | Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict | entailment |
def connect_array(self, address, connection_key, connection_type, **kwargs):
"""Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"management_address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data) | Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later. | entailment |
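Sketch of connect_array() usage, assuming the same hypothetical `array` client; the peer address and connection key are placeholders, and "replication" is the only connection type the docstring mentions (REST API 1.2 or later).

# Pair this array with a replication peer.
conn = array.connect_array(
    "peer-array.example.com",
    "<connection-key-from-peer>",
    ["replication"],
)
print(conn)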
def create_pgroup_snapshot(self, source, **kwargs):
"""Create snapshot of pgroup from specified source.
:param source: Name of pgroup of which to take snapshot.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing the created snapshot.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
# In REST 1.4, support was added for snapshotting multiple pgroups. As a
# result, the endpoint response changed from an object to an array of
# objects. To keep the response type consistent between REST versions,
# we unbox the response when creating a single snapshot.
result = self.create_pgroup_snapshots([source], **kwargs)
if self._rest_version >= LooseVersion("1.4"):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result | Create snapshot of pgroup from specified source.
:param source: Name of pgroup of which to take snapshot.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing the created snapshot.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later. | entailment |
def send_pgroup_snapshot(self, source, **kwargs):
""" Send an existing pgroup snapshot to target(s)
:param source: Name of pgroup snapshot to send.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the sent snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.16 or later.
"""
data = {"name": [source], "action":"send"}
data.update(kwargs)
return self._request("POST", "pgroup", data) | Send an existing pgroup snapshot to target(s)
:param source: Name of pgroup snapshot to send.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the sent snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.16 or later. | entailment |
def create_pgroup_snapshots(self, sources, **kwargs):
"""Create snapshots of pgroups from specified sources.
:param sources: Names of pgroups of which to take snapshots.
:type sources: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the created snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"source": sources, "snap": True}
data.update(kwargs)
return self._request("POST", "pgroup", data) | Create snapshots of pgroups from specified sources.
:param sources: Names of pgroups of which to take snapshots.
:type sources: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the created snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later. | entailment |
def eradicate_pgroup(self, pgroup, **kwargs):
"""Eradicate a destroyed pgroup.
:param pgroup: Name of pgroup to be eradicated.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pgroup/:pgroup**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
"""
eradicate = {"eradicate": True}
eradicate.update(kwargs)
return self._request("DELETE", "pgroup/{0}".format(pgroup), eradicate) | Eradicate a destroyed pgroup.
:param pgroup: Name of pgroup to be eradicated.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pgroup/:pgroup**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later. | entailment |
def clone_pod(self, source, dest, **kwargs):
"""Clone an existing pod to a new one.
:param source: Name of the pod to be cloned.
:type source: str
:param dest: Name of the target pod to clone into.
:type dest: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pod/:pod**
:type \*\*kwargs: optional
:returns: A dictionary describing the created pod
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later.
"""
data = {"source": source}
data.update(kwargs)
return self._request("POST", "pod/{0}".format(dest), data) | Clone an existing pod to a new one.
:param source: Name of the pod to be cloned.
:type source: str
:param dest: Name of the target pod to clone into.
:type dest: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pod/:pod**
:type \*\*kwargs: optional
:returns: A dictionary describing the created pod
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later. | entailment |
def remove_pod(self, pod, array, **kwargs):
"""Remove arrays from a pod.
:param pod: Name of the pod.
:type pod: str
:param array: Array to remove from pod.
:type array: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pod/:pod/array/:array**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pod and "array" to the pod's
new array list.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later.
"""
return self._request("DELETE", "pod/{0}/array/{1}".format(pod, array), kwargs) | Remove arrays from a pod.
:param pod: Name of the pod.
:type pod: str
:param array: Array to remove from pod.
:type array: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pod/:pod/array/:array**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pod and "array" to the pod's
new array list.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later. | entailment |
def get_certificate(self, **kwargs):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
"""
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET",
"cert/{0}".format(kwargs.pop('name', 'management')), kwargs)
else:
return self._request("GET", "cert", kwargs) | Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later. | entailment |
def list_certificates(self):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later.
"""
# This call takes no parameters.
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET", "cert")
else:
# If someone tries to call this against a too-early api version,
# do the best we can to provide expected behavior.
cert = self._request("GET", "cert")
out = ResponseList([cert])
out.headers = cert.headers
return out | List the attributes of all configured array certificates.
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later. | entailment |
def get_certificate_signing_request(self, **kwargs):
"""Construct a certificate signing request (CSR) for signing by a
certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert/certificate_signing_request**
:type \*\*kwargs: optional
:returns: A dictionary mapping "certificate_signing_request" to the CSR.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
In version 1.12, purecert was expanded to allow manipulation
of multiple certificates, by name. To preserve backwards compatibility,
the default name, if none is specified, for this version is 'management'
which acts on the certificate previously managed by this command.
"""
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET",
"cert/certificate_signing_request/{0}".format(
kwargs.pop('name', 'management')), kwargs)
else:
return self._request("GET", "cert/certificate_signing_request", kwargs) | Construct a certificate signing request (CSR) for signing by a
certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert/certificate_signing_request**
:type \*\*kwargs: optional
:returns: A dictionary mapping "certificate_signing_request" to the CSR.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
In version 1.12, purecert was expanded to allow manipulation
of multiple certificates, by name. To preserve backwards compatibility,
the default name, if none is specified, for this version is 'management'
which acts on the certificate previously managed by this command. | entailment |
def set_certificate(self, **kwargs):
"""Modify an existing certificate, creating a new self signed one
or importing a certificate signed by a certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
In version 1.12, purecert was expanded to allow manipulation
of multiple certificates, by name. To preserve backwards compatibility,
the default name, if none is specified, for this version is 'management'
which acts on the certificate previously managed by this command.
"""
if self._rest_version >= LooseVersion("1.12"):
return self._request("PUT",
"cert/{0}".format(kwargs.pop('name', 'management')), kwargs)
else:
return self._request("PUT", "cert", kwargs) | Modify an existing certificate, creating a new self signed one
or importing a certificate signed by a certificate authority (CA).
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**PUT cert**
:type \*\*kwargs: optional
:returns: A dictionary describing the configured array certificate.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.3 or later.
In version 1.12, purecert was expanded to allow manipulation
of multiple certificates, by name. To preserve backwards compatibility,
the default name, if none is specified, for this version is 'management'
which acts on the certificate previously managed by this command. | entailment |
def page_through(page_size, function, *args, **kwargs):
"""Return an iterator over all pages of a REST operation.
:param page_size: Number of elements to retrieve per call.
:param function: FlashArray function that accepts limit as an argument.
:param \*args: Positional arguments to be passed to function.
:param \*\*kwargs: Keyword arguments to be passed to function.
:returns: An iterator of tuples containing a page of results for the
function(\*args, \*\*kwargs) and None, or None and a PureError
if a call to retrieve a page fails.
:rtype: iterator
.. note::
Requires use of REST API 1.7 or later.
Only works with functions that accept limit as an argument.
Iterator will retrieve page_size elements per call
Iterator will yield None and an error if a call fails. The next
call will repeat the same call, unless the caller sends in an
alternate page token.
"""
kwargs["limit"] = page_size
def get_page(token):
page_kwargs = kwargs.copy()
if token:
page_kwargs["token"] = token
return function(*args, **page_kwargs)
def page_generator():
token = None
while True:
try:
response = get_page(token)
token = response.headers.get("x-next-token")
except PureError as err:
yield None, err
else:
if response:
sent_token = yield response, None
if sent_token is not None:
token = sent_token
else:
return
return page_generator() | Return an iterator over all pages of a REST operation.
:param page_size: Number of elements to retrieve per call.
:param function: FlashArray function that accepts limit as an argument.
:param \*args: Positional arguments to be passed to function.
:param \*\*kwargs: Keyword arguments to be passed to function.
:returns: An iterator of tuples containing a page of results for the
function(\*args, \*\*kwargs) and None, or None and a PureError
if a call to retrieve a page fails.
:rtype: iterator
.. note::
Requires use of REST API 1.7 or later.
Only works with functions that accept limit as an argument.
Iterator will retrieve page_size elements per call
Iterator will yield None and an error if a call fails. The next
call will repeat the same call, unless the caller sends in an
alternate page token. | entailment |
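Usage sketch for page_through(). It assumes the client exposes a list_volumes(**kwargs) call that accepts a "limit" argument and returns dicts with a "name" key; both of those are assumptions, not something shown in the row above.

# Walk all volumes 500 at a time; each iteration yields (page, None) on
# success or (None, error) when a call fails.
for page, error in page_through(500, array.list_volumes):
    if error is not None:
        # The generator would retry the same page next iteration, so bail out.
        print("paging failed: {0}".format(error))
        break
    for vol in page:
        print(vol["name"])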
def set_file_attr(filename, attr):
"""
Change a file's attributes on the local filesystem. The contents of
C{attr} are used to change the permissions, owner, group ownership,
and/or modification & access time of the file, depending on which
attributes are present in C{attr}.
This is meant to be a handy helper function for translating SFTP file
requests into local file operations.
@param filename: name of the file to alter (should usually be an
absolute path).
@type filename: str
@param attr: attributes to change.
@type attr: L{SFTPAttributes}
"""
if sys.platform != 'win32':
# mode operations are meaningless on win32
if attr._flags & attr.FLAG_PERMISSIONS:
os.chmod(filename, attr.st_mode)
if attr._flags & attr.FLAG_UIDGID:
os.chown(filename, attr.st_uid, attr.st_gid)
if attr._flags & attr.FLAG_AMTIME:
os.utime(filename, (attr.st_atime, attr.st_mtime))
if attr._flags & attr.FLAG_SIZE:
open(filename, 'w+').truncate(attr.st_size) | Change a file's attributes on the local filesystem. The contents of
C{attr} are used to change the permissions, owner, group ownership,
and/or modification & access time of the file, depending on which
attributes are present in C{attr}.
This is meant to be a handy helper function for translating SFTP file
requests into local file operations.
@param filename: name of the file to alter (should usually be an
absolute path).
@type filename: str
@param attr: attributes to change.
@type attr: L{SFTPAttributes} | entailment |
def read_all(self, n, check_rekey=False):
"""
Read as close to N bytes as possible, blocking as long as necessary.
@param n: number of bytes to read
@type n: int
@return: the data read
@rtype: str
@raise EOFError: if the socket was closed before all the bytes could
be read
"""
out = ''
# handle over-reading from reading the banner line
if len(self.__remainder) > 0:
out = self.__remainder[:n]
self.__remainder = self.__remainder[n:]
n -= len(out)
if PY22:
return self._py22_read_all(n, out)
while n > 0:
got_timeout = False
try:
x = self.__socket.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
except socket.timeout:
got_timeout = True
except socket.error, e:
# on Linux, sometimes instead of socket.timeout, we get
# EAGAIN. this is a bug in recent (> 2.6.9) kernels but
# we need to work around it.
if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
got_timeout = True
elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
# syscall interrupted; try again
pass
elif self.__closed:
raise EOFError()
else:
raise
if got_timeout:
if self.__closed:
raise EOFError()
if check_rekey and (len(out) == 0) and self.__need_rekey:
raise NeedRekeyException()
self._check_keepalive()
return out | Read as close to N bytes as possible, blocking as long as necessary.
@param n: number of bytes to read
@type n: int
@return: the data read
@rtype: str
@raise EOFError: if the socket was closed before all the bytes could
be read | entailment |
def readline(self, timeout):
"""
Read a line from the socket. We assume no data is pending after the
line, so it's okay to attempt large reads.
"""
buf = self.__remainder
while not '\n' in buf:
buf += self._read_timeout(timeout)
n = buf.index('\n')
self.__remainder = buf[n+1:]
buf = buf[:n]
if (len(buf) > 0) and (buf[-1] == '\r'):
buf = buf[:-1]
return buf | Read a line from the socket. We assume no data is pending after the
line, so it's okay to attempt large reads. | entailment |
def send_message(self, data):
"""
Write a block of data using the current cipher, as an SSH block.
"""
# encrypt this sucka
data = str(data)
cmd = ord(data[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = '$%x' % cmd
orig_len = len(data)
self.__write_lock.acquire()
try:
if self.__compress_engine_out is not None:
data = self.__compress_engine_out(data)
packet = self._build_packet(data)
if self.__dump_packets:
self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
self._log(DEBUG, util.format_binary(packet, 'OUT: '))
if self.__block_engine_out != None:
out = self.__block_engine_out.encrypt(packet)
else:
out = packet
# + mac
if self.__block_engine_out != None:
payload = struct.pack('>I', self.__sequence_number_out) + packet
out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
self.__sequence_number_out = (self.__sequence_number_out + 1) & 0xffffffffL
self.write_all(out)
self.__sent_bytes += len(out)
self.__sent_packets += 1
if ((self.__sent_packets >= self.REKEY_PACKETS) or (self.__sent_bytes >= self.REKEY_BYTES)) \
and not self.__need_rekey:
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
(self.__sent_packets, self.__sent_bytes))
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
finally:
self.__write_lock.release() | Write a block of data using the current cipher, as an SSH block. | entailment |
def read_message(self):
"""
Only one thread should ever be in this function (no other locking is
done).
@raise SSHException: if the packet is mangled
@raise NeedRekeyException: if the transport should rekey
"""
header = self.read_all(self.__block_size_in, check_rekey=True)
if self.__block_engine_in != None:
header = self.__block_engine_in.decrypt(header)
if self.__dump_packets:
self._log(DEBUG, util.format_binary(header, 'IN: '))
packet_size = struct.unpack('>I', header[:4])[0]
# leftover contains decrypted bytes from the first block (after the length field)
leftover = header[4:]
if (packet_size - len(leftover)) % self.__block_size_in != 0:
raise SSHException('Invalid packet blocking')
buf = self.read_all(packet_size + self.__mac_size_in - len(leftover))
packet = buf[:packet_size - len(leftover)]
post_packet = buf[packet_size - len(leftover):]
if self.__block_engine_in != None:
packet = self.__block_engine_in.decrypt(packet)
if self.__dump_packets:
self._log(DEBUG, util.format_binary(packet, 'IN: '))
packet = leftover + packet
if self.__mac_size_in > 0:
mac = post_packet[:self.__mac_size_in]
mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in]
if my_mac != mac:
raise SSHException('Mismatched MAC')
padding = ord(packet[0])
payload = packet[1:packet_size - padding]
if self.__dump_packets:
self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
if self.__compress_engine_in is not None:
payload = self.__compress_engine_in(payload)
msg = Message(payload[1:])
msg.seqno = self.__sequence_number_in
self.__sequence_number_in = (self.__sequence_number_in + 1) & 0xffffffffL
# check for rekey
raw_packet_size = packet_size + self.__mac_size_in + 4
self.__received_bytes += raw_packet_size
self.__received_packets += 1
if self.__need_rekey:
# we've asked to rekey -- give them some packets to comply before
# dropping the connection
self.__received_bytes_overflow += raw_packet_size
self.__received_packets_overflow += 1
if (self.__received_packets_overflow >= self.REKEY_PACKETS_OVERFLOW_MAX) or \
(self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX):
raise SSHException('Remote transport is ignoring rekey requests')
elif (self.__received_packets >= self.REKEY_PACKETS) or \
(self.__received_bytes >= self.REKEY_BYTES):
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes received)' %
(self.__received_packets, self.__received_bytes))
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
cmd = ord(payload[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = '$%x' % cmd
if self.__dump_packets:
self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
return cmd, msg | Only one thread should ever be in this function (no other locking is
done).
@raise SSHException: if the packet is mangled
@raise NeedRekeyException: if the transport should rekey | entailment |
def make_tarball(base_name, base_dir, compress='gzip',
verbose=False, dry_run=False):
"""Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
# XXX GNU tar 1.13 has a nifty option to add a prefix directory.
# It's pretty new, though, so we certainly can't require it --
# but it would be nice to take advantage of it to skip the
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
compress_ext = { 'gzip': ".gz",
'bzip2': '.bz2',
'compress': ".Z" }
# flags for compression program, each element of list will be an argument
tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'}
compress_flags = {'compress': ["-f"]}
if compress is not None and compress not in compress_ext.keys():
raise ValueError("bad value for 'compress': must be None, 'gzip',"
"'bzip2' or 'compress'")
archive_name = base_name + ".tar"
if compress and compress in tarfile_compress_flag:
archive_name += compress_ext[compress]
mode = 'w:' + tarfile_compress_flag.get(compress, '')
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
log.info('Creating tar file %s with mode %s' % (archive_name, mode))
if not dry_run:
tar = tarfile.open(archive_name, mode=mode)
# This recursively adds everything underneath base_dir
tar.add(base_dir)
tar.close()
if compress and compress not in tarfile_compress_flag:
spawn([compress] + compress_flags[compress] + [archive_name],
dry_run=dry_run)
return archive_name + compress_ext[compress]
else:
return archive_name | Create a tar file from all the files under 'base_dir'.
This file may be compressed.
:param compress: Compression algorithms. Supported algorithms are:
'gzip': (the default)
'compress'
'bzip2'
None
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename. | entailment |
def auth_interactive(self, username, handler, event, submethods=''):
"""
response_list = handler(title, instructions, prompt_list)
"""
self.transport.lock.acquire()
try:
self.auth_event = event
self.auth_method = 'keyboard-interactive'
self.username = username
self.interactive_handler = handler
self.submethods = submethods
self._request_auth()
finally:
self.transport.lock.release() | response_list = handler(title, instructions, prompt_list) | entailment |
def from_line(cls, line):
"""
Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the openssh known_hosts file.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
@param line: a line from an OpenSSH known_hosts file
@type line: str
"""
fields = line.split(' ')
if len(fields) < 3:
# Bad number of fields
return None
fields = fields[:3]
names, keytype, key = fields
names = names.split(',')
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
try:
if keytype == 'ssh-rsa':
key = RSAKey(data=base64.decodestring(key))
elif keytype == 'ssh-dss':
key = DSSKey(data=base64.decodestring(key))
else:
return None
except binascii.Error, e:
raise InvalidHostKey(line, e)
return cls(names, key) | Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the openssh known_hosts file.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
@param line: a line from an OpenSSH known_hosts file
@type line: str | entailment |
def to_line(self):
"""
Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included.
"""
if self.valid:
return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(),
self.key.get_base64())
return None | Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included. | entailment |
def load(self, filename):
"""
Read a file of known SSH host keys, in the format used by openssh.
This type of file unfortunately doesn't exist on Windows, but on
posix, it will usually be stored in
C{os.path.expanduser("~/.ssh/known_hosts")}.
If this method is called multiple times, the host keys are merged,
not cleared. So multiple calls to C{load} will just call L{add},
replacing any existing entries and adding new ones.
@param filename: name of the file to read host keys from
@type filename: str
@raise IOError: if there was an error reading the file
"""
f = open(filename, 'r')
for line in f:
line = line.strip()
if (len(line) == 0) or (line[0] == '#'):
continue
e = HostKeyEntry.from_line(line)
if e is not None:
self._entries.append(e)
f.close() | Read a file of known SSH host keys, in the format used by openssh.
This type of file unfortunately doesn't exist on Windows, but on
posix, it will usually be stored in
C{os.path.expanduser("~/.ssh/known_hosts")}.
If this method is called multiple times, the host keys are merged,
not cleared. So multiple calls to C{load} will just call L{add},
replacing any existing entries and adding new ones.
@param filename: name of the file to read host keys from
@type filename: str
@raise IOError: if there was an error reading the file | entailment |
def save(self, filename):
"""
Save host keys into a file, in the format used by openssh. The order of
keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split into individual key lines, which is arguably a bug.
@param filename: name of the file to write
@type filename: str
@raise IOError: if there was an error writing the file
@since: 1.6.1
"""
f = open(filename, 'w')
for e in self._entries:
line = e.to_line()
if line:
f.write(line)
f.close() | Save host keys into a file, in the format used by openssh. The order of
keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split into individual key lines, which is arguably a bug.
@param filename: name of the file to write
@type filename: str
@raise IOError: if there was an error writing the file
@since: 1.6.1 | entailment |
def lookup(self, hostname):
"""
Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey})
"""
class SubDict (UserDict.DictMixin):
def __init__(self, hostname, entries, hostkeys):
self._hostname = hostname
self._entries = entries
self._hostkeys = hostkeys
def __getitem__(self, key):
for e in self._entries:
if e.key.get_name() == key:
return e.key
raise KeyError(key)
def __setitem__(self, key, val):
for e in self._entries:
if e.key is None:
continue
if e.key.get_name() == key:
# replace
e.key = val
break
else:
# add a new one
e = HostKeyEntry([hostname], val)
self._entries.append(e)
self._hostkeys._entries.append(e)
def keys(self):
return [e.key.get_name() for e in self._entries if e.key is not None]
entries = []
for e in self._entries:
for h in e.hostnames:
if (h.startswith('|1|') and (self.hash_host(hostname, h) == h)) or (h == hostname):
entries.append(e)
if len(entries) == 0:
return None
return SubDict(hostname, entries, self) | Find a hostkey entry for a given hostname or IP. If no entry is found,
C{None} is returned. Otherwise a dictionary of keytype to key is
returned. The keytype will be either C{"ssh-rsa"} or C{"ssh-dss"}.
@param hostname: the hostname (or IP) to lookup
@type hostname: str
@return: keys associated with this host (or C{None})
@rtype: dict(str, L{PKey}) | entailment |
def check(self, hostname, key):
"""
Return True if the given key is associated with the given hostname
in this dictionary.
@param hostname: hostname (or IP) of the SSH server
@type hostname: str
@param key: the key to check
@type key: L{PKey}
@return: C{True} if the key is associated with the hostname; C{False}
if not
@rtype: bool
"""
k = self.lookup(hostname)
if k is None:
return False
host_key = k.get(key.get_name(), None)
if host_key is None:
return False
return str(host_key) == str(key) | Return True if the given key is associated with the given hostname
in this dictionary.
@param hostname: hostname (or IP) of the SSH server
@type hostname: str
@param key: the key to check
@type key: L{PKey}
@return: C{True} if the key is associated with the hostname; C{False}
if not
@rtype: bool | entailment |
def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by openssh when storing
hashed hostnames in the known_hosts file.
@param hostname: the hostname to hash
@type hostname: str
@param salt: optional salt to use when hashing (must be 20 bytes long)
@type salt: str
@return: the hashed hostname
@rtype: str
"""
if salt is None:
salt = rng.read(SHA.digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = base64.decodestring(salt)
assert len(salt) == SHA.digest_size
hmac = HMAC.HMAC(salt, hostname, SHA).digest()
hostkey = '|1|%s|%s' % (base64.encodestring(salt), base64.encodestring(hmac))
return hostkey.replace('\n', '') | Return a "hashed" form of the hostname, as used by openssh when storing
hashed hostnames in the known_hosts file.
@param hostname: the hostname to hash
@type hostname: str
@param salt: optional salt to use when hashing (must be 20 bytes long)
@type salt: str
@return: the hashed hostname
@rtype: str | entailment |
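Sketch tying together the known_hosts helpers above (load, lookup, check, hash_host). It assumes they live on a paramiko-style HostKeys class, that the import path is paramiko, and that a known_hosts file exists; the hostname is invented.

import os
from paramiko import HostKeys  # assumed import path

keys = HostKeys()
keys.load(os.path.expanduser("~/.ssh/known_hosts"))

entry = keys.lookup("ssh.example.com")
if entry is not None:
    print(entry.keys())  # e.g. ['ssh-rsa']

# Hashed hostname in the same "|1|..." form used by OpenSSH.
print(HostKeys.hash_host("ssh.example.com"))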
def inflate_long(s, always_positive=False):
"turns a normalized byte string into a long-int (adapted from Crypto.Util.number)"
out = 0L
negative = 0
if not always_positive and (len(s) > 0) and (ord(s[0]) >= 0x80):
negative = 1
if len(s) % 4:
filler = '\x00'
if negative:
filler = '\xff'
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
out = (out << 32) + struct.unpack('>I', s[i:i+4])[0]
if negative:
out -= (1L << (8 * len(s)))
return out | turns a normalized byte string into a long-int (adapted from Crypto.Util.number) | entailment |
def deflate_long(n, add_sign_padding=True):
"turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"
# after much testing, this algorithm was deemed to be the fastest
s = ''
n = long(n)
while (n != 0) and (n != -1):
s = struct.pack('>I', n & 0xffffffffL) + s
n = n >> 32
# strip off leading zeros, FFs
for i in enumerate(s):
if (n == 0) and (i[1] != '\000'):
break
if (n == -1) and (i[1] != '\xff'):
break
else:
# degenerate case, n was either 0 or -1
i = (0,)
if n == 0:
s = '\000'
else:
s = '\xff'
s = s[i[0]:]
if add_sign_padding:
if (n == 0) and (ord(s[0]) >= 0x80):
s = '\x00' + s
if (n == -1) and (ord(s[0]) < 0x80):
s = '\xff' + s
return s | turns a long-int into a normalized byte string (adapted from Crypto.Util.number) | entailment |
def generate_key_bytes(hashclass, salt, key, nbytes):
"""
Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
@param hashclass: class from L{Crypto.Hash} that can be used as a secure
hashing function (like C{MD5} or C{SHA}).
@type hashclass: L{Crypto.Hash}
@param salt: data to salt the hash with.
@type salt: string
@param key: human-entered password or passphrase.
@type key: string
@param nbytes: number of bytes to generate.
@type nbytes: int
@return: key data
@rtype: string
"""
keydata = ''
digest = ''
if len(salt) > 8:
salt = salt[:8]
while nbytes > 0:
hash_obj = hashclass.new()
if len(digest) > 0:
hash_obj.update(digest)
hash_obj.update(key)
hash_obj.update(salt)
digest = hash_obj.digest()
size = min(nbytes, len(digest))
keydata += digest[:size]
nbytes -= size
return keydata | Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
@param hashclass: class from L{Crypto.Hash} that can be used as a secure
hashing function (like C{MD5} or C{SHA}).
@type hashclass: L{Crypto.Hash}
@param salt: data to salt the hash with.
@type salt: string
@param key: human-entered password or passphrase.
@type key: string
@param nbytes: number of bytes to generate.
@type nbytes: int
@return: key data
@rtype: string | entailment |
def retry_on_signal(function):
"""Retries function until it doesn't raise an EINTR error"""
while True:
try:
return function()
except EnvironmentError, e:
if e.errno != errno.EINTR:
raise | Retries function until it doesn't raise an EINTR error | entailment |
def generate(bits, progress_func=None):
"""
Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
@param bits: number of bits the generated key should be.
@type bits: int
@param progress_func: an optional function to call at key points in
key generation (used by C{pyCrypto.PublicKey}).
@type progress_func: function
@return: new private key
@rtype: L{RSAKey}
"""
rsa = RSA.generate(bits, rng.read, progress_func)
key = RSAKey(vals=(rsa.e, rsa.n))
key.d = rsa.d
key.p = rsa.p
key.q = rsa.q
return key | Generate a new private RSA key. This factory function can be used to
generate a new host key or authentication key.
@param bits: number of bits the generated key should be.
@type bits: int
@param progress_func: an optional function to call at key points in
key generation (used by C{pyCrypto.PublicKey}).
@type progress_func: function
@return: new private key
@rtype: L{RSAKey} | entailment |
def _pkcs1imify(self, data):
"""
turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre.
"""
SHA1_DIGESTINFO = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
size = len(util.deflate_long(self.n, 0))
filler = '\xff' * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
return '\x00\x01' + filler + '\x00' + SHA1_DIGESTINFO + data | turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre. | entailment |
def generate(bits=1024, progress_func=None):
"""
Generate a new private DSS key. This factory function can be used to
generate a new host key or authentication key.
@param bits: number of bits the generated key should be.
@type bits: int
@param progress_func: an optional function to call at key points in
key generation (used by C{pyCrypto.PublicKey}).
@type progress_func: function
@return: new private key
@rtype: L{DSSKey}
"""
dsa = DSA.generate(bits, rng.read, progress_func)
key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
key.x = dsa.x
return key | Generate a new private DSS key. This factory function can be used to
generate a new host key or authentication key.
@param bits: number of bits the generated key should be.
@type bits: int
@param progress_func: an optional function to call at key points in
key generation (used by C{pyCrypto.PublicKey}).
@type progress_func: function
@return: new private key
@rtype: L{DSSKey} | entailment |
def parse(self, file_obj):
"""
Read an OpenSSH config from the given file object.
@param file_obj: a file-like object to read the config file from
@type file_obj: file
"""
configs = [self._config[0]]
for line in file_obj:
line = line.rstrip('\n').lstrip()
if (line == '') or (line[0] == '#'):
continue
if '=' in line:
key, value = line.split('=', 1)
key = key.strip().lower()
else:
# find first whitespace, and split there
i = 0
while (i < len(line)) and not line[i].isspace():
i += 1
if i == len(line):
raise Exception('Unparsable line: %r' % line)
key = line[:i].lower()
value = line[i:].lstrip()
if key == 'host':
del configs[:]
# the value may be multiple hosts, space-delimited
for host in value.split():
# do we have a pre-existing host config to append to?
matches = [c for c in self._config if c['host'] == host]
if len(matches) > 0:
configs.append(matches[0])
else:
config = { 'host': host }
self._config.append(config)
configs.append(config)
else:
for config in configs:
config[key] = value | Read an OpenSSH config from the given file object.
@param file_obj: a file-like object to read the config file from
@type file_obj: file | entailment |
def lookup(self, hostname):
"""
Return a dict of config options for a given hostname.
The host-matching rules of OpenSSH's C{ssh_config} man page are used,
which means that all configuration options from matching host
specifications are merged, with more specific hostmasks taking
precedence. In other words, if C{"Port"} is set under C{"Host *"}
and also C{"Host *.example.com"}, and the lookup is for
C{"ssh.example.com"}, then the port entry for C{"Host *.example.com"}
will win out.
The keys in the returned dict are all normalized to lowercase (look for
C{"port"}, not C{"Port"}. No other processing is done to the keys or
values.
@param hostname: the hostname to lookup
@type hostname: str
"""
matches = [x for x in self._config if fnmatch.fnmatch(hostname, x['host'])]
# Move * to the end
_star = matches.pop(0)
matches.append(_star)
ret = {}
for m in matches:
for k,v in m.iteritems():
if not k in ret:
ret[k] = v
ret = self._expand_variables(ret, hostname)
del ret['host']
return ret | Return a dict of config options for a given hostname.
The host-matching rules of OpenSSH's C{ssh_config} man page are used,
which means that all configuration options from matching host
specifications are merged, with more specific hostmasks taking
precedence. In other words, if C{"Port"} is set under C{"Host *"}
and also C{"Host *.example.com"}, and the lookup is for
C{"ssh.example.com"}, then the port entry for C{"Host *.example.com"}
will win out.
The keys in the returned dict are all normalized to lowercase (look for
C{"port"}, not C{"Port"}. No other processing is done to the keys or
values.
@param hostname: the hostname to lookup
@type hostname: str | entailment |
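Sketch of the SSHConfig parse()/lookup() pair above, assuming a paramiko-style SSHConfig class (assumed import path) and an existing ~/.ssh/config; the host name is invented.

import os
from paramiko import SSHConfig  # assumed import path

config = SSHConfig()
with open(os.path.expanduser("~/.ssh/config")) as f:
    config.parse(f)

opts = config.lookup("ssh.example.com")
print("{0}:{1}".format(opts["hostname"], opts.get("port", "22")))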
def _expand_variables(self, config, hostname ):
"""
Return a dict of config options with expanded substitutions
for a given hostname.
Please refer to man ssh_config(5) for the parameters that
are replaced.
@param config: the config for the hostname
@type config: dict
@param hostname: the hostname that the config belongs to
@type hostname: str
"""
if 'hostname' in config:
config['hostname'] = config['hostname'].replace('%h',hostname)
else:
config['hostname'] = hostname
if 'port' in config:
port = config['port']
else:
port = SSH_PORT
user = os.getenv('USER')
if 'user' in config:
remoteuser = config['user']
else:
remoteuser = user
host = socket.gethostname().split('.')[0]
fqdn = socket.getfqdn()
homedir = os.path.expanduser('~')
replacements = {'controlpath' :
[
('%h', config['hostname']),
('%l', fqdn),
('%L', host),
('%n', hostname),
('%p', port),
('%r', remoteuser),
('%u', user)
],
'identityfile' :
[
('~', homedir),
('%d', homedir),
('%h', config['hostname']),
('%l', fqdn),
('%u', user),
('%r', remoteuser)
]
}
for k in config:
if k in replacements:
for find, replace in replacements[k]:
config[k] = config[k].replace(find, str(replace))
return config | Return a dict of config options with expanded substitutions
for a given hostname.
Please refer to man ssh_config(5) for the parameters that
are replaced.
@param config: the config for the hostname
@type hostname: dict
@param hostname: the hostname that the config belongs to
@type hostname: str | entailment |
def url(proto, server, port=None, uri=None):
"""Construct a URL from the given components."""
url_parts = [proto, '://', server]
if port:
port = int(port)
if port < 1 or port > 65535:
raise ValueError('invalid port value')
if not ((proto == 'http' and port == 80) or
(proto == 'https' and port == 443)):
url_parts.append(':')
url_parts.append(str(port))
if uri:
url_parts.append('/')
url_parts.append(requests.utils.quote(uri.strip('/')))
url_parts.append('/')
return ''.join(url_parts) | Construct a URL from the given components. | entailment |
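Two concrete calls to the url() helper above; the host names are placeholders. Default ports (80 for http, 443 for https) are omitted from the result, and any uri is quoted and wrapped in slashes.

print(url('https', 'array.example.com', 443, 'api/1.12'))
# https://array.example.com/api/1.12/
print(url('http', 'array.example.com', 8080))
# http://array.example.com:8080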
def make_url(self, container=None, resource=None, query_items=None):
"""Create a URL from the specified parts."""
pth = [self._base_url]
if container:
pth.append(container.strip('/'))
if resource:
pth.append(resource)
else:
pth.append('')
url = '/'.join(pth)
if isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
p = requests.PreparedRequest()
p.prepare_url(url, query_items)
return p.url | Create a URL from the specified parts. | entailment |
def head_request(self, container, resource=None):
"""Send a HEAD request."""
url = self.make_url(container, resource)
headers = self._make_headers(None)
try:
rsp = requests.head(url, headers=self._base_headers,
verify=self._verify, timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
if self._dbg_print:
self.__print_req('HEAD', rsp.url, headers, None)
return rsp.status_code | Send a HEAD request. | entailment |