Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def wait_for_datacenter(client, data_center_id):
    '''
    Poll the data center until it becomes available (for the next provisioning job).

    client         : ProfitBricks service client
    data_center_id : ID of the data center to poll
    Blocks indefinitely until the data center state is "AVAILABLE";
    the polling interval is scaled up after 1 and after 10 minutes.
    '''
    total_sleep_time = 0
    seconds = 5
    while True:
        state = client.get_datacenter(data_center_id)['metadata']['state']
        if verbose:
            print("datacenter is {}".format(state))
        if state == "AVAILABLE":
            break
        time.sleep(seconds)
        total_sleep_time += seconds
        # Use >= comparisons (checked in descending order) instead of ==,
        # so a future change of the sleep increments cannot step over the
        # scale-up thresholds and lock the interval at its initial value.
        if total_sleep_time >= 600:
            # Increase polling interval after 10 minutes
            seconds = 20
        elif total_sleep_time >= 60:
            # Increase polling interval after one minute
            seconds = 10
def main(argv=None):
    '''Parse command line options, shut down and delete a server.

    Returns 0 on success, 2 on error (with a traceback printed to stderr).
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by J. Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-u', '--user', dest='user', help='the login name')
        parser.add_argument('-p', '--password', dest='password',
                            help='the login password')
        parser.add_argument('-L', '--Login', dest='loginfile', default=None,
                            help='the login file to use')
        parser.add_argument('-d', '--datacenterid', dest='dc_id',
                            required=True, default=None,
                            help='datacenter of the server')
        parser.add_argument('-s', '--serverid', dest='serverid', default=None,
                            help='ID of the server')
        parser.add_argument('-n', '--name', dest='servername', default=None,
                            help='name of the server')
        parser.add_argument('-C', '--command', dest='command', default=None,
                            help='remote shell command to use for shutdown')
        # default=0 keeps 'verbose > 0' working when -v is not given;
        # action="count" would otherwise leave args.verbose as None
        # and 'None > 0' raises TypeError on Python 3.
        parser.add_argument('-v', '--verbose', dest="verbose", action="count",
                            default=0,
                            help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)
        # Process arguments
        args = parser.parse_args()
        global verbose
        verbose = args.verbose
        dc_id = args.dc_id
        if verbose > 0:
            print("Verbose mode on")
        if args.serverid is None and args.servername is None:
            parser.error("one of 'serverid' or 'name' must be specified")
        (user, password) = getLogin(args.loginfile, args.user, args.password)
        if user is None or password is None:
            raise ValueError("user or password resolved to None")
        pbclient = ProfitBricksService(user, password)
        server = getServerStates(pbclient, dc_id, args.serverid,
                                 args.servername)
        if server is None:
            raise Exception(1, "specified server not found")
        print("using server {}(id={}) in state {}, {}"
              .format(server['name'], server['id'], server['state'],
                      server['vmstate']))
        # NOTE: stop/start/reboot_server() simply return 'True' -
        # this implies, that there's NO response nor requestId to track!
        if server['vmstate'] == 'SHUTOFF':
            print("VM is already shut off")
        else:
            if args.command is None:
                print("no command specified for shutdown of VM")
            else:
                print("executing {}".format(args.command))
                cmdrc = call(args.command, shell=True)
                print("executing {} returned {}".format(args.command, cmdrc))
            server = wait_for_server(pbclient, dc_id, server['id'],
                                     indicator='vmstate', state='SHUTOFF',
                                     timeout=300)
        # first we have to delete all attached volumes
        volumes = pbclient.get_attached_volumes(dc_id, server['id'], 0)
        for vol in volumes['items']:
            print("deleting volume {} of server {}"
                  .format(vol['id'], server['name']))
            pbclient.delete_volume(dc_id, vol['id'])
        pbclient.delete_server(dc_id, server['id'])
        wait_for_datacenter(pbclient, dc_id)
    except KeyboardInterrupt:
        # handle keyboard interrupt
        pass
    except Exception:
        traceback.print_exc()
        sys.stderr.write("\n" + program_name + ": for help use --help\n")
        return 2
    return 0
def getLogin(filename, user, passwd):
    '''
    Return (user, password), backed by an optional base64 login file.

    If filename is None, (user, passwd) is returned unchanged.
    If the file exists, credentials are read from it (base64 of "user:password").
    Otherwise user/passwd are written to the file for later reuse.
    Py3-safe: base64 works on bytes, so encode/decode explicitly
    (the original version raised TypeError under Python 3).
    '''
    if filename is None:
        return (user, passwd)
    if os.path.exists(filename):
        print("Using file {} for Login".format(filename))
        with open(filename, "r") as loginfile:
            encoded_cred = loginfile.read()
        # b64decode returns bytes on Py3 -> decode before splitting on ':'
        decoded_cred = b64decode(encoded_cred).decode('utf-8')
        # split only on the first ':' so passwords may contain colons
        login = decoded_cred.split(':', 1)
        return (login[0], login[1])
    else:
        if user is None or passwd is None:
            raise ValueError("user and password must not be None")
        print("Writing file {} for Login".format(filename))
        with open(filename, "w") as loginfile:
            # b64encode needs bytes input and returns bytes -> round-trip
            encoded_cred = b64encode((user + ":" + passwd).encode('utf-8'))
            loginfile.write(encoded_cred.decode('utf-8'))
        return (user, passwd)
def wait_for_request(pbclient, request_id,
                     timeout=0, initial_wait=5, scaleup=10):
    '''
    Wait for a request to finish until timeout.

    timeout==0 is interpreted as infinite wait time.
    Returns a tuple (return code, request status, message) where return code
      0 : request successful
      1 : request failed
     -1 : timeout exceeded
    The wait_period is increased every scaleup steps to adjust for long
    running requests.
    '''
    total_wait = 0
    wait_period = initial_wait
    next_scaleup = scaleup * wait_period
    wait = True
    while wait:
        request_status = pbclient.get_request(request_id, status=True)
        state = request_status['metadata']['status']
        if state == "DONE":
            return (0, state, request_status['metadata']['message'])
        if state == 'FAILED':
            return (1, state, request_status['metadata']['message'])
        if verbose > 0:
            print("Request '{}' is in state '{}'. Sleeping for {} seconds..."
                  .format(request_id, state, wait_period))
        sleep(wait_period)
        total_wait += wait_period
        if timeout != 0 and total_wait > timeout:
            wait = False
        next_scaleup -= wait_period
        # use <= 0 instead of == 0 so a change of the increments cannot
        # step over the scale-up point and disable scaling forever
        if next_scaleup <= 0:
            wait_period += initial_wait
            next_scaleup = scaleup * wait_period
            if verbose > 0:
                print("scaling up wait_period to {}, next change in {} seconds"
                      .format(wait_period, next_scaleup))
    # end while(wait)
    return (-1, state, "request not finished before timeout")
def main(argv=None):
    '''Parse command line options and create a server/volume composite.

    Returns 0 on success, 2 on error (with a traceback printed to stderr).
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by J. Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-u', '--user', dest='user', help='the login name')
        parser.add_argument('-p', '--password', dest='password',
                            help='the login password')
        parser.add_argument('-L', '--Login', dest='loginfile', default=None,
                            help='the login file to use')
        parser.add_argument('-d', '--datacenterid', dest='datacenterid',
                            required=True, default=None,
                            help='datacenter of the new server')
        parser.add_argument('-l', '--lanid', dest='lanid', required=True,
                            default=None, help='LAN of the new server')
        parser.add_argument('-n', '--name', dest='servername',
                            default="SRV_"+datetime.now().isoformat(),
                            help='name of the new server')
        parser.add_argument('-c', '--cores', dest='cores', type=int,
                            default=2, help='CPU cores')
        parser.add_argument('-r', '--ram', dest='ram', type=int, default=4,
                            help='RAM in GB')
        parser.add_argument('-s', '--storage', dest='storage', type=int,
                            default=4, help='storage in GB')
        parser.add_argument('-b', '--boot', dest='bootdevice', default="HDD",
                            help='boot device')
        parser.add_argument('-i', '--imageid', dest='imageid', default=None,
                            help='installation image')
        parser.add_argument('-P', '--imagepassword', dest='imgpassword',
                            default=None, help='the image password')
        # default=0 keeps 'verbose > 0' working when -v is not given;
        # action="count" would otherwise leave args.verbose as None
        # and 'None > 0' raises TypeError on Python 3.
        parser.add_argument('-v', '--verbose', dest="verbose", action="count",
                            default=0,
                            help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)
        # Process arguments
        args = parser.parse_args()
        global verbose
        verbose = args.verbose
        dc_id = args.datacenterid
        lan_id = args.lanid
        servername = args.servername
        if verbose > 0:
            print("Verbose mode on")
            print("start {} with args {}".format(program_name, str(args)))
        # Test images (location de/fra)
        # CDROM: 7fc885b3-c9a6-11e5-aa10-52540005ab80  # debian-8.3.0-amd64-netinst.iso
        # HDD:   28007a6d-c88a-11e5-aa10-52540005ab80  # CentOS-7-server-2016-02-01
        hdimage = args.imageid
        cdimage = None
        if args.bootdevice == "CDROM":
            hdimage = None
            cdimage = args.imageid
        print("using boot device {} with image {}"
              .format(args.bootdevice, args.imageid))
        (user, password) = getLogin(args.loginfile, args.user, args.password)
        if user is None or password is None:
            raise ValueError("user or password resolved to None")
        pbclient = ProfitBricksService(user, password)
        first_nic = NIC(name="local", ips=[], dhcp=True, lan=lan_id)
        volume = Volume(name=servername+"-Disk", size=args.storage,
                        image=hdimage, image_password=args.imgpassword)
        server = Server(name=servername, cores=args.cores, ram=args.ram*1024,
                        create_volumes=[volume], nics=[first_nic],
                        boot_cdrom=cdimage)
        print("creating server..")
        if verbose > 0:
            print("SERVER: {}".format(str(server)))
        response = pbclient.create_server(dc_id, server)
        print("wait for provisioning..")
        wait_for_request(pbclient, response["requestId"])
        server_id = response['id']
        print("Server provisioned with ID {}".format(server_id))
        nics = pbclient.list_nics(dc_id, server_id, 1)
        # server should have exactly one nic, but we only test empty nic list
        if not nics['items']:
            raise CLIError("No NICs found for newly created server {}"
                           .format(server_id))
        nic0 = nics['items'][0]
        if verbose > 0:
            print("NIC0: {}".format(str(nic0)))
        (nic_id, nic_mac) = (nic0['id'], nic0['properties']['mac'])
        print("NIC of new Server has ID {} and MAC {}".format(nic_id, nic_mac))
        print("{} finished w/o errors".format(program_name))
        return 0
    except KeyboardInterrupt:
        # handle keyboard interrupt
        return 0
    except Exception:
        traceback.print_exc()
        sys.stderr.write("\n" + program_name + ": for help use --help\n")
        return 2
def main(argv=None):
    '''Parse command line options and apply a power action to a server.

    Supported actions: POWERON, POWEROFF, START (alias of POWERON), SHUTOFF.
    Returns 0 on success, 2 on error (with a traceback printed to stderr).
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by J. Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-u', '--user', dest='user', help='the login name')
        parser.add_argument('-p', '--password', dest='password',
                            help='the login password')
        parser.add_argument('-L', '--Login', dest='loginfile', default=None,
                            help='the login file to use')
        parser.add_argument('-d', '--datacenterid', dest='dc_id',
                            required=True, default=None,
                            help='datacenter of the server')
        parser.add_argument('-s', '--serverid', dest='serverid', default=None,
                            help='ID of the server')
        parser.add_argument('-n', '--name', dest='servername', default=None,
                            help='name of the server')
        parser.add_argument('-a', '--action', dest='action', default=None,
                            required=True, help='what to do with the server')
        parser.add_argument('-C', '--command', dest='command', default=None,
                            help='remote shell command to use for shutdown')
        # default=0 keeps 'verbose > 0' working when -v is not given;
        # action="count" would otherwise leave args.verbose as None
        # and 'None > 0' raises TypeError on Python 3.
        parser.add_argument('-v', '--verbose', dest="verbose", action="count",
                            default=0,
                            help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)
        # Process arguments
        args = parser.parse_args()
        global verbose
        verbose = args.verbose
        dc_id = args.dc_id
        if verbose > 0:
            print("Verbose mode on")
        # normalize action
        action = args.action.upper()
        actions = {'POWERON', 'POWEROFF', 'START', 'SHUTOFF'}
        if action not in actions:
            # (typo fix: was "must be on of")
            parser.error("action must be one of {}".format(str(actions)))
        if args.serverid is None and args.servername is None:
            parser.error("one of 'serverid' or 'name' must be specified")
        (user, password) = getLogin(args.loginfile, args.user, args.password)
        if user is None or password is None:
            raise ValueError("user or password resolved to None")
        pbclient = ProfitBricksService(user, password)
        server = getServerStates(pbclient, dc_id,
                                 args.serverid, args.servername)
        if server is None:
            raise Exception(1, "specified server not found")
        print("using server {}(id={}) in state {}, {}"
              .format(server['name'], server['id'], server['state'],
                      server['vmstate']))
        # !!! stop/start/reboot_server() simply return 'True' !!!
        # this implies, that there's NO response nor requestId to track!
        if action == 'POWEROFF':
            if server['state'] == 'INACTIVE':
                print("server is already powered off")
            else:
                # currently use 'forced' poweroff
                if server['vmstate'] != 'SHUTOFF':
                    print("VM is in state {}, {} may lead to inconsistent state"
                          .format(server['vmstate'], action))
                if args.command is None:
                    print("no command specified for shutdown of VM")
                else:
                    print("executing {}".format(args.command))
                    cmdrc = call(args.command, shell=True)
                    print("executing {} returned {}"
                          .format(args.command, cmdrc))
                pbclient.stop_server(dc_id, server['id'])
                server = wait_for_server(pbclient, dc_id, server['id'],
                                         state='INACTIVE', timeout=300)
        elif action in ('POWERON', 'START'):
            # START is an alias of POWERON; the two identical branches of
            # the original are merged here
            if server['vmstate'] == 'RUNNING':
                print("VM is already up and running")
            else:
                pbclient.start_server(dc_id, server['id'])
                server = wait_for_server(pbclient, dc_id, server['id'],
                                         indicator='vmstate', state='RUNNING',
                                         timeout=300)
        elif action == 'SHUTOFF':
            if server['vmstate'] == 'SHUTOFF':
                print("VM is already shut off")
            else:
                if args.command is None:
                    print("no command specified for shutdown of VM")
                else:
                    print("executing {}".format(args.command))
                    cmdrc = call(args.command, shell=True)
                    print("executing {} returned {}"
                          .format(args.command, cmdrc))
                server = wait_for_server(pbclient, dc_id, server['id'],
                                         indicator='vmstate',
                                         state='SHUTOFF',
                                         timeout=300)
        # end if/else(action)
        print("server {}(id={}) now in state {}, {}"
              .format(server['name'], server['id'], server['state'],
                      server['vmstate']))
    except KeyboardInterrupt:
        # handle keyboard interrupt
        pass
    except Exception:
        traceback.print_exc()
        sys.stderr.write("\n" + program_name + ": for help use --help\n")
        return 2
    return 0
def get_dc_inventory(pbclient, dc=None):
    '''Get the inventory (servers and volumes) of one data center.

    pbclient : ProfitBricks service client
    dc       : data center dict as returned by the API
    Returns a list of rows; each row is the dc columns followed by either
    server or volume columns (matching the CSV header written by main()).
    '''
    if pbclient is None:
        raise ValueError("argument 'pbclient' must not be None")
    if dc is None:
        raise ValueError("argument 'dc' must not be None")
    dc_inv = []  # inventory list to return
    dcid = dc['id']
    # dc_data contains dc specific columns
    dc_data = [dcid, dc['properties']['name'], dc['properties']['location']]
    # first get the servers
    # depth 3 is enough to get into volume/nic level plus details
    servers = pbclient.list_servers(dcid, 3)
    print("found %i servers in data center %s" % (len(servers['items']), dc['properties']['name']))
    if verbose > 2:
        print(str(servers))
    # this will build a hash to relate volumes to servers later
    bound_vols = dict()  # hash volume-to-server relations
    for server in servers['items']:
        if verbose > 2:
            print("SERVER: %s" % str(server))
        serverid = server['id']
        # server_data contains server specific columns for later output
        server_data = [
            server['type'], serverid, server['properties']['name'],
            server['metadata']['state']
        ]
        # OS is determined by boot device (volume||cdrom), not a server property.
        # Might even be unspecified
        bootOS = "NONE"
        bootdev = server['properties']['bootVolume']
        if bootdev is None:
            bootdev = server['properties']['bootCdrom']
            # bug fix: only report a CDROM boot device if one is actually
            # set (the original printed this even when bootCdrom was None)
            if bootdev is not None:
                print("server %s has boot device %s" % (serverid, "CDROM"))
        if bootdev is None:
            print("server %s has NO boot device" % (serverid))
        else:
            bootOS = bootdev['properties']['licenceType']
        server_data += [bootOS, server['properties']['cores'], server['properties']['ram']]
        server_vols = server['entities']['volumes']['items']
        n_volumes = len(server_vols)
        total_disk = 0
        licence_type = ""
        for vol in server_vols:
            total_disk += vol['properties']['size']
            licence_type = str(vol['properties']['licenceType'])
            bound_vols[vol['id']] = serverid
            if verbose:
                print("volume %s is connected to %s w/ OS %s" % (
                    vol['id'], bound_vols[vol['id']], licence_type))
        server_nics = server['entities']['nics']['items']
        n_nics = len(server_nics)
        server_data += [
            n_nics, n_volumes, total_disk, "",
            server['metadata']['createdDate'], server['metadata']['lastModifiedDate']
        ]
        dc_inv.append(dc_data + server_data)
    # end for(servers)
    # and now the volumes...
    volumes = pbclient.list_volumes(dcid, 2)  # depth 2 gives max. details
    for volume in volumes['items']:
        if verbose > 2:
            print("VOLUME: %s" % str(volume))
        volid = volume['id']
        vol_data = [
            volume['type'], volid, volume['properties']['name'], volume['metadata']['state'],
            volume['properties']['licenceType'], "", "", "", "", volume['properties']['size']
        ]
        # volumes attached to a server get that server's ID in 'Connected to'
        connect = 'NONE'
        if volid in bound_vols:
            connect = bound_vols[volid]
        vol_data += [
            connect, volume['metadata']['createdDate'], volume['metadata']['lastModifiedDate']
        ]
        dc_inv.append(dc_data + vol_data)
    # end for(volumes)
    return dc_inv
def get_dc_network(pbclient, dc=None):
    '''Get the network (LAN/NIC) assignments of one data center.

    pbclient : ProfitBricks service client
    dc       : data center dict as returned by the API
    Returns a list of rows; each row is the dc columns plus LAN columns,
    followed by NIC columns when the LAN has NICs attached.
    '''
    if pbclient is None:
        raise ValueError("argument 'pbclient' must not be None")
    if dc is None:
        raise ValueError("argument 'dc' must not be None")
    print("getting networks..")
    dcid = dc['id']
    # dc_data contains dc specific columns
    dc_data = [dcid, dc['properties']['name'], dc['properties']['location']]
    lbs = pbclient.list_loadbalancers(dcid, 2)
    # build lookup hash for loadbalancer's ID->name
    lbnames = dict([(lb['id'], lb['properties']['name']) for lb in lbs['items']])
    if verbose > 2:
        print("LBs: %s" % (str(lbs)))
    # depth 3 reaches the nic level plus details
    lans = pbclient.list_lans(dcid, 3)
    lan_inv = []
    # lookup hash for server's ID->name, filled lazily per server seen
    servernames = dict()
    for lan in lans['items']:
        if verbose > 1:
            print("LAN: %s" % str(lan))
        lan_data = dc_data + [
            "LAN "+lan['id'], lan['properties']['name'], lan['properties']['public'],
            lan['metadata']['state']
        ]
        nics = lan['entities']['nics']['items']
        lan_data.append(len(nics))
        if nics:
            for nic in nics:
                nic_props = nic['properties']
                # get the serverid of this nic by parsing its href
                # NOTE: this may also be a loadbalancer ID, even though the
                # href still looks like '/servers/<id>/...'
                serverid = re.sub(r'^.*servers/([^/]+)/nics.*', r'\1', nic['href'])
                if serverid in lbnames:
                    servertype = "LB"
                    servername = lbnames[serverid]
                    print("server entry for %s is LOADBALANCER %s" % (serverid, servername))
                else:
                    servertype = "Server"
                    # fetch and cache the server name on first encounter
                    if serverid not in servernames:
                        if verbose:
                            print("add server entry for %s" % serverid)
                        server = pbclient.get_server(dcid, serverid, 0)
                        servernames[serverid] = server['properties']['name']
                    servername = servernames[serverid]
                # end if/else(serverid)
                ips = [str(ip) for ip in nic_props['ips']]
                nic_data = [
                    nic['id'], nic_props['mac'], nic_props['dhcp'], ips, nic_props['name'],
                    nic_props['firewallActive'], servertype, serverid, servername
                ]
                lan_inv.append(lan_data+nic_data)
            # end for(nics)
        else:
            # LAN without NICs still gets a row (without NIC columns)
            lan_inv.append(lan_data)
    # end for(lans)
    return lan_inv
def main(argv=None):  # IGNORE:C0111
    '''Parse command line options and dump data center resources to CSV files.

    Depending on the flags, writes inventory, image/snapshot, IP-block and
    network CSV files. Returns 0 on success, 2 on error.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by J.Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(
            description=program_license,
            formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument(
            '-u', '--user', dest='user', required=True, help='the login name')
        parser.add_argument(
            '-p', '--password', dest='password', help='the login password')
        parser.add_argument(
            '-d', '--datacenter', '--datacenterid', dest='datacenterid', nargs='?', const='*',
            help='show server/storage of datacenter(s)')
        parser.add_argument(
            '-i', '--image', dest='show_images', action="store_true",
            help='show images and snapshots')
        parser.add_argument(
            '-b', '--ipblock', dest='show_ipblocks', action="store_true",
            help='show reserved IP blocks')
        parser.add_argument(
            '-n', '--network', dest='show_networks', action="store_true",
            help='show network assignments')
        # parser.add_argument(
        #     '-r', '--request', dest='show_requests', action="store_true",
        #     help='show requests')
        parser.add_argument(
            "-v", "--verbose", dest="verbose", action="count", default=0,
            help="set verbosity level [default: %(default)s]")
        parser.add_argument(
            '-V', '--version', action='version', version=program_version_message)
        # Process arguments
        args = parser.parse_args()
        global verbose
        verbose = args.verbose  # this is a global to be used in methods
        user = args.user
        password = args.password
        datacenterid = args.datacenterid
        print("Welcome to PB-API %s\n" % user)
        if password is None:
            password = getpass()
        if verbose > 0:
            print("Verbose mode on")
            print("using python ", sys.version_info)
        pbclient = ProfitBricksService(user, password)
        if datacenterid is not None:
            datacenters = {}
            if datacenterid == '*':
                # the default depth=1 is sufficient, higher values don't provide more details
                datacenters = pbclient.list_datacenters()
            else:
                # wrap the single-id response in the same 'items' shape as
                # the listing (dropped the redundant empty-list assignment)
                datacenters['items'] = [pbclient.get_datacenter(datacenterid, 1)]
            if verbose > 1:
                print(pp(datacenters))
            print("retrieved %i datacenters " % len(datacenters['items']))
            # dump inventory to file
            with open("pb_datacenter_inventory.csv", 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=';', lineterminator='\n')
                csvwriter.writerow([
                    'DCID', 'DCName', 'Loc', 'RscType', 'RscID', 'RscName', 'State', 'LicType',
                    'Cores', 'RAM', '# NICs', '# Volumes', '(Total) Storage', 'Connected to',
                    'Created', 'Modified'
                ])
                for dc in datacenters['items']:
                    try:
                        dc_inv = get_dc_inventory(pbclient, dc)
                        if verbose:
                            print("DC %s has %i inventory entries" % (dc['id'], len(dc_inv)))
                        for row in dc_inv:
                            csvwriter.writerow(row)
                    except Exception:
                        traceback.print_exc()
                        # sys.exit instead of the site-provided exit() builtin
                        sys.exit(2)
                # end for(datacenters)
        if args.show_images:
            with open("pb_datacenter_images.csv", 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=';', lineterminator='\n')
                csvwriter.writerow([
                    'Visibility', 'Loc', 'RscType', 'SubType', 'RscID', 'RscName',
                    'State', 'LicType', 'Size', 'Created', 'Modified'
                ])
                img_inv = get_images(pbclient)
                for row in img_inv:
                    csvwriter.writerow(row)
                snap_inv = get_snapshots(pbclient)
                for row in snap_inv:
                    csvwriter.writerow(row)
        if args.show_ipblocks:
            with open("pb_datacenter_ipblocks.csv", 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=';', lineterminator='\n')
                csvwriter.writerow([
                    'Loc', 'RscType', 'RscID', 'State', 'Size', 'IP addresses'])
                ipblocks = get_ipblocks(pbclient)
                for row in ipblocks:
                    csvwriter.writerow(row)
            # file is automatically closed after with block
        if args.show_networks:
            # the default depth=1 is sufficient, higher values don't provide more details
            datacenters = pbclient.list_datacenters()
            print("retrieved %i datacenters " % len(datacenters['items']))
            with open("pb_datacenter_networks.csv", 'w') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=';', lineterminator='\n')
                csvwriter.writerow([
                    'DCID', 'DCName', 'Loc',
                    'LAN ID', 'LAN name', 'public', 'State', '# NICs',
                    'NIC ID', 'MAC address', 'DHCP', 'IP(s)', 'NIC name', 'Firewall',
                    'Connected to', 'ID', 'Name'])
                for dc in datacenters['items']:
                    try:
                        dc_net = get_dc_network(pbclient, dc)
                        if verbose:
                            print("DC %s has %i network entries" % (dc['id'], len(dc_net)))
                        for row in dc_net:
                            csvwriter.writerow(row)
                    except Exception:
                        traceback.print_exc()
                        sys.exit(2)
                # end for(datacenters)
        # just for fun:
        # if args.show_requests:
        #     get_requests(pbclient)
        print("%s finished w/o errors" % program_name)
        return 0
    except KeyboardInterrupt:
        # handle keyboard interrupt
        return 0
    except Exception:
        traceback.print_exc()
        sys.stderr.write("\n" + program_name + ": for help use --help\n")
        return 2
def main(argv=None):
    '''Parse command line options and dump a datacenter to snapshots and file.

    All VMs must be powered off first (unless -S forces it); then every
    attached volume is snapshotted and a reusable dc definition is written
    to <outfile>.json. Returns 0 on success, 1 if VMs are still running,
    2 on error.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by J. Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-u', '--user', dest='user', help='the login name')
        parser.add_argument('-p', '--password', dest='password',
                            help='the login password')
        parser.add_argument('-L', '--Login', dest='loginfile', default=None,
                            help='the login file to use')
        parser.add_argument('-d', '--datacenterid', dest='dc_id',
                            required=True, default=None,
                            help='datacenter ID of the server')
        parser.add_argument('-o', '--outfile', dest='outfile',
                            default='dc-def_'+datetime.now().strftime('%Y-%m-%d_%H%M%S'),
                            help='the output file name')
        parser.add_argument('-S', '--Stopalways', dest='stopalways', action='store_true',
                            help='power off even when VM is running')
        parser.add_argument('-v', '--verbose', dest="verbose", action="count",
                            default=0, help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version',
                            version=program_version_message)
        # Process arguments
        args = parser.parse_args()
        global verbose
        verbose = args.verbose
        if verbose > 0:
            print("Verbose mode on")
            print("start {} with args {}".format(program_name, str(args)))
        outfile = args.outfile
        if outfile.endswith(".json"):
            # bug fix: splitext() returns a (root, ext) tuple - keep the
            # root only; assigning the tuple broke the later concatenations
            outfile = os.path.splitext(outfile)[0]
        print("Using output file base name '{}'".format(outfile))
        (user, password) = getLogin(args.loginfile, args.user, args.password)
        if user is None or password is None:
            raise ValueError("user or password resolved to None")
        pbclient = ProfitBricksService(user, password)
        dc_id = args.dc_id
        # first get all server's VM and OS state to see if we can start
        srv_info = getServerInfo(pbclient, dc_id)
        srvon = 0
        for server in srv_info:
            if server['vmstate'] != 'SHUTOFF':
                print("VM {} is in state {}, but should be SHUTOFF"
                      .format(server['name'], server['vmstate']))
                srvon += 1
        # end for(srv_info)
        if srvon > 0 and not args.stopalways:
            print("shutdown running OS before trying again")
            return 1
        # now power off all VMs before starting the snapshots
        for server in srv_info:
            controlServerState(pbclient, dc_id, server['id'], action='POWEROFF')
        # now let's go
        dcdef = pbclient.get_datacenter(dc_id, 5)
        print("starting dump of datacenter {}".format(dcdef['properties']['name']))
        dcdef_file = outfile+'_source.json'
        print("write source dc to {}".format(dcdef_file))
        write_dc_definition(dcdef, dcdef_file)
        print("get existing Snapshots")
        # first get existing snapshots
        known_snapshots = dict()
        snapshots = pbclient.list_snapshots()
        for snap in snapshots['items']:
            print("SNAP : {}".format(json.dumps(snap)))
            known_snapshots[snap['properties']['name']] = snap['id']
        print("create Snapshots, this may take a while ..")
        # we do NOT consider dangling volumes, only server-attached ones
        vol_snapshots = dict()  # map volume id==snapshot name to snapshot id
        for server in dcdef['entities']['servers']['items']:
            print("- server {}".format(server['properties']['name']))
            if 'volumes' not in server['entities']:
                print("  server {} has no volumes"
                      .format(server['properties']['name']))
                continue
            # The volumes are attached by order of creation
            # Thus we must sort them to keep the order in the clone
            print("setting volume order by deviceNumber")
            volumes = server['entities']['volumes']['items']
            new_order = sorted(volumes, key=lambda vol: vol['properties']['deviceNumber'])
            server['entities']['volumes']['items'] = new_order
            for volume in server['entities']['volumes']['items']:
                vol_id = volume['id']  # this will be the name too
                if vol_id in known_snapshots:
                    print("use existing snapshot {} of volume {}"
                          .format(vol_id, volume['properties']['name']))
                    vol_snapshots[vol_id] = known_snapshots[vol_id]
                else:
                    print("taking snapshot {} of volume {}"
                          .format(vol_id, volume['properties']['name']))
                    response = pbclient.create_snapshot(dc_id, vol_id, vol_id,
                                                        "auto-created by pb_snapshotDatacenter")
                    # response has no request id, need to check metadata state (BUSY, AVAILABLE..)
                    vol_snapshots[vol_id] = response['id']
                    print("snapshot in progress: {}".format(str(response)))
            # end for(volume)
        # end for(server)
        print("Waiting for snapshots to complete")
        snapdone = dict()
        while len(snapdone) != len(vol_snapshots):
            sleep(10)
            for snap_id in vol_snapshots.values():
                print("looking for {}".format(snap_id))
                if snap_id in snapdone:
                    continue
                snapshot = pbclient.get_snapshot(snap_id)
                print("snapshot {} is in state {}"
                      .format(snap_id, snapshot['metadata']['state']))
                if snapshot['metadata']['state'] == 'AVAILABLE':
                    snapdone[snap_id] = snapshot['metadata']['state']
            # end for(vol_snapshots)
        # end while(snapdone)
        # now replace the volumes image IDs
        print("setting snapshot id to volumes")
        for server in dcdef['entities']['servers']['items']:
            print("- server {}".format(server['properties']['name']))
            if 'volumes' not in server['entities']:
                print("  server {} has no volumes"
                      .format(server['properties']['name']))
                continue
            for volume in server['entities']['volumes']['items']:
                vol_id = volume['id']  # this will be the name too
                volume['properties']['image'] = vol_snapshots[vol_id]
            # end for(volume)
        # end for(server)
        # As it came out, the LAN id is rearranged by order of creation
        # Thus we must sort the LANs to keep the order in the clone
        print("setting LAN order by id")
        lans = dcdef['entities']['lans']['items']
        new_order = sorted(lans, key=lambda lan: lan['id'])
        dcdef['entities']['lans']['items'] = new_order
        # now sort unordered NICs by MAC and save the dcdef
        # reason is, that NICs seem to be ordered by MAC, but API response
        # doesn't guarantee the order, which we need for re-creation
        print("setting NIC order by MAC")
        for server in dcdef['entities']['servers']['items']:
            print("- server {}".format(server['properties']['name']))
            if 'nics' not in server['entities']:
                print("  server {} has no nics"
                      .format(server['properties']['name']))
                continue
            nics = server['entities']['nics']['items']
            new_order = sorted(nics, key=lambda nic: nic['properties']['mac'])
            server['entities']['nics']['items'] = new_order
        # end for(server)
        dcdef_file = outfile+'.json'
        print("write snapshot dc to {}".format(dcdef_file))
        write_dc_definition(dcdef, dcdef_file)
        return 0
    except KeyboardInterrupt:
        # handle keyboard interrupt
        return 0
    except Exception:
        traceback.print_exc()
        sys.stderr.write("\n" + program_name + ": for help use --help\n")
        return 2
def get_self(session, user_details=None):
    """Get details about the currently authenticated user.

    :param session: authenticated API session
    :param user_details: optional dict of projection params; a copy is
        sent with ``compact=True`` — the caller's dict is NOT mutated
        (the previous implementation modified it in place)
    :returns: the decoded ``result`` payload
    :raises SelfNotRetrievedException: on any non-200 response
    """
    params = None
    if user_details:
        # Merge into a copy so the caller's dict is left untouched
        params = dict(user_details)
        params['compact'] = True
    response = make_get_request(session, 'self', params_data=params)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise SelfNotRetrievedException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def get_user_by_id(session, user_id, user_details=None):
    """Get details about a specific user.

    :param session: authenticated API session
    :param user_id: ID of the user to look up
    :param user_details: optional dict of projection params; a copy is
        sent with ``compact=True`` — the caller's dict is NOT mutated
        (the previous implementation modified it in place)
    :returns: the decoded ``result`` payload
    :raises UserNotFoundException: on any non-200 response
    """
    params = None
    if user_details:
        # Merge into a copy so the caller's dict is left untouched
        params = dict(user_details)
        params['compact'] = True
    response = make_get_request(
        session, 'users/{}'.format(user_id), params_data=params)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise UserNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def get_self_user_id(session):
    """Return the ID of the currently authenticated user.

    Raises UserIdNotRetrievedException on any non-200 response.
    """
    resp = make_get_request(session, 'self')
    if resp.status_code != 200:
        raise UserIdNotRetrievedException(
            'Error retrieving user id: %s' % resp.text, resp.text)
    return resp.json()['result']['id']
def add_user_jobs(session, job_ids):
    """Add a list of jobs to the currently authenticated user.

    Returns the API ``status`` field on success; raises
    UserJobsNotAddedException otherwise.
    """
    payload = {
        'jobs[]': job_ids
    }
    resp = make_post_request(session, 'self/jobs', json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise UserJobsNotAddedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def set_user_jobs(session, job_ids):
    """Replace the authenticated user's job list with ``job_ids``.

    Returns the API ``status`` field on success; raises
    UserJobsNotSetException otherwise.
    """
    payload = {
        'jobs[]': job_ids
    }
    resp = make_put_request(session, 'self/jobs', json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise UserJobsNotSetException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def delete_user_jobs(session, job_ids):
    """Remove a list of jobs from the currently authenticated user.

    Returns the API ``status`` field on success; raises
    UserJobsNotDeletedException otherwise.
    """
    payload = {
        'jobs[]': job_ids
    }
    resp = make_delete_request(session, 'self/jobs', json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise UserJobsNotDeletedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def get_users(session, query):
    """Get one or more users matching ``query``.

    Returns the decoded ``result`` payload on success; raises
    UsersNotFoundException otherwise.
    """
    # GET /api/users/0.1/users
    resp = make_get_request(session, 'users', params_data=query)
    data = resp.json()
    if resp.status_code != 200:
        raise UsersNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['result']
def create_project(session, title, description,
                   currency, budget, jobs):
    """Create a project.

    Returns a Project instance (with ``url`` set) on success; raises
    ProjectNotCreatedException otherwise.
    """
    payload = {
        'title': title,
        'description': description,
        'currency': currency,
        'budget': budget,
        'jobs': jobs,
    }
    # POST /api/projects/0.1/projects/
    resp = make_post_request(session, 'projects', json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise ProjectNotCreatedException(message=data['message'],
                                         error_code=data['error_code'],
                                         request_id=data['request_id'],
                                         )
    project = Project(data['result'])
    project.url = urljoin(session.url, 'projects/%s' % project.seo_url)
    return project
def create_hireme_project(session, title, description,
                          currency, budget, jobs, hireme_initial_bid):
    """Create a "Hire Me" (fixed) project.

    :param jobs: list of job objects; the required Hire Me job (id=417)
        is appended to a COPY, so the caller's list is not mutated
        (the previous implementation appended to the caller's list)
    :returns: a Project instance with ``url`` populated
    :raises ProjectNotCreatedException: on any non-200 response
    """
    # Hire Me job is required; work on a copy to avoid mutating `jobs`
    jobs = jobs + [create_job_object(id=417)]
    project_data = {'title': title,
                    'description': description,
                    'currency': currency,
                    'budget': budget,
                    'jobs': jobs,
                    'hireme': True,
                    'hireme_initial_bid': hireme_initial_bid
                    }
    # POST /api/projects/0.1/projects/
    response = make_post_request(session, 'projects', json_data=project_data)
    json_data = response.json()
    if response.status_code == 200:
        project_data = json_data['result']
        p = Project(project_data)
        p.url = urljoin(session.url, 'projects/%s' % p.seo_url)
        return p
    else:
        raise ProjectNotCreatedException(message=json_data['message'],
                                         error_code=json_data['error_code'],
                                         request_id=json_data['request_id'],
                                         )
def get_projects(session, query):
    """Get one or more projects matching ``query``.

    Returns the decoded ``result`` payload on success; raises
    ProjectsNotFoundException otherwise.
    """
    # GET /api/projects/0.1/projects
    resp = make_get_request(session, 'projects', params_data=query)
    data = resp.json()
    if resp.status_code != 200:
        raise ProjectsNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['result']
def get_project_by_id(session, project_id, project_details=None, user_details=None):
    """Get a single project by its ID.

    Optional projection dicts ``project_details`` and ``user_details``
    are merged into the query string. Raises ProjectsNotFoundException
    on any non-200 response.
    """
    # GET /api/projects/0.1/projects/<int:project_id>
    params = {}
    for projection in (project_details, user_details):
        if projection:
            params.update(projection)
    resp = make_get_request(
        session, 'projects/{}'.format(project_id), params_data=params)
    data = resp.json()
    if resp.status_code != 200:
        raise ProjectsNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id']
        )
    return data['result']
def search_projects(session,
                    query,
                    search_filter=None,
                    project_details=None,
                    user_details=None,
                    limit=10,
                    offset=0,
                    active_only=None):
    """Search projects (all, or only active when ``active_only`` is truthy).

    Optional filter/projection dicts are merged into the query params.
    Raises ProjectsNotFoundException on any non-200 response.
    """
    params = {
        'query': query,
        'limit': limit,
        'offset': offset,
    }
    for extra in (search_filter, project_details, user_details):
        if extra:
            params.update(extra)
    # GET /api/projects/0.1/projects/all/
    # GET /api/projects/0.1/projects/active/
    endpoint = 'projects/{}'.format('active' if active_only else 'all')
    resp = make_get_request(session, endpoint, params_data=params)
    data = resp.json()
    if resp.status_code != 200:
        raise ProjectsNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['result']
def place_project_bid(session, project_id, bidder_id, description, amount,
                      period, milestone_percentage):
    """Place a bid on a project.

    Returns a Bid instance on success; raises BidNotPlacedException
    otherwise.
    """
    payload = {
        'project_id': project_id,
        'bidder_id': bidder_id,
        'description': description,
        'amount': amount,
        'period': period,
        'milestone_percentage': milestone_percentage,
    }
    # POST /api/projects/0.1/bids/
    resp = make_post_request(session, 'bids', json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise BidNotPlacedException(message=data['message'],
                                    error_code=data['error_code'],
                                    request_id=data['request_id'])
    return Bid(data['result'])
def get_bids(session, project_ids=None, bid_ids=None, limit=10, offset=0):
    """Get the list of bids.

    :param project_ids: optional list of project IDs to filter by
    :param bid_ids: optional list of bid IDs to filter by
    :param limit: page size
    :param offset: page offset
    :returns: the decoded ``result`` payload
    :raises BidsNotFoundException: on any non-200 response

    Note: defaults were previously mutable ``[]`` literals; ``None``
    avoids the shared-mutable-default pitfall while remaining
    backward-compatible (both are falsy).
    """
    get_bids_data = {}
    if bid_ids:
        get_bids_data['bids[]'] = bid_ids
    if project_ids:
        get_bids_data['projects[]'] = project_ids
    get_bids_data['limit'] = limit
    get_bids_data['offset'] = offset
    # GET /api/projects/0.1/bids/
    response = make_get_request(session, 'bids', params_data=get_bids_data)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise BidsNotFoundException(
            message=json_data['message'], error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def get_milestones(session, project_ids=None, milestone_ids=None, user_details=None, limit=10, offset=0):
    """Get the list of milestones.

    :param project_ids: optional list of project IDs to filter by
    :param milestone_ids: optional list of milestone IDs to filter by
    :param user_details: optional projection params merged into the query
    :param limit: page size
    :param offset: page offset
    :returns: the decoded ``result`` payload
    :raises MilestonesNotFoundException: on any non-200 response

    Note: defaults were previously mutable ``[]`` literals; ``None``
    avoids the shared-mutable-default pitfall while remaining
    backward-compatible (both are falsy).
    """
    get_milestones_data = {}
    if milestone_ids:
        get_milestones_data['milestones[]'] = milestone_ids
    if project_ids:
        get_milestones_data['projects[]'] = project_ids
    get_milestones_data['limit'] = limit
    get_milestones_data['offset'] = offset
    # Add projections if they exist
    if user_details:
        get_milestones_data.update(user_details)
    # GET /api/projects/0.1/milestones/
    response = make_get_request(
        session, 'milestones', params_data=get_milestones_data)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise MilestonesNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def get_milestone_by_id(session, milestone_id, user_details=None):
    """Get a specific milestone by its ID.

    Raises MilestonesNotFoundException on any non-200 response.
    """
    # GET /api/projects/0.1/milestones/{milestone_id}/
    endpoint = 'milestones/{}'.format(milestone_id)
    resp = make_get_request(session, endpoint, params_data=user_details)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestonesNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id']
        )
    return data['result']
def award_project_bid(session, bid_id):
    """Award a bid on a project.

    :param session: authenticated API session
    :param bid_id: ID of the bid to award
    :returns: the API ``status`` field on success
    :raises BidNotAwardedException: on any non-200 response
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    bid_data = {
        'action': 'award'
    }
    # PUT /api/projects/0.1/bids/{bid_id}/?action=award
    endpoint = 'bids/{}'.format(bid_id)
    response = make_put_request(session, endpoint, headers=headers,
                                params_data=bid_data)
    # Parse once; the previous implementation re-parsed the body in the
    # error branch for no reason.
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise BidNotAwardedException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def revoke_project_bid(session, bid_id):
    """Revoke a bid on a project.

    :param session: authenticated API session
    :param bid_id: ID of the bid to revoke
    :returns: the API ``status`` field on success
    :raises BidNotRevokedException: on any non-200 response
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    bid_data = {
        'action': 'revoke'
    }
    # PUT /api/projects/0.1/bids/{bid_id}/?action=revoke
    endpoint = 'bids/{}'.format(bid_id)
    response = make_put_request(session, endpoint, headers=headers,
                                params_data=bid_data)
    # Parse once; the previous implementation re-parsed the body in the
    # error branch for no reason.
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise BidNotRevokedException(message=json_data['message'],
                                     error_code=json_data['error_code'],
                                     request_id=json_data['request_id'])
def accept_project_bid(session, bid_id):
    """Accept a bid on a project.

    :param session: authenticated API session
    :param bid_id: ID of the bid to accept
    :returns: the API ``status`` field on success
    :raises BidNotAcceptedException: on any non-200 response
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    bid_data = {
        'action': 'accept'
    }
    # PUT /api/projects/0.1/bids/{bid_id}/?action=accept
    # (the old comment wrongly said POST and action=revoke)
    endpoint = 'bids/{}'.format(bid_id)
    response = make_put_request(session, endpoint, headers=headers,
                                params_data=bid_data)
    # Parse once; the previous implementation re-parsed the body in the
    # error branch for no reason.
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise BidNotAcceptedException(message=json_data['message'],
                                      error_code=json_data['error_code'],
                                      request_id=json_data['request_id'])
def retract_project_bid(session, bid_id):
    """Retract a bid on a project.

    :param session: authenticated API session
    :param bid_id: ID of the bid to retract
    :returns: the API ``status`` field on success
    :raises BidNotRetractedException: on any non-200 response
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    bid_data = {
        'action': 'retract'
    }
    # PUT /api/projects/0.1/bids/{bid_id}/?action=retract
    # (the old comment wrongly said POST and action=revoke)
    endpoint = 'bids/{}'.format(bid_id)
    response = make_put_request(session, endpoint, headers=headers,
                                params_data=bid_data)
    # Parse once; the previous implementation re-parsed the body in the
    # error branch for no reason.
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise BidNotRetractedException(message=json_data['message'],
                                       error_code=json_data['error_code'],
                                       request_id=json_data['request_id'])
def highlight_project_bid(session, bid_id):
    """Highlight a bid on a project.

    :param session: authenticated API session
    :param bid_id: ID of the bid to highlight
    :returns: the API ``status`` field on success
    :raises BidNotHighlightedException: on any non-200 response
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    bid_data = {
        'action': 'highlight'
    }
    # PUT /api/projects/0.1/bids/{bid_id}/?action=highlight
    # (the old comment wrongly said POST and action=revoke)
    endpoint = 'bids/{}'.format(bid_id)
    response = make_put_request(session, endpoint, headers=headers,
                                params_data=bid_data)
    # Parse once; the previous implementation re-parsed the body in the
    # error branch for no reason.
    json_data = response.json()
    if response.status_code == 200:
        return json_data['status']
    else:
        raise BidNotHighlightedException(message=json_data['message'],
                                         error_code=json_data['error_code'],
                                         request_id=json_data['request_id'])
def create_milestone_payment(session, project_id, bidder_id, amount,
                             reason, description):
    """Create a milestone payment.

    Returns a Milestone instance on success; raises
    MilestoneNotCreatedException otherwise.
    """
    payload = {
        'project_id': project_id,
        'bidder_id': bidder_id,
        'amount': amount,
        'reason': reason,
        'description': description
    }
    # POST /api/projects/0.1/milestones/
    resp = make_post_request(session, 'milestones',
                             json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneNotCreatedException(message=data['message'],
                                           error_code=data['error_code'],
                                           request_id=data['request_id'])
    return Milestone(data['result'])
def post_track(session, user_id, project_id, latitude, longitude):
    """Start tracking a project by creating a track.

    Returns the decoded ``result`` payload on success; raises
    TrackNotCreatedException otherwise.
    """
    payload = {
        'user_id': user_id,
        'project_id': project_id,
        'track_point': {
            'latitude': latitude,
            'longitude': longitude
        }
    }
    # POST /api/projects/0.1/tracks/
    resp = make_post_request(session, 'tracks',
                             json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise TrackNotCreatedException(message=data['message'],
                                       error_code=data['error_code'],
                                       request_id=data['request_id'])
    return data['result']
def update_track(session, track_id, latitude, longitude, stop_tracking=False):
    """Append a new track point to ``track_id`` (optionally stop tracking).

    Returns the decoded ``result`` payload on success; raises
    TrackNotUpdatedException otherwise.
    """
    payload = {
        'track_point': {
            'latitude': latitude,
            'longitude': longitude,
        },
        'stop_tracking': stop_tracking
    }
    # PUT /api/projects/0.1/tracks/{track_id}/
    resp = make_put_request(session, 'tracks/{}'.format(track_id),
                            json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise TrackNotUpdatedException(message=data['message'],
                                       error_code=data['error_code'],
                                       request_id=data['request_id'])
    return data['result']
def get_track_by_id(session, track_id, track_point_limit=None, track_point_offset=None):
    """Get a specific track by its ID.

    Optional ``track_point_limit``/``track_point_offset`` paginate the
    embedded track points. Raises TrackNotFoundException on any non-200
    response.
    """
    params = {}
    if track_point_limit:
        params['track_point_limit'] = track_point_limit
    if track_point_offset:
        params['track_point_offset'] = track_point_offset
    # GET /api/projects/0.1/tracks/{track_id}/
    resp = make_get_request(session, 'tracks/{}'.format(track_id),
                            params_data=params)
    data = resp.json()
    if resp.status_code != 200:
        raise TrackNotFoundException(message=data['message'],
                                     error_code=data['error_code'],
                                     request_id=data['request_id'])
    return data['result']
def release_milestone_payment(session, milestone_id, amount):
    """Release ``amount`` of a milestone payment.

    Returns the API ``status`` field on success; raises
    MilestoneNotReleasedException otherwise.
    """
    action = {
        'action': 'release',
    }
    body = {
        'amount': amount,
    }
    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=release
    endpoint = 'milestones/{}'.format(milestone_id)
    resp = make_put_request(session, endpoint, params_data=action,
                            json_data=body)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneNotReleasedException(message=data['message'],
                                            error_code=data['error_code'],
                                            request_id=data['request_id'])
    return data['status']
def request_release_milestone_payment(session, milestone_id):
    """Request the release of a milestone payment.

    Returns the API ``status`` field on success; raises
    MilestoneNotRequestedReleaseException otherwise.
    """
    action = {
        'action': 'request_release',
    }
    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=request_release
    endpoint = 'milestones/{}'.format(milestone_id)
    resp = make_put_request(session, endpoint, params_data=action)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneNotRequestedReleaseException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def cancel_milestone_payment(session, milestone_id):
    """Cancel a milestone payment.

    Returns the API ``status`` field on success; raises
    MilestoneNotCancelledException otherwise.
    """
    action = {
        'action': 'cancel',
    }
    # PUT /api/projects/0.1/milestones/{milestone_id}/?action=cancel
    endpoint = 'milestones/{}'.format(milestone_id)
    resp = make_put_request(session, endpoint, params_data=action)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneNotCancelledException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def create_milestone_request(session, project_id, bid_id, description, amount):
    """Create a milestone request.

    Returns a MilestoneRequest instance on success; raises
    MilestoneRequestNotCreatedException otherwise.
    """
    payload = {
        'project_id': project_id,
        'bid_id': bid_id,
        'description': description,
        'amount': amount,
    }
    # POST /api/projects/0.1/milestone_requests/
    resp = make_post_request(session, 'milestone_requests',
                             json_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneRequestNotCreatedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return MilestoneRequest(data['result'])
def accept_milestone_request(session, milestone_request_id):
    """Accept a milestone request.

    Returns the API ``status`` field on success; raises
    MilestoneRequestNotAcceptedException otherwise.
    """
    action = {
        'action': 'accept',
    }
    # PUT /api/projects/0.1/milestone_requests/{milestone_request_id}/
    # ?action=accept
    endpoint = 'milestone_requests/{}'.format(milestone_request_id)
    resp = make_put_request(session, endpoint, params_data=action)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneRequestNotAcceptedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def reject_milestone_request(session, milestone_request_id):
    """Reject a milestone request.

    Returns the API ``status`` field on success; raises
    MilestoneRequestNotRejectedException otherwise.
    """
    action = {
        'action': 'reject',
    }
    # PUT /api/projects/0.1/milestone_requests/{milestone_request_id}/
    # ?action=reject
    endpoint = 'milestone_requests/{}'.format(milestone_request_id)
    resp = make_put_request(session, endpoint, params_data=action)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneRequestNotRejectedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def delete_milestone_request(session, milestone_request_id):
    """Delete a milestone request.

    Returns the API ``status`` field on success; raises
    MilestoneRequestNotDeletedException otherwise.
    """
    action = {
        'action': 'delete',
    }
    # PUT /api/projects/0.1/milestone_requests/{milestone_request_id}/
    # ?action=delete
    endpoint = 'milestone_requests/{}'.format(milestone_request_id)
    resp = make_put_request(session, endpoint, params_data=action)
    data = resp.json()
    if resp.status_code != 200:
        raise MilestoneRequestNotDeletedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def post_review(session, review):
    """Post a review.

    Returns the API ``status`` field on success; raises
    ReviewNotPostedException otherwise.
    """
    # POST /api/projects/0.1/reviews/
    resp = make_post_request(session, 'reviews', json_data=review)
    data = resp.json()
    if resp.status_code != 200:
        raise ReviewNotPostedException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['status']
def get_jobs(session, job_ids, seo_details, lang):
    """Get a list of jobs.

    Returns the decoded ``result`` payload on success; raises
    JobsNotFoundException otherwise.
    """
    params = {
        'jobs[]': job_ids,
        'seo_details': seo_details,
        'lang': lang,
    }
    # GET /api/projects/0.1/jobs/
    resp = make_get_request(session, 'jobs', params_data=params)
    data = resp.json()
    if resp.status_code != 200:
        raise JobsNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id'])
    return data['result']
def create_thread(session, member_ids, context_type, context, message):
    """Create a message thread.

    Returns a Thread instance on success; raises
    ThreadNotCreatedException otherwise.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    payload = {
        'members[]': member_ids,
        'context_type': context_type,
        'context': context,
        'message': message,
    }
    # POST /api/messages/0.1/threads/
    resp = make_post_request(session, 'threads', headers,
                             form_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise ThreadNotCreatedException(message=data['message'],
                                        error_code=data['error_code'],
                                        request_id=data['request_id'])
    return Thread(data['result'])
def create_project_thread(session, member_ids, project_id, message):
    """Create a thread whose context is the given project.

    Thin wrapper around ``create_thread`` with ``context_type='project'``.
    """
    return create_thread(
        session, member_ids, 'project', project_id, message)
def post_message(session, thread_id, message):
    """Add a message to a thread.

    Returns a Message instance on success; raises
    MessageNotCreatedException otherwise.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    payload = {
        'message': message,
    }
    # POST /api/messages/0.1/threads/{thread_id}/messages/
    endpoint = 'threads/{}/messages'.format(thread_id)
    resp = make_post_request(session, endpoint, headers,
                             form_data=payload)
    data = resp.json()
    if resp.status_code != 200:
        raise MessageNotCreatedException(message=data['message'],
                                         error_code=data['error_code'],
                                         request_id=data['request_id'])
    return Message(data['result'])
def post_attachment(session, thread_id, attachments):
    """Add attachments to a thread as a new message.

    (The previous docstring was a copy-paste of ``post_message``'s.)

    :param attachments: list of dicts, each with ``file`` (the file
        object to upload) and ``filename`` keys
    :returns: a Message instance on success
    :raises MessageNotCreatedException: on any non-200 response
    """
    files = [attachment['file'] for attachment in attachments]
    filenames = [attachment['filename'] for attachment in attachments]
    message_data = {
        'attachments[]': filenames,
    }
    # POST /api/messages/0.1/threads/{thread_id}/messages/
    endpoint = 'threads/{}/messages'.format(thread_id)
    response = make_post_request(session, endpoint,
                                 form_data=message_data, files=files)
    json_data = response.json()
    if response.status_code == 200:
        return Message(json_data['result'])
    else:
        raise MessageNotCreatedException(message=json_data['message'],
                                         error_code=json_data['error_code'],
                                         request_id=json_data['request_id'])
def get_messages(session, query, limit=10, offset=0):
    """Get one or more messages.

    :param query: dict of query parameters; pagination params are merged
        into a COPY, so the caller's dict is not mutated (the previous
        implementation wrote ``limit``/``offset`` into the caller's dict)
    :param limit: page size
    :param offset: page offset
    :returns: the decoded ``result`` payload
    :raises MessagesNotFoundException: on any non-200 response
    """
    params = dict(query)
    params['limit'] = limit
    params['offset'] = offset
    # GET /api/messages/0.1/messages
    response = make_get_request(session, 'messages', params_data=params)
    json_data = response.json()
    if response.status_code == 200:
        return json_data['result']
    else:
        raise MessagesNotFoundException(
            message=json_data['message'],
            error_code=json_data['error_code'],
            request_id=json_data['request_id']
        )
def search_messages(session, thread_id, query, limit=20,
                    offset=0, message_context_details=None,
                    window_above=None, window_below=None):
    """Search for messages within a thread.

    Raises MessagesNotFoundException on any non-200 response.
    """
    params = {
        'thread_id': thread_id,
        'query': query,
        'limit': limit,
        'offset': offset
    }
    optional = (('message_context_details', message_context_details),
                ('window_above', window_above),
                ('window_below', window_below))
    for name, value in optional:
        if value:
            params[name] = value
    # GET /api/messages/0.1/messages/search
    resp = make_get_request(session, 'messages/search', params_data=params)
    data = resp.json()
    if resp.status_code != 200:
        raise MessagesNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id']
        )
    return data['result']
def get_threads(session, query):
    """Get one or more threads matching ``query``.

    Returns the decoded ``result`` payload on success; raises
    ThreadsNotFoundException otherwise.
    """
    # GET /api/messages/0.1/threads
    resp = make_get_request(session, 'threads', params_data=query)
    data = resp.json()
    if resp.status_code != 200:
        raise ThreadsNotFoundException(
            message=data['message'],
            error_code=data['error_code'],
            request_id=data['request_id']
        )
    return data['result']
def _clean(zipcode, valid_length=_valid_zipcode_length):
    """ Assumes zipcode is of type `str` """
    # Convert #####-#### to ##### by dropping everything after the dash
    zipcode, _, _ = zipcode.partition("-")
    if len(zipcode) != valid_length:
        raise ValueError(
            'Invalid format, zipcode must be of the format: "#####" or "#####-####"'
        )
    if _contains_nondigits(zipcode):
        raise ValueError('Invalid characters, zipcode may only contain digits and "-".')
    return zipcode
def similar_to(partial_zipcode, zips=_zips):
    """ List of zipcode dicts where zipcode prefix matches `partial_zipcode` """
    return list(
        filter(lambda z: z["zip_code"].startswith(partial_zipcode), zips))
def filter_by(zips=_zips, **kwargs):
    """ Use `kwargs` to select for desired attributes from list of zipcode dicts """
    def _matches(record):
        # Every requested attribute must exist and be equal
        return all(key in record and record[key] == value
                   for key, value in kwargs.items())
    return [record for record in zips if _matches(record)]
def is_valid_identifier(name):
    """Return True if ``name`` is a valid Python identifier.

    Uses ``str.isidentifier`` plus a keyword check instead of the old
    compile-and-exec probe: same results (non-strings, whitespace,
    embedded newlines and reserved words are all rejected) without
    executing generated code.
    """
    import keyword
    if not isinstance(name, str):
        return False
    # isidentifier() rejects whitespace/newlines; keywords like `class`
    # are syntactically identifiers but cannot be assigned to.
    return name.isidentifier() and not keyword.iskeyword(name)
def from_config(
        cls, cfg,
        default_fg=DEFAULT_FG_16, default_bg=DEFAULT_BG_16,
        default_fg_hi=DEFAULT_FG_256, default_bg_hi=DEFAULT_BG_256,
        max_colors=2**24
):
    """
    Build a palette definition from either a simple string or a dictionary,
    filling in defaults for items not specified.
    e.g.:
        "dark green"
            dark green foreground, black background
        {lo: dark gray, hi: "#666"}
            dark gray on 16-color terminals, #666 for 256+ color

    :param cfg: either a color-name string (foreground only) or a dict
        with optional "fg" and "bg" entries; each entry may itself be a
        string or a {"lo": ..., "hi": ...} dict
    :returns: a PaletteEntry populated from ``cfg`` with defaults filled in
    """
    # TODO: mono
    # Start from an all-defaults entry; the branches below overwrite
    # only the parts that cfg specifies.
    e = PaletteEntry(mono = default_fg,
                     foreground=default_fg,
                     background=default_bg,
                     foreground_high=default_fg_hi,
                     background_high=default_bg_hi)
    if isinstance(cfg, str):
        # Bare string: foreground color only, used for both hi and lo
        # (lo is approximated via nearest_basic_color if not a 16-color name)
        e.foreground_high = cfg
        if e.allowed(cfg, 16):
            e.foreground = cfg
        else:
            rgb = AttrSpec(fg=cfg, bg="", colors=max_colors).get_rgb_values()[0:3]
            e.foreground = nearest_basic_color(rgb)
    elif isinstance(cfg, dict):
        bg = cfg.get("bg", None)
        if isinstance(bg, str):
            # Single background color for both hi and lo
            e.background_high = bg
            if e.allowed(bg, 16):
                e.background = bg
            else:
                rgb = AttrSpec(fg=bg, bg="", colors=max_colors).get_rgb_values()[0:3]
                e.background = nearest_basic_color(rgb)
        elif isinstance(bg, dict):
            # Separate hi/lo background; "lo" is optional
            e.background_high = bg.get("hi", default_bg_hi)
            if "lo" in bg:
                if e.allowed(bg["lo"], 16):
                    e.background = bg["lo"]
                else:
                    rgb = AttrSpec(fg=bg["lo"], bg="", colors=max_colors).get_rgb_values()[0:3]
                    e.background = nearest_basic_color(rgb)
        # NOTE: falls back to cfg itself when no "fg" key — this lets a
        # dict like {lo: ..., hi: ...} act directly as the fg spec
        fg = cfg.get("fg", cfg)
        if isinstance(fg, str):
            e.foreground_high = fg
            if e.allowed(fg, 16):
                e.foreground = fg
            else:
                rgb = AttrSpec(fg=fg, bg="", colors=max_colors).get_rgb_values()[0:3]
                e.foreground = nearest_basic_color(rgb)
        elif isinstance(fg, dict):
            # Separate hi/lo foreground; "lo" is optional
            e.foreground_high = fg.get("hi", default_fg_hi)
            if "lo" in fg:
                if e.allowed(fg["lo"], 16):
                    e.foreground = fg["lo"]
                else:
                    rgb = AttrSpec(fg=fg["lo"], bg="", colors=max_colors).get_rgb_values()[0:3]
                    e.foreground = nearest_basic_color(rgb)
    return e
def get_passphrase(passphrase=None):
    """Return a passphrase as found in a passphrase.ghost file

    Lookup is done in three locations on non-Windows systems and two on
    Windows. All:
        `cwd/passphrase.ghost`
        `~/.ghost/passphrase.ghost`
    Only non-Windows:
        `/etc/ghost/passphrase.ghost`

    Falls back to the ``passphrase`` argument when no file is found.
    """
    for candidate in POTENTIAL_PASSPHRASE_LOCATIONS:
        if not os.path.isfile(candidate):
            continue
        with open(candidate) as passphrase_file:
            return passphrase_file.read()
    return passphrase
def migrate(src_path,
            src_passphrase,
            src_backend,
            dst_path,
            dst_passphrase,
            dst_backend):
    """Migrate all keys in a source stash to a destination stash

    Keys are exported (decrypted with the source passphrase) and then
    loaded into the destination stash, which re-encrypts them only when
    the passphrases differ.
    """
    # TODO: Test that re-encryption does not occur on similar
    # passphrases
    src_storage = STORAGE_MAPPING[src_backend](**_parse_path_string(src_path))
    src_stash = Stash(src_storage, src_passphrase)
    dst_storage = STORAGE_MAPPING[dst_backend](**_parse_path_string(dst_path))
    dst_stash = Stash(dst_storage, dst_passphrase)
    exported_keys = src_stash.export()
    dst_stash.load(src_passphrase, keys=exported_keys)
def generate_passphrase(size=12):
    """Return a generated string `size` long based on lowercase, uppercase,
    and digit chars.

    Uses ``random.SystemRandom`` (OS entropy) rather than the default
    Mersenne-Twister ``random`` functions, which are predictable and not
    suitable for secrets.
    """
    chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return str(''.join(rng.choice(chars) for _ in range(size)))
def _build_dict_from_key_value(keys_and_values):
"""Return a dict from a list of key=value pairs
"""
key_dict = {}
for key_value in keys_and_values:
if '=' not in key_value:
raise GhostError('Pair {0} is not of `key=value` format'.format(
key_value))
key, value = key_value.split('=', 1)
key_dict.update({str(key): str(value)})
return key_dict |
def _prettify_dict(key):
"""Return a human readable format of a key (dict).
Example:
Description: My Wonderful Key
Uid: a54d6de1-922a-4998-ad34-cb838646daaa
Created_At: 2016-09-15T12:42:32
Metadata: owner=me;
Modified_At: 2016-09-15T12:42:32
Value: secret_key=my_secret_key;access_key=my_access_key
Name: aws
"""
assert isinstance(key, dict)
pretty_key = ''
for key, value in key.items():
if isinstance(value, dict):
pretty_value = ''
for k, v in value.items():
pretty_value += '{0}={1};'.format(k, v)
value = pretty_value
pretty_key += '{0:15}{1}\n'.format(key.title() + ':', value)
return pretty_key |
def _prettify_list(items):
"""Return a human readable format of a list.
Example:
Available Keys:
- my_first_key
- my_second_key
"""
assert isinstance(items, list)
keys_list = 'Available Keys:'
for item in items:
keys_list += '\n - {0}'.format(item)
return keys_list |
def init_stash(stash_path, passphrase, passphrase_size, backend):
    r"""Init a stash

    `STASH_PATH` is the path to the storage endpoint. If this isn't supplied,
    a default path will be used. In the path, you can specify a name
    for the stash (which, if omitted, will default to `ghost`) like so:
    `ghost init http://10.10.1.1:8500;stash1`.

    After initializing a stash, don't forget you can set environment
    variables for both your stash's path and its passphrase.
    On Linux/OSx you can run:

        export GHOST_STASH_PATH='http://10.10.1.1:8500;stash1'
        export GHOST_PASSPHRASE=$(cat passphrase.ghost)
        export GHOST_BACKEND='tinydb'
    """
    # Fall back to the backend's default storage path when none given
    stash_path = stash_path or STORAGE_DEFAULT_PATH_MAPPING[backend]
    click.echo('Stash: {0} at {1}'.format(backend, stash_path))
    storage = STORAGE_MAPPING[backend](**_parse_path_string(stash_path))
    try:
        click.echo('Initializing stash...')
        # Refuse to clobber an existing passphrase file: it may belong to
        # another stash and overwriting it would lock the user out.
        if os.path.isfile(PASSPHRASE_FILENAME):
            raise GhostError(
                '{0} already exists. Overwriting might prevent you '
                'from accessing the stash it was generated for. '
                'Please make sure to save and remove the file before '
                'initializing another stash.'.format(PASSPHRASE_FILENAME))
        stash = Stash(
            storage,
            passphrase=passphrase,
            passphrase_size=passphrase_size)
        passphrase = stash.init()
        # stash.init() returns a falsy value when the stash already
        # exists — nothing to write in that case.
        if not passphrase:
            click.echo('Stash already initialized.')
            sys.exit(0)
        _write_passphrase_file(passphrase)
    except GhostError as ex:
        sys.exit(ex)
    except (OSError, IOError) as ex:
        # Storage-level failure: remove the half-created stash file so a
        # retry starts clean (directories are intentionally kept).
        click.echo("Seems like we've run into a problem.")
        file_path = _parse_path_string(stash_path)['db_path']
        click.echo(
            'Removing stale stash and passphrase: {0}. Note that any '
            'directories created are not removed for safety reasons and you '
            'might want to remove them manually.'.format(file_path))
        if os.path.isfile(file_path):
            os.remove(file_path)
        sys.exit(ex)
    click.echo('Initialized stash at: {0}'.format(stash_path))
    click.echo(
        'Your passphrase can be found under the `{0}` file in the '
        'current directory.'.format(PASSPHRASE_FILENAME))
    click.echo(
        'Make sure you save your passphrase somewhere safe. '
        'If lost, you will lose access to your stash.')
def put_key(key_name,
            value,
            description,
            meta,
            modify,
            add,
            lock,
            key_type,
            stash,
            passphrase,
            backend):
    """Insert a key to the stash

    `KEY_NAME` is the name of the key to insert

    `VALUE` is a key=value argument which can be provided multiple times.
    it is the encrypted value of your key
    """
    stash = _get_stash(backend, stash, passphrase)
    try:
        click.echo('Stashing {0} key...'.format(key_type))
        stash.put(
            name=key_name,
            value=_build_dict_from_key_value(value),
            modify=modify,
            metadata=_build_dict_from_key_value(meta),
            description=description,
            lock=lock,
            key_type=key_type,
            add=add)
    except GhostError as ex:
        sys.exit(ex)
    else:
        click.echo('Key stashed successfully')
def lock_key(key_name,
             stash,
             passphrase,
             backend):
    """Lock a key to prevent it from being deleted, purged or modified

    `KEY_NAME` is the name of the key to lock
    """
    stash = _get_stash(backend, stash, passphrase)
    try:
        click.echo('Locking key...')
        stash.lock(key_name=key_name)
    except GhostError as ex:
        sys.exit(ex)
    else:
        click.echo('Key locked successfully')
def unlock_key(key_name,
               stash,
               passphrase,
               backend):
    """Unlock a key to allow it to be modified, deleted or purged
    `KEY_NAME` is the name of the key to unlock
    """
    key_stash = _get_stash(backend, stash, passphrase)
    click.echo('Unlocking key...')
    try:
        key_stash.unlock(key_name=key_name)
    except GhostError as ex:
        sys.exit(ex)
    click.echo('Key unlocked successfully')
def get_key(key_name,
            value_name,
            jsonify,
            no_decrypt,
            stash,
            passphrase,
            backend):
    """Retrieve a key from the stash
    \b
    `KEY_NAME` is the name of the key to retrieve
    `VALUE_NAME` is a single value to retrieve e.g. if the value
    of the key `test` is `a=b,b=c`, `ghost get test a` will return
    `b`
    """
    if value_name and no_decrypt:
        sys.exit('VALUE_NAME cannot be used in conjuction with --no-decrypt')
    # Machine-readable output modes should not print progress noise.
    quiet = jsonify or value_name
    key_stash = _get_stash(backend, stash, passphrase, quiet=quiet)
    try:
        record = key_stash.get(key_name=key_name, decrypt=not no_decrypt)
    except GhostError as ex:
        sys.exit(ex)
    if not record:
        sys.exit('Key `{0}` not found'.format(key_name))
    if value_name:
        # Narrow the result down to a single value of the key.
        record = record['value'].get(value_name)
        if not record:
            sys.exit(
                'Value name `{0}` could not be found under key `{1}`'.format(
                    value_name, key_name))
    if quiet:
        click.echo(
            json.dumps(record, indent=4, sort_keys=False).strip('"'),
            nl=True)
    else:
        click.echo('Retrieving key...')
        click.echo('\n' + _prettify_dict(record))
def delete_key(key_name, stash, passphrase, backend):
    """Delete a key from the stash
    `KEY_NAME` is the name of the key to delete
    You can provide that multiple times to delete multiple keys at once
    """
    key_stash = _get_stash(backend, stash, passphrase)
    # `key_name` is a tuple of one or more names to delete.
    for name in key_name:
        click.echo('Deleting key {0}...'.format(name))
        try:
            key_stash.delete(key_name=name)
        except GhostError as ex:
            sys.exit(ex)
    click.echo('Keys deleted successfully')
def list_keys(key_name,
              max_suggestions,
              cutoff,
              jsonify,
              locked,
              key_type,
              stash,
              passphrase,
              backend):
    """List all keys in the stash
    If `KEY_NAME` is provided, will look for keys containing `KEY_NAME`.
    If `KEY_NAME` starts with `~`, close matches will be provided according
    to `max_suggestions` and `cutoff`.
    """
    key_stash = _get_stash(backend, stash, passphrase, quiet=jsonify)
    try:
        keys = key_stash.list(
            key_name=key_name,
            max_suggestions=max_suggestions,
            cutoff=cutoff,
            locked_only=locked,
            key_type=key_type)
    except GhostError as ex:
        sys.exit(ex)
    # Guard-clause output selection: JSON first, then the empty-stash
    # message, then the human-readable listing.
    if jsonify:
        click.echo(json.dumps(keys, indent=4, sort_keys=True))
        return
    if not keys:
        click.echo('The stash is empty. Go on, put some keys in there...')
        return
    click.echo('Listing all keys...')
    click.echo(_prettify_list(keys))
def purge_stash(force, stash, passphrase, backend):
    """Purge the stash from all of its keys
    """
    key_stash = _get_stash(backend, stash, passphrase)
    click.echo('Purging stash...')
    try:
        key_stash.purge(force)
    except GhostError as ex:
        sys.exit(ex)
    # Maybe we should verify that the list is empty
    # afterwards?
    click.echo('Purge complete!')
def export_keys(output_path, stash, passphrase, backend):
    """Export all keys to a file
    """
    key_stash = _get_stash(backend, stash, passphrase)
    click.echo('Exporting stash to {0}...'.format(output_path))
    try:
        key_stash.export(output_path=output_path)
    except GhostError as ex:
        sys.exit(ex)
    click.echo('Export complete!')
def load_keys(key_file, origin_passphrase, stash, passphrase, backend):
    """Load all keys from an exported key file to the stash
    `KEY_FILE` is the exported stash file to load keys from
    """
    stash = _get_stash(backend, stash, passphrase)
    click.echo('Importing all keys from {0}...'.format(key_file))
    try:
        stash.load(origin_passphrase, key_file=key_file)
    except GhostError as ex:
        # Exit with a clean message instead of a traceback, consistent
        # with every other command (put/get/delete/purge/...).
        sys.exit(ex)
    click.echo('Import complete!')
def migrate_stash(source_stash_path,
                  source_passphrase,
                  source_backend,
                  destination_stash_path,
                  destination_passphrase,
                  destination_backend):
    """Migrate all keys from a source stash to a destination stash.
    `SOURCE_STASH_PATH` and `DESTINATION_STASH_PATH` are the paths
    to the stashs you wish to perform the migration on.
    """
    click.echo('Migrating all keys from {0} to {1}...'.format(
        source_stash_path, destination_stash_path))
    # Collect the source/destination triplets once, then hand them off.
    migration_args = dict(
        src_path=source_stash_path,
        src_passphrase=source_passphrase,
        src_backend=source_backend,
        dst_path=destination_stash_path,
        dst_passphrase=destination_passphrase,
        dst_backend=destination_backend)
    try:
        migrate(**migration_args)
    except GhostError as ex:
        sys.exit(ex)
    click.echo('Migration complete!')
def ssh(key_name, no_tunnel, stash, passphrase, backend):
    """Use an ssh type key to connect to a machine via ssh
    Note that trying to use a key of the wrong type (e.g. `secret`)
    will result in an error.
    `KEY_NAME` is the key to use.
    For additional information on the different configuration options
    for an ssh type key, see the repo's readme.
    """
    # TODO: find_executable or raise
    def execute(command):
        try:
            click.echo('Executing: {0}'.format(' '.join(command)))
            subprocess.check_call(' '.join(command), shell=True)
        except subprocess.CalledProcessError:
            sys.exit(1)

    key_stash = _get_stash(backend, stash, passphrase)
    key = key_stash.get(key_name)
    if not key:
        sys.exit('Key `{0}` not found'.format(key_name))
    _assert_is_ssh_type_key(key)
    conn_info = key['value']
    ssh_key_path = conn_info.get('ssh_key_path')
    proxy_key_path = conn_info.get('proxy_key_path')
    # Key material stored inline in the stash is dumped to a temp file
    # so ssh can read it; otherwise the configured path is used as-is.
    inline_key = conn_info.get('ssh_key')
    id_file = _write_tmp(inline_key) if inline_key else ssh_key_path
    conn_info['ssh_key_path'] = id_file
    if conn_info.get('proxy'):
        inline_proxy_key = conn_info.get('proxy_key')
        proxy_id_file = _write_tmp(inline_proxy_key) \
            if inline_proxy_key else proxy_key_path
        conn_info['proxy_key_path'] = proxy_id_file
    try:
        execute(_build_ssh_command(conn_info, no_tunnel))
    finally:
        # A temp file exists exactly when the effective path differs
        # from the configured one; remove only in that case.
        if id_file != ssh_key_path:
            click.echo('Removing temp ssh key file: {0}...'.format(id_file))
            os.remove(id_file)
        if conn_info.get('proxy') and proxy_id_file != proxy_key_path:
            click.echo('Removing temp proxy key file: {0}...'.format(
                proxy_id_file))
            os.remove(proxy_id_file)
def _build_ssh_command(conn_info, no_tunnel=False):
"""
# TODO: Document clearly
IndetityFile="~/.ssh/id_rsa"
ProxyCommand="ssh -i ~/.ssh/id_rsa proxy_IP nc HOST_IP HOST_PORT"
"""
command = ['ssh', '-i', conn_info['ssh_key_path'], conn_info['conn']]
if conn_info.get('tunnel') and not no_tunnel:
command.insert(1, conn_info.get('tunnel'))
# Tunnel
command.insert(1, '-L')
# No shell
command.insert(1, '-N')
if conn_info.get('proxy'):
command.extend(_build_proxy_command(conn_info))
if conn_info.get('extend'):
command.append(conn_info.get('extend'))
return command |
def put(self,
        name,
        value=None,
        modify=False,
        metadata=None,
        description='',
        encrypt=True,
        lock=False,
        key_type='secret',
        add=False):
    """Put a key inside the stash
    if key exists and modify true: delete and create
    if key exists and modify false: fail
    if key doesn't exist and modify true: fail
    if key doesn't exist and modify false: create
    `name` is unique and cannot be changed.
    `value` must be provided if the key didn't already exist, otherwise,
    the previous value will be retained.
    `created_at` will be left unmodified if the key
    already existed. Otherwise, the current time will be used.
    `modified_at` will be changed to the current time
    if the field is being modified.
    `metadata` will be updated if provided. If it wasn't
    provided the field from the existing key will be used and the
    same goes for the `uid` which will be generated if it didn't
    previously exist.
    `lock` will lock the key to prevent it from being modified or deleted
    `add` allows to add values to an existing key instead of overwriting.
    Returns the id of the key in the database
    """
    # Guard: a locked key may not be overwritten or appended to.
    def assert_key_is_unlocked(existing_key):
        if existing_key and existing_key.get('lock'):
            raise GhostError(
                'Key `{0}` is locked and therefore cannot be modified. '
                'Unlock the key and try again'.format(name))
    # Guard: a brand new key (no previous value) must be given a value.
    def assert_value_provided_for_new_key(value, existing_key):
        if not value and not existing_key.get('value'):
            raise GhostError('You must provide a value for new keys')
    self._assert_valid_stash()
    self._validate_key_schema(value, key_type)
    if value and encrypt and not isinstance(value, dict):
        raise GhostError('Value must be of type dict')
    # TODO: This should be refactored. `_handle_existing_key` deletes
    # the key rather implicitly. It shouldn't do that.
    # `existing_key` will be an empty dict if it doesn't exist
    key = self._handle_existing_key(name, modify or add)
    assert_key_is_unlocked(key)
    assert_value_provided_for_new_key(value, key)
    new_key = dict(name=name, lock=lock)
    if value:
        # TODO: fix edge case in which encrypt is false and yet we might
        # try to add to an existing key. encrypt=false is only used when
        # `load`ing into a new stash, but someone might use it directly
        # from the API.
        if add:
            # Merge the provided values into the existing key's values.
            value = self._update_existing_key(key, value)
        new_key['value'] = self._encrypt(value) if encrypt else value
    else:
        # No new value: retain the previous (already encrypted) value.
        new_key['value'] = key.get('value')
    # TODO: Treat a case in which we try to update an existing key
    # but don't provide a value in which nothing will happen.
    # Fields below fall back to the existing key's fields when present.
    new_key['description'] = description or key.get('description')
    new_key['created_at'] = key.get('created_at') or _get_current_time()
    new_key['modified_at'] = _get_current_time()
    new_key['metadata'] = metadata or key.get('metadata')
    new_key['uid'] = key.get('uid') or str(uuid.uuid4())
    new_key['type'] = key.get('type') or key_type
    key_id = self._storage.put(new_key)
    # Audit trail entry; the actual value is never written to the log.
    audit(
        storage=self._storage.db_path,
        action='MODIFY' if (modify or add) else 'PUT',
        message=json.dumps(dict(
            key_name=new_key['name'],
            value='HIDDEN',
            description=new_key['description'],
            uid=new_key['uid'],
            metadata=json.dumps(new_key['metadata']),
            lock=new_key['lock'],
            type=new_key['type'])))
    return key_id
def get(self, key_name, decrypt=True):
    """Return a key with its parameters if it was found.
    """
    self._assert_valid_stash()
    # Copy so decryption below doesn't mutate the storage's record.
    record = self._storage.get(key_name).copy()
    if not record.get('value'):
        return None
    if decrypt:
        record['value'] = self._decrypt(record['value'])
    audit(
        storage=self._storage.db_path,
        action='GET',
        message=json.dumps(dict(key_name=key_name)))
    return record
def list(self,
         key_name=None,
         max_suggestions=100,
         cutoff=0.5,
         locked_only=False,
         key_type=None):
    """Return a list of all keys.
    """
    self._assert_valid_stash()
    # Filter out the reserved passphrase record and, when requested,
    # everything that isn't locked.
    records = []
    for record in self._storage.list():
        if record['name'] == 'stored_passphrase':
            continue
        if locked_only and not record.get('lock'):
            continue
        records.append(record)
    if key_type:
        # To maintain backward compatibility with keys without a type.
        # The default key type is secret, in which case we also look for
        # keys with no (None) types.
        accepted = ('secret', None) if key_type == 'secret' else [key_type]
        records = [r for r in records if r.get('type') in accepted]
    names = [record['name'] for record in records]
    if key_name:
        if key_name.startswith('~'):
            # Fuzzy matching mode.
            names = difflib.get_close_matches(
                key_name.lstrip('~'), names, max_suggestions, cutoff)
        else:
            names = [n for n in names if key_name in n]
    audit(
        storage=self._storage.db_path,
        action='LIST' + ('[LOCKED]' if locked_only else ''),
        message=json.dumps(dict()))
    return names
def delete(self, key_name):
    """Delete a key if it exists.
    """
    self._assert_valid_stash()
    if key_name == 'stored_passphrase':
        raise GhostError(
            '`stored_passphrase` is a reserved ghost key name '
            'which cannot be deleted')
    # TODO: Optimize. We get from the storage twice here for no reason
    if not self.get(key_name):
        raise GhostError('Key `{0}` not found'.format(key_name))
    record = self._storage.get(key_name)
    if record.get('lock'):
        raise GhostError(
            'Key `{0}` is locked and therefore cannot be deleted '
            'Please unlock the key and try again'.format(key_name))
    removed = self._storage.delete(key_name)
    # Audit the attempt even if the backend reports failure below.
    audit(
        storage=self._storage.db_path,
        action='DELETE',
        message=json.dumps(dict(key_name=key_name)))
    if not removed:
        raise GhostError('Failed to delete {0}'.format(key_name))
def purge(self, force=False, key_type=None):
    """Purge the stash from all keys
    """
    self._assert_valid_stash()
    if not force:
        raise GhostError(
            "The `force` flag must be provided to perform a stash purge. "
            "I mean, you don't really want to just delete everything "
            "without precautionary measures eh?")
    audit(
        storage=self._storage.db_path,
        action='PURGE',
        message=json.dumps(dict()))
    # Delete key-by-key so per-key guards (e.g. locks) still apply.
    for name in self.list(key_type=key_type):
        self.delete(name)
def export(self, output_path=None, decrypt=False):
    """Export all keys in the stash to a list or a file
    """
    self._assert_valid_stash()
    # We `dict` this as a precaution as tinydb returns
    # a tinydb.database.Element instead of a dictionary
    # and well.. I ain't taking no chances
    exported = [dict(self.get(name, decrypt=decrypt))
                for name in self.list()]
    if not exported:
        raise GhostError('There are no keys to export')
    if output_path:
        with open(output_path, 'w') as output_file:
            output_file.write(json.dumps(exported, indent=4))
    return exported
def load(self, origin_passphrase, keys=None, key_file=None):
    """Import keys to the stash from either a list of keys or a file
    `keys` is a list of dictionaries created by `self.export`
    `stash_path` is a path to a file created by `self.export`
    """
    # TODO: Handle keys not dict or key_file not json
    self._assert_valid_stash()
    # Check if both or none are provided (ahh, the mighty xor)
    if not (bool(keys) ^ bool(key_file)):
        raise GhostError(
            'You must either provide a path to an exported stash file '
            'or a list of key dicts to import')
    if key_file:
        with open(key_file) as stash_file:
            keys = json.load(stash_file)
    # If the passphrases are the same, there's no reason to decrypt
    # and re-encrypt. We can simply pass the value.
    should_decrypt = origin_passphrase != self.passphrase
    if should_decrypt:
        # TODO: The fact that we need to create a stub stash just to
        # decrypt means we should probably have some encryptor class.
        stub = Stash(TinyDBStorage('stub'), origin_passphrase)
    # TODO: Handle existing keys when loading
    for key in keys:
        self.put(
            name=key['name'],
            value=stub._decrypt(key['value'])
            if should_decrypt else key['value'],
            metadata=key['metadata'],
            description=key['description'],
            lock=key.get('lock'),
            key_type=key.get('type'),
            encrypt=should_decrypt)
def _encrypt(self, value):
"""Turn a json serializable value into an jsonified, encrypted,
hexa string.
"""
value = json.dumps(value)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
encrypted_value = self.cipher.encrypt(value.encode('utf8'))
hexified_value = binascii.hexlify(encrypted_value).decode('ascii')
return hexified_value |
def _decrypt(self, hexified_value):
"""The exact opposite of _encrypt
"""
encrypted_value = binascii.unhexlify(hexified_value)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
jsonified_value = self.cipher.decrypt(
encrypted_value).decode('ascii')
value = json.loads(jsonified_value)
return value |
def get(self, key_name):
    """Return a dictionary consisting of the key itself
    e.g.
    {u'created_at': u'2016-10-10 08:31:53',
     u'description': None,
     u'metadata': None,
     u'modified_at': u'2016-10-10 08:31:53',
     u'name': u'aws',
     u'uid': u'459f12c0-f341-413e-9d7e-7410f912fb74',
     u'value': u'the_value'}
    """
    matches = self.db.search(Query().name == key_name)
    # An empty dict signals "not found" to callers.
    return matches[0] if matches else {}
def delete(self, key_name):
    """Delete the key and return true if the key was deleted, else false
    """
    self.db.remove(Query().name == key_name)
    # A successful removal leaves no matching record behind.
    remaining = self.get(key_name)
    return remaining == {}
def _construct_key(self, values):
"""Return a dictionary representing a key from a list of columns
and a tuple of values
"""
key = {}
for column, value in zip(self.keys.columns, values):
key.update({column.name: value})
return key |
def put(self, key):
    """Put and return the only unique identifier possible, its url
    """
    key_name = key['name']
    self._consul_request('PUT', self._key_url(key_name), json=key)
    return key_name
def put(self, key):
    """Put and return the only unique identifier possible, its path
    """
    # Compute the vault path once; it doubles as the identifier.
    path = self._key_path(key['name'])
    self.client.write(path, **key)
    return path
def init(self):
    """Create an Elasticsearch index if necessary
    """
    # The index name comes from the backend's configured params.
    # ignore 400 (IndexAlreadyExistsException) when creating an index
    self.es.indices.create(index=self.params['index'], ignore=400)
def init(self):
    """Create a bucket.
    """
    try:
        self.client.create_bucket(
            Bucket=self.db_path,
            CreateBucketConfiguration=self.bucket_configuration)
    except botocore.exceptions.ClientError as error:
        error_code = str(error.response['Error']['Code'])
        # Already owning the bucket is fine; anything else is fatal.
        if 'BucketAlreadyOwnedByYou' not in error_code:
            raise error
def put(self, key):
    """Insert the key
    :return: Key name
    """
    key_name = key['name']
    # The whole key record is stored as a JSON object body.
    self.client.put_object(
        Bucket=self.db_path,
        Key=key_name,
        Body=json.dumps(key))
    return key_name
def list(self):
    """Lists the keys
    :return: Returns a list of all keys (not just key names, but rather
    the keys themselves).
    """
    response = self.client.list_objects_v2(Bucket=self.db_path)
    # An empty bucket has no `Contents` field at all.
    if u'Contents' not in response:
        return []
    # Fetch the full record for each object name in the bucket.
    return [self.get(entry[u'Key']) for entry in response[u'Contents']]
def get(self, key_name):
    """Gets the key.
    :return: The key itself in a dictionary
    """
    try:
        raw = self.client.get_object(
            Bucket=self.db_path,
            Key=key_name)['Body'].read().decode("utf-8")
    except botocore.exceptions.ClientError as error:
        # A missing object simply means "no such key".
        if 'NoSuchKey' in str(error.response['Error']['Code']):
            return {}
        raise error
    return json.loads(raw)
def delete(self, key_name):
    """Delete the key.
    :return: True if it was deleted, False otherwise
    """
    self.client.delete_object(
        Bucket=self.db_path,
        Key=key_name)
    # Deletion succeeded iff the key can no longer be fetched.
    return self.get(key_name) == {}
def is_initialized(self):
    """Check if bucket exists.
    :return: True if initialized, False otherwise
    """
    try:
        response = self.client.head_bucket(Bucket=self.db_path)
        return response['ResponseMetadata']['HTTPStatusCode'] == 200
    except botocore.exceptions.ClientError as error:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        if 'NoSuchBucket' in str(error.response['Error']['Code']):
            return False
        raise error
def terminal(port=None, baud='9600'):
    """Launch minterm from pyserial

    :param port: serial port to use; defaults to the auto-detected port.
        The default is now resolved at call time instead of import time,
        so a device plugged in after this module was imported is found.
    :param baud: baud rate as a string (default '9600')
    """
    if port is None:
        port = default_port()
    testargs = ['nodemcu-uploader', port, baud]
    # TODO: modifying argv is no good
    sys.argv = testargs
    # resuse miniterm on main function
    miniterm.main()
def __set_baudrate(self, baud):
    """setting baudrate if supported"""
    log.info('Changing communication to %s baud', baud)
    # Tell the device to reconfigure its UART first...
    self.__writeln(UART_SETUP.format(baud=baud))
    # Wait for the string to be sent before switching baud
    time.sleep(0.1)
    # ...then switch the local port. Try the method API first and fall
    # back to attribute assignment when it's unavailable.
    try:
        self._port.setBaudrate(baud)
    except AttributeError:
        #pySerial 2.7
        self._port.baudrate = baud
def set_timeout(self, timeout):
    """Set the timeout for the communication with the device.

    :param timeout: seconds; anything `int()` accepts. A value of 0
        disables the timeout by substituting a very large one.
    :raises ValueError: if `timeout` is not convertible to int.
    """
    timeout = int(timeout)  # will raise on Error
    # Explicit conditional instead of the legacy `x and a or b` trick.
    self._timeout = 999999 if timeout == 0 else timeout
def __clear_buffers(self):
    """Clears the input and output buffers"""
    # Prefer the pySerial 3.x API; fall back to the old names when the
    # installed pySerial doesn't have it.
    try:
        self._port.reset_input_buffer()
        self._port.reset_output_buffer()
    except AttributeError:
        #pySerial 2.7
        self._port.flushInput()
        self._port.flushOutput()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.