repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
gbowerman/azurerm
azurerm/computerp.py
list_vmss
def list_vmss(access_token, subscription_id, resource_group): '''List VM Scale Sets in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of scale set model views. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
python
def list_vmss(access_token, subscription_id, resource_group): '''List VM Scale Sets in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of scale set model views. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
List VM Scale Sets in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of scale set model views.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L655-L671
gbowerman/azurerm
azurerm/computerp.py
list_vmss_skus
def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name): '''List the VM skus available for a VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of VM skus. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/skus', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
python
def list_vmss_skus(access_token, subscription_id, resource_group, vmss_name): '''List the VM skus available for a VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of VM skus. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/skus', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
List the VM skus available for a VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of VM skus.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L674-L692
gbowerman/azurerm
azurerm/computerp.py
list_vmss_sub
def list_vmss_sub(access_token, subscription_id): '''List VM Scale Sets in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VM scale sets. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
python
def list_vmss_sub(access_token, subscription_id): '''List VM Scale Sets in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VM scale sets. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachineScaleSets', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
List VM Scale Sets in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VM scale sets.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L695-L709
gbowerman/azurerm
azurerm/computerp.py
list_vmss_vm_instance_view_pg
def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name, link=None): '''Gets one page of a paginated list of scale set VM instance views. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. link (str): Optional link to URI to get list (as part of a paginated API query). Returns: HTTP response. JSON body of list of VM instance views. ''' if link is None: endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines?$expand=instanceView&$select=instanceView', '&api-version=', COMP_API]) else: endpoint = link return do_get(endpoint, access_token)
python
def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name, link=None): '''Gets one page of a paginated list of scale set VM instance views. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. link (str): Optional link to URI to get list (as part of a paginated API query). Returns: HTTP response. JSON body of list of VM instance views. ''' if link is None: endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines?$expand=instanceView&$select=instanceView', '&api-version=', COMP_API]) else: endpoint = link return do_get(endpoint, access_token)
Gets one page of a paginated list of scale set VM instance views. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. link (str): Optional link to URI to get list (as part of a paginated API query). Returns: HTTP response. JSON body of list of VM instance views.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L733-L756
gbowerman/azurerm
azurerm/computerp.py
poweroff_vmss
def poweroff_vmss(access_token, subscription_id, resource_group, vmss_name): '''Power off all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/powerOff?api-version=', COMP_API]) body = '{"instanceIds" : ["*"]}' return do_post(endpoint, body, access_token)
python
def poweroff_vmss(access_token, subscription_id, resource_group, vmss_name): '''Power off all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/powerOff?api-version=', COMP_API]) body = '{"instanceIds" : ["*"]}' return do_post(endpoint, body, access_token)
Power off all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L780-L798
gbowerman/azurerm
azurerm/computerp.py
poweroff_vmss_vms
def poweroff_vmss_vms(access_token, subscription_id, resource_group, vmss_name, instance_ids): '''Poweroff all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. instance_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/powerOff?api-version=', COMP_API]) body = '{"instanceIds" : ' + instance_ids + '}' return do_post(endpoint, body, access_token)
python
def poweroff_vmss_vms(access_token, subscription_id, resource_group, vmss_name, instance_ids): '''Poweroff all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. instance_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/powerOff?api-version=', COMP_API]) body = '{"instanceIds" : ' + instance_ids + '}' return do_post(endpoint, body, access_token)
Poweroff all the VMs in a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. instance_ids (str): String representation of a JSON list of VM IDs. E.g. '[1,2]'. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L801-L820
gbowerman/azurerm
azurerm/computerp.py
put_vmss
def put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss_body): '''Put VMSS body. Can be used to create or update a scale set. E.g. call get_vmss(), make changes to the body, call put_vmss(). Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) body = json.dumps(vmss_body) return do_put(endpoint, body, access_token)
python
def put_vmss(access_token, subscription_id, resource_group, vmss_name, vmss_body): '''Put VMSS body. Can be used to create or update a scale set. E.g. call get_vmss(), make changes to the body, call put_vmss(). Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) body = json.dumps(vmss_body) return do_put(endpoint, body, access_token)
Put VMSS body. Can be used to create or update a scale set. E.g. call get_vmss(), make changes to the body, call put_vmss(). Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L823-L845
gbowerman/azurerm
azurerm/computerp.py
put_vmss_vm
def put_vmss_vm(access_token, subscription_id, resource_group, vmss_name, vm_id, vm_body): '''Update a VMSS VM. E.g. add/remove a data disk from a specifc VM in a scale set (preview). Note: Only currently enabled for Azure Canary regions. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vm_id (int): VM ID of VM to update vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set VM properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines/', str(vm_id), '?api-version=', COMP_API]) body = json.dumps(vm_body) return do_put(endpoint, body, access_token)
python
def put_vmss_vm(access_token, subscription_id, resource_group, vmss_name, vm_id, vm_body): '''Update a VMSS VM. E.g. add/remove a data disk from a specifc VM in a scale set (preview). Note: Only currently enabled for Azure Canary regions. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vm_id (int): VM ID of VM to update vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set VM properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '/virtualMachines/', str(vm_id), '?api-version=', COMP_API]) body = json.dumps(vm_body) return do_put(endpoint, body, access_token)
Update a VMSS VM. E.g. add/remove a data disk from a specifc VM in a scale set (preview). Note: Only currently enabled for Azure Canary regions. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the new scale set. vm_id (int): VM ID of VM to update vmss_body (dictionary): Body containining Returns: HTTP response. JSON body of the virtual machine scale set VM properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L848-L870
gbowerman/azurerm
azurerm/computerp.py
scale_vmss
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity): '''Change the instance count of an existing VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. capacity (int): New number of VMs. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) body = '{"sku":{"capacity":"' + str(capacity) + '"}}' return do_patch(endpoint, body, access_token)
python
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity): '''Change the instance count of an existing VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. capacity (int): New number of VMs. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) body = '{"sku":{"capacity":"' + str(capacity) + '"}}' return do_patch(endpoint, body, access_token)
Change the instance count of an existing VM Scale Set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. capacity (int): New number of VMs. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L960-L978
gbowerman/azurerm
azurerm/computerp.py
start_vm
def start_vm(access_token, subscription_id, resource_group, vm_name): '''Start a virtual machine. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '/start', '?api-version=', COMP_API]) return do_post(endpoint, '', access_token)
python
def start_vm(access_token, subscription_id, resource_group, vm_name): '''Start a virtual machine. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '/start', '?api-version=', COMP_API]) return do_post(endpoint, '', access_token)
Start a virtual machine. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L981-L1000
gbowerman/azurerm
azurerm/computerp.py
update_vm
def update_vm(access_token, subscription_id, resource_group, vm_name, body): '''Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '?api-version=', COMP_API]) return do_put(endpoint, body, access_token)
python
def update_vm(access_token, subscription_id, resource_group, vm_name, body): '''Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachines/', vm_name, '?api-version=', COMP_API]) return do_put(endpoint, body, access_token)
Update a virtual machine with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L1111-L1129
gbowerman/azurerm
azurerm/computerp.py
update_vmss
def update_vmss(access_token, subscription_id, resource_group, vmss_name, body): '''Update a VMSS with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM scale set. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) return do_put(endpoint, body, access_token)
python
def update_vmss(access_token, subscription_id, resource_group, vmss_name, body): '''Update a VMSS with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM scale set. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) return do_put(endpoint, body, access_token)
Update a VMSS with a new JSON body. E.g. do a GET, change something, call this. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. body (dict): JSON body of the VM scale set. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L1132-L1150
gbowerman/azurerm
examples/vmssupgrade.py
get_vm_ids_by_ud
def get_vm_ids_by_ud(access_token, subscription_id, resource_group, vmssname, updatedomain): '''look at VMSS VM instance view to get VM IDs by UD''' instance_viewlist = azurerm.list_vmss_vm_instance_view(access_token, subscription_id, resource_group, vmssname) # print(json.dumps(instance_viewlist, sort_keys=False, indent=2, separators=(',', ': '))) # loop through the instance view list, and build the vm id list of VMs in # the matching UD udinstancelist = [] for instance_view in instance_viewlist['value']: vmud = instance_view['properties']['instance_view']['platformUpdateDomain'] if vmud == updatedomain: udinstancelist.append(instance_view['instanceId']) udinstancelist.sort() return udinstancelist
python
def get_vm_ids_by_ud(access_token, subscription_id, resource_group, vmssname, updatedomain): '''look at VMSS VM instance view to get VM IDs by UD''' instance_viewlist = azurerm.list_vmss_vm_instance_view(access_token, subscription_id, resource_group, vmssname) # print(json.dumps(instance_viewlist, sort_keys=False, indent=2, separators=(',', ': '))) # loop through the instance view list, and build the vm id list of VMs in # the matching UD udinstancelist = [] for instance_view in instance_viewlist['value']: vmud = instance_view['properties']['instance_view']['platformUpdateDomain'] if vmud == updatedomain: udinstancelist.append(instance_view['instanceId']) udinstancelist.sort() return udinstancelist
look at VMSS VM instance view to get VM IDs by UD
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssupgrade.py#L10-L24
gbowerman/azurerm
examples/vmssupgrade.py
main
def main(): '''main routine''' # create parser arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-s', required=True, action='store', help='VM Scale Set name') arg_parser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group', action='store', help='Resource group name') arg_parser.add_argument('--newversion', '-n', dest='newversion', action='store', help='New platform image version string') arg_parser.add_argument('--customuri', '-c', dest='customuri', action='store', help='New custom image URI string') arg_parser.add_argument('--updatedomain', '-u', dest='updatedomain', action='store', type=int, help='Update domain (int)') arg_parser.add_argument('--vmid', '-i', dest='vmid', action='store', type=int, help='Single VM ID (int)') arg_parser.add_argument('--vmlist', '-l', dest='vmlist', action='store', help='List of VM IDs e.g. "["1", "2"]"') arg_parser.add_argument('--nowait', '-w', action='store_true', default=False, help='Start upgrades and then exit without waiting') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show additional information') arg_parser.add_argument('-y', dest='noprompt', action='store_true', default=False, help='Do not prompt for confirmation') args = arg_parser.parse_args() # switches to determine program behavior # go ahead and upgrade without waiting for confirmation when True noprompt = args.noprompt nowait = args.nowait # don't loop waiting for upgrade provisioning to complete when True verbose = args.verbose # print extra status information when True vmssname = args.vmssname resource_group = args.resource_group if args.newversion is not None: newversion = args.newversion storagemode = 'platform' elif args.customuri is not None: customuri = args.customuri storagemode = 'custom' else: arg_parser.error( 'You must specify a new version for platform images or a custom uri for custom images') if args.updatedomain is not None: updatedomain = args.updatedomain 
upgrademode = 'updatedomain' elif args.vmid is not None: vmid = args.vmid upgrademode = 'vmid' elif args.vmlist is not None: vmlist = args.vmlist upgrademode = 'vmlist' else: arg_parser.error( 'You must specify an update domain, a vm id, or a vm list') # Load Azure app defaults try: with open('vmssconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: print("Error: Expecting vmssconfig.json in current folder") sys.exit() tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get the vmss model vmssmodel = azurerm.get_vmss( access_token, subscription_id, resource_group, vmssname) # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': '))) if storagemode == 'platform': # check current version imgref = \ vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference'] print('Current image reference in Scale Set model:') print(json.dumps(imgref, sort_keys=False, indent=2, separators=(',', ': '))) # compare current version with new version if imgref['version'] == newversion: print('Scale Set model version is already set to ' + newversion + ', skipping model update.') else: if not noprompt: response = input( 'Confirm version upgrade to: ' + newversion + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference']['version'] = newversion # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('OS version updated to ' + newversion + ' in model for VM Scale Set: ' + vmssname) else: # storagemode = custom # check current uri oldimageuri = \ 
vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] print('Current image URI in Scale Set model:' + oldimageuri) # compare current uri with new uri if oldimageuri == customuri: print('Scale Set model version is already set to ' + customuri + ', skipping model update.') else: if not noprompt: response = input('Confirm uri upgrade to: ' + customuri + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = customuri # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('Image URI updated to ' + customuri + ' in model for VM Scale Set: ' + vmssname) # build the list of VMs to upgrade depending on the upgrademode setting if upgrademode == 'updatedomain': # list the VMSS VM instance views to determine their update domains print('Examining the scale set..') udinstancelist = get_vm_ids_by_ud( access_token, subscription_id, resource_group, vmssname, updatedomain) print('VM instances in UD: ' + str(updatedomain) + ' to upgrade:') print(udinstancelist) vmids = json.dumps(udinstancelist) print('Upgrading VMs in UD: ' + str(updatedomain)) elif upgrademode == 'vmid': vmids = json.dumps([str(vmid)]) print('Upgrading VM ID: ' + str(vmid)) else: # upgrademode = vmlist vmids = vmlist print('Upgrading VM IDs: ' + vmlist) # do manualupgrade on the VMs in the list upgraderesult = azurerm.upgrade_vmss_vms( access_token, subscription_id, resource_group, vmssname, vmids) print(upgraderesult) # now wait for upgrade to complete # query VM scale set instance view if not nowait: updatecomplete = False provisioningstate = '' while not updatecomplete: vmssinstance_view = azurerm.get_vmss_instance_view( access_token, subscription_id, resource_group, vmssname) for status in vmssinstance_view['statuses']: provisioningstate = 
status['code'] if provisioningstate == 'ProvisioningState/succeeded': updatecomplete = True if verbose: print(provisioningstate) time.sleep(5) else: print('Check Scale Set provisioning state to determine when upgrade is complete.')
python
def main(): '''main routine''' # create parser arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-s', required=True, action='store', help='VM Scale Set name') arg_parser.add_argument('--resourcegroup', '-r', required=True, dest='resource_group', action='store', help='Resource group name') arg_parser.add_argument('--newversion', '-n', dest='newversion', action='store', help='New platform image version string') arg_parser.add_argument('--customuri', '-c', dest='customuri', action='store', help='New custom image URI string') arg_parser.add_argument('--updatedomain', '-u', dest='updatedomain', action='store', type=int, help='Update domain (int)') arg_parser.add_argument('--vmid', '-i', dest='vmid', action='store', type=int, help='Single VM ID (int)') arg_parser.add_argument('--vmlist', '-l', dest='vmlist', action='store', help='List of VM IDs e.g. "["1", "2"]"') arg_parser.add_argument('--nowait', '-w', action='store_true', default=False, help='Start upgrades and then exit without waiting') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show additional information') arg_parser.add_argument('-y', dest='noprompt', action='store_true', default=False, help='Do not prompt for confirmation') args = arg_parser.parse_args() # switches to determine program behavior # go ahead and upgrade without waiting for confirmation when True noprompt = args.noprompt nowait = args.nowait # don't loop waiting for upgrade provisioning to complete when True verbose = args.verbose # print extra status information when True vmssname = args.vmssname resource_group = args.resource_group if args.newversion is not None: newversion = args.newversion storagemode = 'platform' elif args.customuri is not None: customuri = args.customuri storagemode = 'custom' else: arg_parser.error( 'You must specify a new version for platform images or a custom uri for custom images') if args.updatedomain is not None: updatedomain = args.updatedomain 
upgrademode = 'updatedomain' elif args.vmid is not None: vmid = args.vmid upgrademode = 'vmid' elif args.vmlist is not None: vmlist = args.vmlist upgrademode = 'vmlist' else: arg_parser.error( 'You must specify an update domain, a vm id, or a vm list') # Load Azure app defaults try: with open('vmssconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: print("Error: Expecting vmssconfig.json in current folder") sys.exit() tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get the vmss model vmssmodel = azurerm.get_vmss( access_token, subscription_id, resource_group, vmssname) # print(json.dumps(vmssmodel, sort_keys=False, indent=2, separators=(',', ': '))) if storagemode == 'platform': # check current version imgref = \ vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference'] print('Current image reference in Scale Set model:') print(json.dumps(imgref, sort_keys=False, indent=2, separators=(',', ': '))) # compare current version with new version if imgref['version'] == newversion: print('Scale Set model version is already set to ' + newversion + ', skipping model update.') else: if not noprompt: response = input( 'Confirm version upgrade to: ' + newversion + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imagereference']['version'] = newversion # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('OS version updated to ' + newversion + ' in model for VM Scale Set: ' + vmssname) else: # storagemode = custom # check current uri oldimageuri = \ 
vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] print('Current image URI in Scale Set model:' + oldimageuri) # compare current uri with new uri if oldimageuri == customuri: print('Scale Set model version is already set to ' + customuri + ', skipping model update.') else: if not noprompt: response = input('Confirm uri upgrade to: ' + customuri + ' (y/n)') if response.lower() != 'y': sys.exit(1) # change the version vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = customuri # put the vmss model updateresult = azurerm.update_vmss( access_token, subscription_id, resource_group, vmssname, json.dumps(vmssmodel)) if verbose: print(updateresult) print('Image URI updated to ' + customuri + ' in model for VM Scale Set: ' + vmssname) # build the list of VMs to upgrade depending on the upgrademode setting if upgrademode == 'updatedomain': # list the VMSS VM instance views to determine their update domains print('Examining the scale set..') udinstancelist = get_vm_ids_by_ud( access_token, subscription_id, resource_group, vmssname, updatedomain) print('VM instances in UD: ' + str(updatedomain) + ' to upgrade:') print(udinstancelist) vmids = json.dumps(udinstancelist) print('Upgrading VMs in UD: ' + str(updatedomain)) elif upgrademode == 'vmid': vmids = json.dumps([str(vmid)]) print('Upgrading VM ID: ' + str(vmid)) else: # upgrademode = vmlist vmids = vmlist print('Upgrading VM IDs: ' + vmlist) # do manualupgrade on the VMs in the list upgraderesult = azurerm.upgrade_vmss_vms( access_token, subscription_id, resource_group, vmssname, vmids) print(upgraderesult) # now wait for upgrade to complete # query VM scale set instance view if not nowait: updatecomplete = False provisioningstate = '' while not updatecomplete: vmssinstance_view = azurerm.get_vmss_instance_view( access_token, subscription_id, resource_group, vmssname) for status in vmssinstance_view['statuses']: provisioningstate = 
status['code'] if provisioningstate == 'ProvisioningState/succeeded': updatecomplete = True if verbose: print(provisioningstate) time.sleep(5) else: print('Check Scale Set provisioning state to determine when upgrade is complete.')
main routine
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssupgrade.py#L27-L196
gbowerman/azurerm
azurerm/graphfns.py
get_graph_token_from_msi
def get_graph_token_from_msi(): '''get a Microsoft Graph access token using Azure Cloud Shell's MSI_ENDPOINT. Notes: The auth token returned by this function is not an Azure auth token. Use it for querying the Microsoft Graph API. This function only works in an Azure cloud shell or virtual machine. Returns: A Microsoft Graph authentication token string. ''' if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: endpoint = os.environ['MSI_ENDPOINT'] else: return None headers = {'Metadata': 'true'} body = {"resource": 'https://' + GRAPH_RESOURCE_HOST + '/'} ret = requests.post(endpoint, headers=headers, data=body) return ret.json()['access_token']
python
def get_graph_token_from_msi(): '''get a Microsoft Graph access token using Azure Cloud Shell's MSI_ENDPOINT. Notes: The auth token returned by this function is not an Azure auth token. Use it for querying the Microsoft Graph API. This function only works in an Azure cloud shell or virtual machine. Returns: A Microsoft Graph authentication token string. ''' if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: endpoint = os.environ['MSI_ENDPOINT'] else: return None headers = {'Metadata': 'true'} body = {"resource": 'https://' + GRAPH_RESOURCE_HOST + '/'} ret = requests.post(endpoint, headers=headers, data=body) return ret.json()['access_token']
get a Microsoft Graph access token using Azure Cloud Shell's MSI_ENDPOINT. Notes: The auth token returned by this function is not an Azure auth token. Use it for querying the Microsoft Graph API. This function only works in an Azure cloud shell or virtual machine. Returns: A Microsoft Graph authentication token string.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/graphfns.py#L11-L30
gbowerman/azurerm
azurerm/graphfns.py
get_object_id_from_graph
def get_object_id_from_graph(access_token=None): '''Return the object ID for the Graph user who owns the access token. Args: access_token (str): A Microsoft Graph access token. (Not an Azure access token.) If not provided, attempt to get it from MSI_ENDPOINT. Returns: An object ID string for a user or service principal. ''' if access_token is None: access_token = get_graph_token_from_msi() endpoint = 'https://' + GRAPH_RESOURCE_HOST + '/v1.0/me/' headers = {'Authorization': 'Bearer ' + access_token, 'Host': GRAPH_RESOURCE_HOST} ret = requests.get(endpoint, headers=headers) return ret.json()['id']
python
def get_object_id_from_graph(access_token=None): '''Return the object ID for the Graph user who owns the access token. Args: access_token (str): A Microsoft Graph access token. (Not an Azure access token.) If not provided, attempt to get it from MSI_ENDPOINT. Returns: An object ID string for a user or service principal. ''' if access_token is None: access_token = get_graph_token_from_msi() endpoint = 'https://' + GRAPH_RESOURCE_HOST + '/v1.0/me/' headers = {'Authorization': 'Bearer ' + access_token, 'Host': GRAPH_RESOURCE_HOST} ret = requests.get(endpoint, headers=headers) return ret.json()['id']
Return the object ID for the Graph user who owns the access token. Args: access_token (str): A Microsoft Graph access token. (Not an Azure access token.) If not provided, attempt to get it from MSI_ENDPOINT. Returns: An object ID string for a user or service principal.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/graphfns.py#L33-L49
gbowerman/azurerm
azurerm/subfns.py
get_subscription_from_cli
def get_subscription_from_cli(name=None): '''Get the default, or named, subscription id from CLI's local cache. Args: name (str): Optional subscription name. If this is set, the subscription id of the named subscription is returned from the CLI cache if present. If not set, the subscription id of the default subscription is returned. Returns: Azure subscription ID string. Requirements: User has run 'az login' once, or is in Azure Cloud Shell. ''' home = os.path.expanduser('~') azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json' if os.path.isfile(azure_profile_path) is False: print('Error from get_subscription_from_cli(): Cannot find ' + azure_profile_path) return None with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd: azure_profile = json.load(azure_profile_fd) for subscription_info in azure_profile['subscriptions']: if (name is None and subscription_info['isDefault'] is True) or \ subscription_info['name'] == name: return subscription_info['id'] return None
python
def get_subscription_from_cli(name=None): '''Get the default, or named, subscription id from CLI's local cache. Args: name (str): Optional subscription name. If this is set, the subscription id of the named subscription is returned from the CLI cache if present. If not set, the subscription id of the default subscription is returned. Returns: Azure subscription ID string. Requirements: User has run 'az login' once, or is in Azure Cloud Shell. ''' home = os.path.expanduser('~') azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json' if os.path.isfile(azure_profile_path) is False: print('Error from get_subscription_from_cli(): Cannot find ' + azure_profile_path) return None with io.open(azure_profile_path, 'r', encoding='utf-8-sig') as azure_profile_fd: azure_profile = json.load(azure_profile_fd) for subscription_info in azure_profile['subscriptions']: if (name is None and subscription_info['isDefault'] is True) or \ subscription_info['name'] == name: return subscription_info['id'] return None
Get the default, or named, subscription id from CLI's local cache. Args: name (str): Optional subscription name. If this is set, the subscription id of the named subscription is returned from the CLI cache if present. If not set, the subscription id of the default subscription is returned. Returns: Azure subscription ID string. Requirements: User has run 'az login' once, or is in Azure Cloud Shell.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/subfns.py#L10-L36
gbowerman/azurerm
azurerm/subfns.py
list_locations
def list_locations(access_token, subscription_id): '''List available locations for a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of locations. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/locations?api-version=', BASE_API]) return do_get(endpoint, access_token)
python
def list_locations(access_token, subscription_id): '''List available locations for a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of locations. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/locations?api-version=', BASE_API]) return do_get(endpoint, access_token)
List available locations for a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of locations.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/subfns.py#L39-L52
gbowerman/azurerm
examples/deploytemplate.py
main
def main(): '''Main routine.''' # validate command line arguments argparser = argparse.ArgumentParser() argparser.add_argument('--uri', '-u', required=True, action='store', help='Template URI') argparser.add_argument('--params', '-f', required=True, action='store', help='Parameters json file') argparser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus') argparser.add_argument('--rg', '-g', required=False, action='store', help='Resource Group name') argparser.add_argument('--sub', '-s', required=False, action='store', help='Subscription ID') argparser.add_argument('--genparams', '-p', required=False, action='store', help='Comma separated list of parameters to generate strings for') argparser.add_argument('--wait', '-w', required=False, action='store_true', default=False, help='Wait for deployment to complete and time it') argparser.add_argument('--debug', '-d', required=False, action='store_true', default=False, help='Debug mode: print additional deployment') args = argparser.parse_args() template_uri = args.uri params = args.params rgname = args.rg location = args.location subscription_id = args.sub # Load Azure app defaults try: with open('azurermconfig.json') as configfile: configdata = json.load(configfile) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = configdata['tenantId'] app_id = configdata['appId'] app_secret = configdata['appSecret'] if subscription_id is None: subscription_id = configdata['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # load parameters file try: with open(params) as params_file: param_data = json.load(params_file) except FileNotFoundError: sys.exit('Error: Expecting ' + params + ' in current folder') # prep Haikunator haikunator = Haikunator() # if there is a genparams argument generate values and merge the list if args.genparams is not None: newdict = {} genlist = 
args.genparams.split(',') for param in genlist: # generate a random prhase, include caps and puncs in case it's a passwd newval = haikunator.haikunate(delimiter='-').title() newdict[param] = {'value': newval} params = {**param_data, **newdict} else: params = param_data # create resource group if not specified if rgname is None: rgname = haikunator.haikunate() ret = azurerm.create_resource_group( access_token, subscription_id, rgname, location) print('Creating resource group: ' + rgname + ', location:', location + ', return code:', ret) deployment_name = haikunator.haikunate() # measure time from beginning of deployment call (after creating resource group etc.) start_time = time.time() # deploy template and print response deploy_return = azurerm.deploy_template_uri( access_token, subscription_id, rgname, deployment_name, template_uri, params) print('Deployment name: ' + deployment_name + ', return code:', deploy_return) if 'Response [20' not in str(deploy_return): print('Return from deployment: ', deploy_return.text) sys.exit('Deployment failed. Exiting.. ') if args.debug is True: print(json.dumps(deploy_return.json(), sort_keys=False, indent=2, separators=(',', ': '))) # show deployment status if args.debug is True: print('Deployment status:') deploy_return = azurerm.show_deployment( access_token, subscription_id, rgname, deployment_name) print(json.dumps(deploy_return, sort_keys=False, indent=2, separators=(',', ': '))) # wait for deployment to complete if args.wait is True: print('Waiting for provisioning to complete..') provisioning_state = '' try: while True: time.sleep(10) deploy_return = azurerm.show_deployment( access_token, subscription_id, rgname, deployment_name) provisioning_state = deploy_return['properties']['provisioningState'] if provisioning_state != 'Running': break print('Provisioning state:', provisioning_state) except KeyError: print('Deployment failure:', deploy_return) elapsed_time = time.time() - start_time print('Elapsed time:', elapsed_time)
python
def main(): '''Main routine.''' # validate command line arguments argparser = argparse.ArgumentParser() argparser.add_argument('--uri', '-u', required=True, action='store', help='Template URI') argparser.add_argument('--params', '-f', required=True, action='store', help='Parameters json file') argparser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus') argparser.add_argument('--rg', '-g', required=False, action='store', help='Resource Group name') argparser.add_argument('--sub', '-s', required=False, action='store', help='Subscription ID') argparser.add_argument('--genparams', '-p', required=False, action='store', help='Comma separated list of parameters to generate strings for') argparser.add_argument('--wait', '-w', required=False, action='store_true', default=False, help='Wait for deployment to complete and time it') argparser.add_argument('--debug', '-d', required=False, action='store_true', default=False, help='Debug mode: print additional deployment') args = argparser.parse_args() template_uri = args.uri params = args.params rgname = args.rg location = args.location subscription_id = args.sub # Load Azure app defaults try: with open('azurermconfig.json') as configfile: configdata = json.load(configfile) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = configdata['tenantId'] app_id = configdata['appId'] app_secret = configdata['appSecret'] if subscription_id is None: subscription_id = configdata['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # load parameters file try: with open(params) as params_file: param_data = json.load(params_file) except FileNotFoundError: sys.exit('Error: Expecting ' + params + ' in current folder') # prep Haikunator haikunator = Haikunator() # if there is a genparams argument generate values and merge the list if args.genparams is not None: newdict = {} genlist = 
args.genparams.split(',') for param in genlist: # generate a random prhase, include caps and puncs in case it's a passwd newval = haikunator.haikunate(delimiter='-').title() newdict[param] = {'value': newval} params = {**param_data, **newdict} else: params = param_data # create resource group if not specified if rgname is None: rgname = haikunator.haikunate() ret = azurerm.create_resource_group( access_token, subscription_id, rgname, location) print('Creating resource group: ' + rgname + ', location:', location + ', return code:', ret) deployment_name = haikunator.haikunate() # measure time from beginning of deployment call (after creating resource group etc.) start_time = time.time() # deploy template and print response deploy_return = azurerm.deploy_template_uri( access_token, subscription_id, rgname, deployment_name, template_uri, params) print('Deployment name: ' + deployment_name + ', return code:', deploy_return) if 'Response [20' not in str(deploy_return): print('Return from deployment: ', deploy_return.text) sys.exit('Deployment failed. Exiting.. ') if args.debug is True: print(json.dumps(deploy_return.json(), sort_keys=False, indent=2, separators=(',', ': '))) # show deployment status if args.debug is True: print('Deployment status:') deploy_return = azurerm.show_deployment( access_token, subscription_id, rgname, deployment_name) print(json.dumps(deploy_return, sort_keys=False, indent=2, separators=(',', ': '))) # wait for deployment to complete if args.wait is True: print('Waiting for provisioning to complete..') provisioning_state = '' try: while True: time.sleep(10) deploy_return = azurerm.show_deployment( access_token, subscription_id, rgname, deployment_name) provisioning_state = deploy_return['properties']['provisioningState'] if provisioning_state != 'Running': break print('Provisioning state:', provisioning_state) except KeyError: print('Deployment failure:', deploy_return) elapsed_time = time.time() - start_time print('Elapsed time:', elapsed_time)
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/deploytemplate.py#L14-L131
gbowerman/azurerm
azurerm/cosmosdbrp.py
create_cosmosdb_account
def create_cosmosdb_account(access_token, subscription_id, rgname, account_name, location, cosmosdb_kind): '''Create a new Cosmos DB account in the named resource group, with the named location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new Cosmos DB account. location (str): Azure data center location. E.g. westus. cosmosdb_kind (str): Database type. E.g. GlobalDocumentDB. Returns: HTTP response. JSON body of storage account properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '?api-version=', COSMOSDB_API]) cosmosdb_body = {'location': location, 'kind': cosmosdb_kind, 'properties': {'databaseAccountOfferType': 'Standard', 'locations': [{'failoverPriority': 0, 'locationName': location}]}} body = json.dumps(cosmosdb_body) return do_put(endpoint, body, access_token)
python
def create_cosmosdb_account(access_token, subscription_id, rgname, account_name, location, cosmosdb_kind): '''Create a new Cosmos DB account in the named resource group, with the named location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new Cosmos DB account. location (str): Azure data center location. E.g. westus. cosmosdb_kind (str): Database type. E.g. GlobalDocumentDB. Returns: HTTP response. JSON body of storage account properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '?api-version=', COSMOSDB_API]) cosmosdb_body = {'location': location, 'kind': cosmosdb_kind, 'properties': {'databaseAccountOfferType': 'Standard', 'locations': [{'failoverPriority': 0, 'locationName': location}]}} body = json.dumps(cosmosdb_body) return do_put(endpoint, body, access_token)
Create a new Cosmos DB account in the named resource group, with the named location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the new Cosmos DB account. location (str): Azure data center location. E.g. westus. cosmosdb_kind (str): Database type. E.g. GlobalDocumentDB. Returns: HTTP response. JSON body of storage account properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/cosmosdbrp.py#L8-L35
gbowerman/azurerm
azurerm/cosmosdbrp.py
get_cosmosdb_account_keys
def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name): '''Get the access keys for the specified Cosmos DB account. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the Cosmos DB account. Returns: HTTP response. JSON body of Cosmos DB account keys. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '/listKeys', '?api-version=', COSMOSDB_API]) return do_post(endpoint, '', access_token)
python
def get_cosmosdb_account_keys(access_token, subscription_id, rgname, account_name): '''Get the access keys for the specified Cosmos DB account. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the Cosmos DB account. Returns: HTTP response. JSON body of Cosmos DB account keys. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '/listKeys', '?api-version=', COSMOSDB_API]) return do_post(endpoint, '', access_token)
Get the access keys for the specified Cosmos DB account. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. account_name (str): Name of the Cosmos DB account. Returns: HTTP response. JSON body of Cosmos DB account keys.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/cosmosdbrp.py#L38-L56
gbowerman/azurerm
azurerm/keyvault.py
create_keyvault
def create_keyvault(access_token, subscription_id, rgname, vault_name, location, template_deployment=True, tenant_id=None, object_id=None): '''Create a new key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. location (str): Azure data center location. E.g. westus2. template_deployment (boolean): Whether to allow deployment from template. tenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from ist_tenants(). object_id (str): Optionally specify an object ID representing user or principal for the access policy. Returns: HTTP response. JSON body of key vault properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) # get tenant ID if not specified if tenant_id is None: ret = list_tenants(access_token) tenant_id = ret['value'][0]['tenantId'] # if object_id is None: access_policies = [{'tenantId': tenant_id, 'objectId': object_id, 'permissions': { 'keys': ['get', 'create', 'delete', 'list', 'update', 'import', 'backup', 'restore', 'recover'], 'secrets': ['get', 'list', 'set', 'delete', 'backup', 'restore', 'recover'], 'certificates': ['get', 'list', 'delete', 'create', 'import', 'update', 'managecontacts', 'getissuers', 'listissuers', 'setissuers', 'deleteissuers', 'manageissuers', 'recover'], 'storage': ['get', 'list', 'delete', 'set', 'update', 'regeneratekey', 'setsas', 'listsas', 'getsas', 'deletesas'] }}] vault_properties = {'tenantId': tenant_id, 'sku': {'family': 'A', 'name': 'standard'}, 'enabledForTemplateDeployment': template_deployment, 'accessPolicies': access_policies} vault_body = {'location': location, 'properties': vault_properties} body = json.dumps(vault_body) return do_put(endpoint, body, access_token)
python
def create_keyvault(access_token, subscription_id, rgname, vault_name, location, template_deployment=True, tenant_id=None, object_id=None): '''Create a new key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. location (str): Azure data center location. E.g. westus2. template_deployment (boolean): Whether to allow deployment from template. tenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from ist_tenants(). object_id (str): Optionally specify an object ID representing user or principal for the access policy. Returns: HTTP response. JSON body of key vault properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) # get tenant ID if not specified if tenant_id is None: ret = list_tenants(access_token) tenant_id = ret['value'][0]['tenantId'] # if object_id is None: access_policies = [{'tenantId': tenant_id, 'objectId': object_id, 'permissions': { 'keys': ['get', 'create', 'delete', 'list', 'update', 'import', 'backup', 'restore', 'recover'], 'secrets': ['get', 'list', 'set', 'delete', 'backup', 'restore', 'recover'], 'certificates': ['get', 'list', 'delete', 'create', 'import', 'update', 'managecontacts', 'getissuers', 'listissuers', 'setissuers', 'deleteissuers', 'manageissuers', 'recover'], 'storage': ['get', 'list', 'delete', 'set', 'update', 'regeneratekey', 'setsas', 'listsas', 'getsas', 'deletesas'] }}] vault_properties = {'tenantId': tenant_id, 'sku': {'family': 'A', 'name': 'standard'}, 'enabledForTemplateDeployment': template_deployment, 'accessPolicies': access_policies} vault_body = {'location': location, 'properties': vault_properties} body = json.dumps(vault_body) return do_put(endpoint, body, access_token)
Create a new key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. location (str): Azure data center location. E.g. westus2. template_deployment (boolean): Whether to allow deployment from template. tenant_id (str): Optionally specify a tenant ID (otherwise picks first response) from ist_tenants(). object_id (str): Optionally specify an object ID representing user or principal for the access policy. Returns: HTTP response. JSON body of key vault properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L9-L56
gbowerman/azurerm
azurerm/keyvault.py
delete_keyvault
def delete_keyvault(access_token, subscription_id, rgname, vault_name): '''Deletes a key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) return do_delete(endpoint, access_token)
python
def delete_keyvault(access_token, subscription_id, rgname, vault_name): '''Deletes a key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) return do_delete(endpoint, access_token)
Deletes a key vault in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the new key vault. Returns: HTTP response. 200 OK.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L59-L76
gbowerman/azurerm
azurerm/keyvault.py
get_keyvault
def get_keyvault(access_token, subscription_id, rgname, vault_name): '''Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) return do_get(endpoint, access_token)
python
def get_keyvault(access_token, subscription_id, rgname, vault_name): '''Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults/', vault_name, '?api-version=', KEYVAULT_API]) return do_get(endpoint, access_token)
Gets details about the named key vault. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. vault_name (str): Name of the key vault. Returns: HTTP response. JSON body of key vault properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L79-L96
gbowerman/azurerm
azurerm/keyvault.py
list_keyvaults
def list_keyvaults(access_token, subscription_id, rgname): '''Lists key vaults in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
python
def list_keyvaults(access_token, subscription_id, rgname): '''Lists key vaults in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
Lists key vaults in the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. 200 OK.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L99-L115
gbowerman/azurerm
azurerm/keyvault.py
list_keyvaults_sub
def list_keyvaults_sub(access_token, subscription_id): '''Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
python
def list_keyvaults_sub(access_token, subscription_id): '''Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.KeyVault/vaults', '?api-version=', KEYVAULT_API]) return do_get_next(endpoint, access_token)
Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L118-L132
gbowerman/azurerm
azurerm/keyvault.py
set_keyvault_secret
def set_keyvault_secret(access_token, vault_uri, secret_name, secret_value): '''Adds a secret to a key vault using the key vault URI. Creates a new version if the secret already exists. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net. secret_name (str): Name of the secret to add. secret_value (str): Value of the secret. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) current_time = datetime.datetime.now().isoformat() attributes = {'created': current_time, 'enabled': True, 'exp': None, 'nbf': None, 'recoveryLevel': 'Purgeable', 'updated': current_time} secret_body = {'attributes': attributes, 'contentType': None, 'kid': None, 'managed': None, 'tags': {'file-encoding': 'utf-8'}, 'value': secret_value} body = json.dumps(secret_body) print(body) return do_put(endpoint, body, access_token)
python
def set_keyvault_secret(access_token, vault_uri, secret_name, secret_value): '''Adds a secret to a key vault using the key vault URI. Creates a new version if the secret already exists. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net. secret_name (str): Name of the secret to add. secret_value (str): Value of the secret. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) current_time = datetime.datetime.now().isoformat() attributes = {'created': current_time, 'enabled': True, 'exp': None, 'nbf': None, 'recoveryLevel': 'Purgeable', 'updated': current_time} secret_body = {'attributes': attributes, 'contentType': None, 'kid': None, 'managed': None, 'tags': {'file-encoding': 'utf-8'}, 'value': secret_value} body = json.dumps(secret_body) print(body) return do_put(endpoint, body, access_token)
Adds a secret to a key vault using the key vault URI. Creates a new version if the secret already exists. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.vault.azure.net. secret_name (str): Name of the secret to add. secret_value (str): Value of the secret. Returns: HTTP response. 200 OK.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L135-L166
gbowerman/azurerm
azurerm/keyvault.py
delete_keyvault_secret
def delete_keyvault_secret(access_token, vault_uri, secret_name): '''Deletes a secret from a key vault using the key vault URI. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.azure.net. secret_name (str): Name of the secret to add. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) return do_delete(endpoint, access_token)
python
def delete_keyvault_secret(access_token, vault_uri, secret_name): '''Deletes a secret from a key vault using the key vault URI. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.azure.net. secret_name (str): Name of the secret to add. Returns: HTTP response. 200 OK. ''' endpoint = ''.join([vault_uri, '/secrets/', secret_name, '?api-version=', '7.0']) return do_delete(endpoint, access_token)
Deletes a secret from a key vault using the key vault URI. Args: access_token (str): A valid Azure authentication token. vault_uri (str): Vault URI e.g. https://myvault.azure.net. secret_name (str): Name of the secret to add. Returns: HTTP response. 200 OK.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/keyvault.py#L169-L183
gbowerman/azurerm
azurerm/insightsrp.py
create_autoscale_rule
def create_autoscale_rule(subscription_id, resource_group, vmss_name, metric_name, operator, threshold, direction, change_count, time_grain='PT1M', time_window='PT5M', cool_down='PT1M'): '''Create a new autoscale rule - pass the output in a list to create_autoscale_setting(). Args: subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of scale set to apply scale events to. metric_name (str): Name of metric being evaluated. operator (str): Operator to evaluate. E.g. "GreaterThan". threshold (str): Threshold to trigger action. direction (str): Direction of action. E.g. Increase. change_count (str): How many to increase or decrease by. time_grain (str): Optional. Measurement granularity. Default 'PT1M'. time_window (str): Optional. Range of time to collect data over. Default 'PT5M'. cool_down (str): Optional. Time to wait after last scaling action. ISO 8601 format. Default 'PT1M'. Returns: HTTP response. JSON body of autoscale setting. ''' metric_trigger = {'metricName': metric_name} metric_trigger['metricNamespace'] = '' metric_trigger['metricResourceUri'] = '/subscriptions/' + subscription_id + \ '/resourceGroups/' + resource_group + \ '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name metric_trigger['timeGrain'] = time_grain metric_trigger['statistic'] = 'Average' metric_trigger['timeWindow'] = time_window metric_trigger['timeAggregation'] = 'Average' metric_trigger['operator'] = operator metric_trigger['threshold'] = threshold scale_action = {'direction': direction} scale_action['type'] = 'ChangeCount' scale_action['value'] = str(change_count) scale_action['cooldown'] = cool_down new_rule = {'metricTrigger': metric_trigger} new_rule['scaleAction'] = scale_action return new_rule
python
def create_autoscale_rule(subscription_id, resource_group, vmss_name, metric_name, operator, threshold, direction, change_count, time_grain='PT1M', time_window='PT5M', cool_down='PT1M'): '''Create a new autoscale rule - pass the output in a list to create_autoscale_setting(). Args: subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of scale set to apply scale events to. metric_name (str): Name of metric being evaluated. operator (str): Operator to evaluate. E.g. "GreaterThan". threshold (str): Threshold to trigger action. direction (str): Direction of action. E.g. Increase. change_count (str): How many to increase or decrease by. time_grain (str): Optional. Measurement granularity. Default 'PT1M'. time_window (str): Optional. Range of time to collect data over. Default 'PT5M'. cool_down (str): Optional. Time to wait after last scaling action. ISO 8601 format. Default 'PT1M'. Returns: HTTP response. JSON body of autoscale setting. ''' metric_trigger = {'metricName': metric_name} metric_trigger['metricNamespace'] = '' metric_trigger['metricResourceUri'] = '/subscriptions/' + subscription_id + \ '/resourceGroups/' + resource_group + \ '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name metric_trigger['timeGrain'] = time_grain metric_trigger['statistic'] = 'Average' metric_trigger['timeWindow'] = time_window metric_trigger['timeAggregation'] = 'Average' metric_trigger['operator'] = operator metric_trigger['threshold'] = threshold scale_action = {'direction': direction} scale_action['type'] = 'ChangeCount' scale_action['value'] = str(change_count) scale_action['cooldown'] = cool_down new_rule = {'metricTrigger': metric_trigger} new_rule['scaleAction'] = scale_action return new_rule
Create a new autoscale rule - pass the output in a list to create_autoscale_setting(). Args: subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of scale set to apply scale events to. metric_name (str): Name of metric being evaluated. operator (str): Operator to evaluate. E.g. "GreaterThan". threshold (str): Threshold to trigger action. direction (str): Direction of action. E.g. Increase. change_count (str): How many to increase or decrease by. time_grain (str): Optional. Measurement granularity. Default 'PT1M'. time_window (str): Optional. Range of time to collect data over. Default 'PT5M'. cool_down (str): Optional. Time to wait after last scaling action. ISO 8601 format. Default 'PT1M'. Returns: HTTP response. JSON body of autoscale setting.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L8-L46
gbowerman/azurerm
azurerm/insightsrp.py
create_autoscale_setting
def create_autoscale_setting(access_token, subscription_id, resource_group, setting_name, vmss_name, location, minval, maxval, default, autoscale_rules, notify=None): '''Create a new autoscale setting for a scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. setting_name (str): Name of the autoscale setting. vmss_name (str): Name of scale set to apply scale events to. location (str): Azure data center location. E.g. westus. minval (int): Minimum number of VMs. maxval (int): Maximum number of VMs. default (int): Default VM number when no data available. autoscale_rules (list): List of outputs from create_autoscale_rule(). notify (str): Optional. Returns: HTTP response. JSON body of autoscale setting. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/autoscaleSettings/', setting_name, '?api-version=', INSIGHTS_API]) autoscale_setting = {'location': location} profile = {'name': 'Profile1'} capacity = {'minimum': str(minval)} capacity['maximum'] = str(maxval) capacity['default'] = str(default) profile['capacity'] = capacity profile['rules'] = autoscale_rules profiles = [profile] properties = {'name': setting_name} properties['profiles'] = profiles properties['targetResourceUri'] = '/subscriptions/' + subscription_id + \ '/resourceGroups/' + resource_group + \ '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name properties['enabled'] = True if notify is not None: notification = {'operation': 'Scale'} email = {'sendToSubscriptionAdministrato': False} email['sendToSubscriptionCoAdministrators'] = False email['customEmails'] = [notify] notification = {'email': email} properties['notifications'] = [notification] autoscale_setting['properties'] = properties body = json.dumps(autoscale_setting) return do_put(endpoint, body, access_token)
python
def create_autoscale_setting(access_token, subscription_id, resource_group, setting_name, vmss_name, location, minval, maxval, default, autoscale_rules, notify=None): '''Create a new autoscale setting for a scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. setting_name (str): Name of the autoscale setting. vmss_name (str): Name of scale set to apply scale events to. location (str): Azure data center location. E.g. westus. minval (int): Minimum number of VMs. maxval (int): Maximum number of VMs. default (int): Default VM number when no data available. autoscale_rules (list): List of outputs from create_autoscale_rule(). notify (str): Optional. Returns: HTTP response. JSON body of autoscale setting. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/autoscaleSettings/', setting_name, '?api-version=', INSIGHTS_API]) autoscale_setting = {'location': location} profile = {'name': 'Profile1'} capacity = {'minimum': str(minval)} capacity['maximum'] = str(maxval) capacity['default'] = str(default) profile['capacity'] = capacity profile['rules'] = autoscale_rules profiles = [profile] properties = {'name': setting_name} properties['profiles'] = profiles properties['targetResourceUri'] = '/subscriptions/' + subscription_id + \ '/resourceGroups/' + resource_group + \ '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name properties['enabled'] = True if notify is not None: notification = {'operation': 'Scale'} email = {'sendToSubscriptionAdministrato': False} email['sendToSubscriptionCoAdministrators'] = False email['customEmails'] = [notify] notification = {'email': email} properties['notifications'] = [notification] autoscale_setting['properties'] = properties body = json.dumps(autoscale_setting) return do_put(endpoint, body, access_token)
Create a new autoscale setting for a scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. setting_name (str): Name of the autoscale setting. vmss_name (str): Name of scale set to apply scale events to. location (str): Azure data center location. E.g. westus. minval (int): Minimum number of VMs. maxval (int): Maximum number of VMs. default (int): Default VM number when no data available. autoscale_rules (list): List of outputs from create_autoscale_rule(). notify (str): Optional. Returns: HTTP response. JSON body of autoscale setting.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L49-L97
gbowerman/azurerm
azurerm/insightsrp.py
list_autoscale_settings
def list_autoscale_settings(access_token, subscription_id): '''List the autoscale settings in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of autoscale settings. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/', '/autoscaleSettings?api-version=', INSIGHTS_API]) return do_get(endpoint, access_token)
python
def list_autoscale_settings(access_token, subscription_id): '''List the autoscale settings in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of autoscale settings. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/', '/autoscaleSettings?api-version=', INSIGHTS_API]) return do_get(endpoint, access_token)
List the autoscale settings in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of autoscale settings.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L100-L114
gbowerman/azurerm
azurerm/insightsrp.py
list_insights_components
def list_insights_components(access_token, subscription_id, resource_group): '''List the Microsoft Insights components in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of components. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/', '/components?api-version=', INSIGHTS_COMPONENTS_API]) return do_get(endpoint, access_token)
python
def list_insights_components(access_token, subscription_id, resource_group): '''List the Microsoft Insights components in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of components. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/microsoft.insights/', '/components?api-version=', INSIGHTS_COMPONENTS_API]) return do_get(endpoint, access_token)
List the Microsoft Insights components in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of components.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L117-L133
gbowerman/azurerm
azurerm/insightsrp.py
list_metric_defs_for_resource
def list_metric_defs_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name): '''List the monitoring metric definitions for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_provider (str): Type of resource provider. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of metric definitions. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metricdefinitions?api-version=', INSIGHTS_METRICS_API]) return do_get(endpoint, access_token)
python
def list_metric_defs_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name): '''List the monitoring metric definitions for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_provider (str): Type of resource provider. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of metric definitions. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metricdefinitions?api-version=', INSIGHTS_METRICS_API]) return do_get(endpoint, access_token)
List the monitoring metric definitions for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_provider (str): Type of resource provider. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of metric definitions.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L136-L159
gbowerman/azurerm
azurerm/insightsrp.py
get_metrics_for_resource
def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name): '''Get the monitoring metrics for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of resource metrics. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metrics?api-version=', INSIGHTS_PREVIEW_API]) return do_get(endpoint, access_token)
python
def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name): '''Get the monitoring metrics for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of resource metrics. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metrics?api-version=', INSIGHTS_PREVIEW_API]) return do_get(endpoint, access_token)
Get the monitoring metrics for a resource. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. resource_type (str): Type of resource. resource_name (str): Name of resource. Returns: HTTP response. JSON body of resource metrics.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L162-L184
gbowerman/azurerm
azurerm/insightsrp.py
get_events_for_subscription
def get_events_for_subscription(access_token, subscription_id, start_timestamp): '''Get the insights evens for a subsctipion since the specific timestamp. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'. Returns: HTTP response. JSON body of insights events. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/eventtypes/management/values?api-version=', INSIGHTS_API, '&$filter=eventTimestamp ge \'', start_timestamp, '\'']) return do_get(endpoint, access_token)
python
def get_events_for_subscription(access_token, subscription_id, start_timestamp): '''Get the insights evens for a subsctipion since the specific timestamp. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'. Returns: HTTP response. JSON body of insights events. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/microsoft.insights/eventtypes/management/values?api-version=', INSIGHTS_API, '&$filter=eventTimestamp ge \'', start_timestamp, '\'']) return do_get(endpoint, access_token)
Get the insights evens for a subsctipion since the specific timestamp. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. start_timestamp (str): timestamp to get events from. E.g. '2017-05-01T00:00:00.0000000Z'. Returns: HTTP response. JSON body of insights events.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/insightsrp.py#L187-L202
gbowerman/azurerm
examples/create_vm.py
main
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--name', '-n', required=True, action='store', help='Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Print operational details') args = arg_parser.parse_args() name = args.name rgname = args.rgname location = args.location # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # initialize haikunator hkn = Haikunator() # create NSG nsg_name = name + 'nsg' print('Creating NSG: ' + nsg_name) rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location) nsg_id = rmreturn.json()['id'] print('nsg_id = ' + nsg_id) # create NSG rule nsg_rule = 'ssh' print('Creating NSG rule: ' + nsg_rule) rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule, description='ssh rule', destination_range='22') print(rmreturn) print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) # create VNET vnetname = name + 'vnet' print('Creating VNet: ' + vnetname) rmreturn = azurerm.create_vnet(access_token, subscription_id, rgname, vnetname, location, nsg_id=nsg_id) print(rmreturn) # print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) subnet_id = rmreturn.json()['properties']['subnets'][0]['id'] print('subnet_id = ' + 
subnet_id) # create public IP address public_ip_name = name + 'ip' dns_label = name + 'ip' print('Creating public IP address: ' + public_ip_name) rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name, dns_label, location) print(rmreturn) ip_id = rmreturn.json()['id'] print('ip_id = ' + ip_id) print('Waiting for IP provisioning..') waiting = True while waiting: ipa = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name) if ipa['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create NIC nic_name = name + 'nic' print('Creating NIC: ' + nic_name) rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id, subnet_id, location) #print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) nic_id = rmreturn.json()['id'] print('Waiting for NIC provisioning..') waiting = True while waiting: nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name) if nic['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create VM vm_name = name vm_size = 'Standard_D1' publisher = 'CoreOS' offer = 'CoreOS' sku = 'Stable' version = 'latest' username = 'azure' password = hkn.haikunate(delimiter=',') # creates random password print('password = ' + password) print('Creating VM: ' + vm_name) rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vm_size, publisher, offer, sku, version, nic_id, location, username=username, password=password) print(rmreturn) print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
python
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--name', '-n', required=True, action='store', help='Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Print operational details') args = arg_parser.parse_args() name = args.name rgname = args.rgname location = args.location # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # initialize haikunator hkn = Haikunator() # create NSG nsg_name = name + 'nsg' print('Creating NSG: ' + nsg_name) rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location) nsg_id = rmreturn.json()['id'] print('nsg_id = ' + nsg_id) # create NSG rule nsg_rule = 'ssh' print('Creating NSG rule: ' + nsg_rule) rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule, description='ssh rule', destination_range='22') print(rmreturn) print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) # create VNET vnetname = name + 'vnet' print('Creating VNet: ' + vnetname) rmreturn = azurerm.create_vnet(access_token, subscription_id, rgname, vnetname, location, nsg_id=nsg_id) print(rmreturn) # print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) subnet_id = rmreturn.json()['properties']['subnets'][0]['id'] print('subnet_id = ' + 
subnet_id) # create public IP address public_ip_name = name + 'ip' dns_label = name + 'ip' print('Creating public IP address: ' + public_ip_name) rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name, dns_label, location) print(rmreturn) ip_id = rmreturn.json()['id'] print('ip_id = ' + ip_id) print('Waiting for IP provisioning..') waiting = True while waiting: ipa = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name) if ipa['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create NIC nic_name = name + 'nic' print('Creating NIC: ' + nic_name) rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id, subnet_id, location) #print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) nic_id = rmreturn.json()['id'] print('Waiting for NIC provisioning..') waiting = True while waiting: nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name) if nic['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create VM vm_name = name vm_size = 'Standard_D1' publisher = 'CoreOS' offer = 'CoreOS' sku = 'Stable' version = 'latest' username = 'azure' password = hkn.haikunate(delimiter=',') # creates random password print('password = ' + password) print('Creating VM: ' + vm_name) rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vm_size, publisher, offer, sku, version, nic_id, location, username=username, password=password) print(rmreturn) print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/create_vm.py#L10-L121
gbowerman/azurerm
examples/list_vms.py
main
def main(): '''Main routine.''' # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) vmlist = azurerm.list_vms_sub(access_token, subscription_id) print(json.dumps(vmlist, sort_keys=False, indent=2, separators=(',', ': '))) ''' for vm in vmlist['value']: count += 1 name = vm['name'] location = vm['location'] offer = vm['properties']['storageProfile']['imageReference']['offer'] sku = vm['properties']['storageProfile']['imageReference']['sku'] print(''.join([str(count), ': ', name, # ', RG: ', rgname, ', location: ', location, ', OS: ', offer, ' ', sku])) '''
python
def main(): '''Main routine.''' # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) vmlist = azurerm.list_vms_sub(access_token, subscription_id) print(json.dumps(vmlist, sort_keys=False, indent=2, separators=(',', ': '))) ''' for vm in vmlist['value']: count += 1 name = vm['name'] location = vm['location'] offer = vm['properties']['storageProfile']['imageReference']['offer'] sku = vm['properties']['storageProfile']['imageReference']['sku'] print(''.join([str(count), ': ', name, # ', RG: ', rgname, ', location: ', location, ', OS: ', offer, ' ', sku])) '''
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vms.py#L8-L37
gbowerman/azurerm
azurerm/templates.py
deploy_template
def deploy_template(access_token, subscription_id, resource_group, deployment_name, template, parameters): '''Deploy a template referenced by a JSON string, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template (str): String representatipn of a JSON template body. parameters (str): String representation of a JSON template parameters body. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'template': template} properties['mode'] = 'Incremental' properties['parameters'] = parameters template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
python
def deploy_template(access_token, subscription_id, resource_group, deployment_name, template, parameters): '''Deploy a template referenced by a JSON string, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template (str): String representatipn of a JSON template body. parameters (str): String representation of a JSON template parameters body. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'template': template} properties['mode'] = 'Incremental' properties['parameters'] = parameters template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
Deploy a template referenced by a JSON string, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template (str): String representatipn of a JSON template body. parameters (str): String representation of a JSON template parameters body. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/templates.py#L7-L32
gbowerman/azurerm
azurerm/templates.py
deploy_template_uri
def deploy_template_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters): '''Deploy a template referenced by a URI, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters (str): String representation of a JSON template parameters body. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'templateLink': {'uri': template_uri}} properties['mode'] = 'Incremental' properties['parameters'] = parameters template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
python
def deploy_template_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters): '''Deploy a template referenced by a URI, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters (str): String representation of a JSON template parameters body. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'templateLink': {'uri': template_uri}} properties['mode'] = 'Incremental' properties['parameters'] = parameters template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
Deploy a template referenced by a URI, with parameters as a JSON string. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters (str): String representation of a JSON template parameters body. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/templates.py#L35-L60
gbowerman/azurerm
azurerm/templates.py
deploy_template_uri_param_uri
def deploy_template_uri_param_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters_uri): '''Deploy a template with both template and parameters referenced by URIs. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location). Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'templateLink': {'uri': template_uri}} properties['mode'] = 'Incremental' properties['parametersLink'] = {'uri': parameters_uri} template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
python
def deploy_template_uri_param_uri(access_token, subscription_id, resource_group, deployment_name, template_uri, parameters_uri): '''Deploy a template with both template and parameters referenced by URIs. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location). Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API]) properties = {'templateLink': {'uri': template_uri}} properties['mode'] = 'Incremental' properties['parametersLink'] = {'uri': parameters_uri} template_body = {'properties': properties} body = json.dumps(template_body) return do_put(endpoint, body, access_token)
Deploy a template with both template and parameters referenced by URIs. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. deployment_name (str): A name you give to the deployment. template_uri (str): URI which points to a JSON template (e.g. github raw location). parameters_uri (str): URI which points to a JSON parameters file (e.g. github raw location). Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/templates.py#L63-L88
gbowerman/azurerm
examples/vmssdisk_cliauth.py
attach_model
def attach_model(subscription, rgname, vmssvm_model, diskname, lun): '''Attach a data disk to a VMSS VM model''' disk_id = '/subscriptions/' + subscription + '/resourceGroups/' + rgname + \ '/providers/Microsoft.Compute/disks/' + diskname disk_model = {'lun': lun, 'createOption': 'Attach', 'caching': 'None', 'managedDisk': {'storageAccountType': 'Standard_LRS', 'id': disk_id}} vmssvm_model['properties']['storageProfile']['dataDisks'].append( disk_model) return vmssvm_model
python
def attach_model(subscription, rgname, vmssvm_model, diskname, lun): '''Attach a data disk to a VMSS VM model''' disk_id = '/subscriptions/' + subscription + '/resourceGroups/' + rgname + \ '/providers/Microsoft.Compute/disks/' + diskname disk_model = {'lun': lun, 'createOption': 'Attach', 'caching': 'None', 'managedDisk': {'storageAccountType': 'Standard_LRS', 'id': disk_id}} vmssvm_model['properties']['storageProfile']['dataDisks'].append( disk_model) return vmssvm_model
Attach a data disk to a VMSS VM model
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssdisk_cliauth.py#L9-L17
gbowerman/azurerm
examples/vmssdisk_cliauth.py
detach_model
def detach_model(vmssvm_model, lun): '''Detach a data disk from a VMSS VM model''' data_disks = vmssvm_model['properties']['storageProfile']['dataDisks'] data_disks[:] = [disk for disk in data_disks if disk.get('lun') != lun] vmssvm_model['properties']['storageProfile']['dataDisks'] = data_disks return vmssvm_model
python
def detach_model(vmssvm_model, lun): '''Detach a data disk from a VMSS VM model''' data_disks = vmssvm_model['properties']['storageProfile']['dataDisks'] data_disks[:] = [disk for disk in data_disks if disk.get('lun') != lun] vmssvm_model['properties']['storageProfile']['dataDisks'] = data_disks return vmssvm_model
Detach a data disk from a VMSS VM model
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssdisk_cliauth.py#L20-L25
gbowerman/azurerm
examples/vmssdisk_cliauth.py
main
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') subscription_id = azurerm.get_subscription_from_cli() # authenticate access_token = azurerm.get_access_token_from_cli() # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) else: if operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201: sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
python
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') subscription_id = azurerm.get_subscription_from_cli() # authenticate access_token = azurerm.get_access_token_from_cli() # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) else: if operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201: sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssdisk_cliauth.py#L28-L82
gbowerman/azurerm
azurerm/adalfns.py
get_access_token
def get_access_token(tenant_id, application_id, application_secret): '''get an Azure access token using the adal library. Args: tenant_id (str): Tenant id of the user's account. application_id (str): Application id of a Service Principal account. application_secret (str): Application secret (password) of the Service Principal account. Returns: An Azure authentication token string. ''' context = adal.AuthenticationContext( get_auth_endpoint() + tenant_id, api_version=None) token_response = context.acquire_token_with_client_credentials( get_resource_endpoint(), application_id, application_secret) return token_response.get('accessToken')
python
def get_access_token(tenant_id, application_id, application_secret): '''get an Azure access token using the adal library. Args: tenant_id (str): Tenant id of the user's account. application_id (str): Application id of a Service Principal account. application_secret (str): Application secret (password) of the Service Principal account. Returns: An Azure authentication token string. ''' context = adal.AuthenticationContext( get_auth_endpoint() + tenant_id, api_version=None) token_response = context.acquire_token_with_client_credentials( get_resource_endpoint(), application_id, application_secret) return token_response.get('accessToken')
get an Azure access token using the adal library. Args: tenant_id (str): Tenant id of the user's account. application_id (str): Application id of a Service Principal account. application_secret (str): Application secret (password) of the Service Principal account. Returns: An Azure authentication token string.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/adalfns.py#L13-L28
gbowerman/azurerm
azurerm/adalfns.py
get_access_token_from_cli
def get_access_token_from_cli(): '''Get an Azure authentication token from CLI's cache. Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login' recently), or if you are running in Azure Cloud Shell (aka cloud console) Returns: An Azure authentication token string. ''' # check if running in cloud shell, if so, pick up token from MSI_ENDPOINT if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: endpoint = os.environ['MSI_ENDPOINT'] headers = {'Metadata': 'true'} body = {"resource": "https://management.azure.com/"} ret = requests.post(endpoint, headers=headers, data=body) return ret.json()['access_token'] else: # not running cloud shell home = os.path.expanduser('~') sub_username = "" # 1st identify current subscription azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json' if os.path.isfile(azure_profile_path) is False: print('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path) return None with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd: subs = json.load(azure_profile_fd) for sub in subs['subscriptions']: if sub['isDefault'] == True: sub_username = sub['user']['name'] if sub_username == "": print('Error from get_access_token_from_cli(): Default subscription not found in ' + \ azure_profile_path) return None # look for acces_token access_keys_path = home + os.sep + '.azure' + os.sep + 'accessTokens.json' if os.path.isfile(access_keys_path) is False: print('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path) return None with open(access_keys_path, 'r') as access_keys_fd: keys = json.load(access_keys_fd) # loop through accessTokens.json until first unexpired entry found for key in keys: if key['userId'] == sub_username: if 'accessToken' not in keys[0]: print('Error from get_access_token_from_cli(): accessToken not found in ' + \ access_keys_path) return None if 'tokenType' not in keys[0]: print('Error from get_access_token_from_cli(): 
tokenType not found in ' + \ access_keys_path) return None if 'expiresOn' not in keys[0]: print('Error from get_access_token_from_cli(): expiresOn not found in ' + \ access_keys_path) return None expiry_date_str = key['expiresOn'] # check date and skip past expired entries if 'T' in expiry_date_str: exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ') else: exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f') if exp_date < dt.now(): continue else: return key['accessToken'] # if dropped out of the loop, token expired print('Error from get_access_token_from_cli(): token expired. Run \'az login\'') return None
python
def get_access_token_from_cli(): '''Get an Azure authentication token from CLI's cache. Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login' recently), or if you are running in Azure Cloud Shell (aka cloud console) Returns: An Azure authentication token string. ''' # check if running in cloud shell, if so, pick up token from MSI_ENDPOINT if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: endpoint = os.environ['MSI_ENDPOINT'] headers = {'Metadata': 'true'} body = {"resource": "https://management.azure.com/"} ret = requests.post(endpoint, headers=headers, data=body) return ret.json()['access_token'] else: # not running cloud shell home = os.path.expanduser('~') sub_username = "" # 1st identify current subscription azure_profile_path = home + os.sep + '.azure' + os.sep + 'azureProfile.json' if os.path.isfile(azure_profile_path) is False: print('Error from get_access_token_from_cli(): Cannot find ' + azure_profile_path) return None with codecs.open(azure_profile_path, 'r', 'utf-8-sig') as azure_profile_fd: subs = json.load(azure_profile_fd) for sub in subs['subscriptions']: if sub['isDefault'] == True: sub_username = sub['user']['name'] if sub_username == "": print('Error from get_access_token_from_cli(): Default subscription not found in ' + \ azure_profile_path) return None # look for acces_token access_keys_path = home + os.sep + '.azure' + os.sep + 'accessTokens.json' if os.path.isfile(access_keys_path) is False: print('Error from get_access_token_from_cli(): Cannot find ' + access_keys_path) return None with open(access_keys_path, 'r') as access_keys_fd: keys = json.load(access_keys_fd) # loop through accessTokens.json until first unexpired entry found for key in keys: if key['userId'] == sub_username: if 'accessToken' not in keys[0]: print('Error from get_access_token_from_cli(): accessToken not found in ' + \ access_keys_path) return None if 'tokenType' not in keys[0]: print('Error from get_access_token_from_cli(): 
tokenType not found in ' + \ access_keys_path) return None if 'expiresOn' not in keys[0]: print('Error from get_access_token_from_cli(): expiresOn not found in ' + \ access_keys_path) return None expiry_date_str = key['expiresOn'] # check date and skip past expired entries if 'T' in expiry_date_str: exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%dT%H:%M:%S.%fZ') else: exp_date = dt.strptime(key['expiresOn'], '%Y-%m-%d %H:%M:%S.%f') if exp_date < dt.now(): continue else: return key['accessToken'] # if dropped out of the loop, token expired print('Error from get_access_token_from_cli(): token expired. Run \'az login\'') return None
Get an Azure authentication token from CLI's cache. Will only work if CLI local cache has an unexpired auth token (i.e. you ran 'az login' recently), or if you are running in Azure Cloud Shell (aka cloud console) Returns: An Azure authentication token string.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/adalfns.py#L31-L105
gbowerman/azurerm
examples/list_vm_images.py
main
def main(): '''Main routine.''' # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) count = 0 vmimglist = azurerm.list_vm_images_sub(access_token, subscription_id) print(json.dumps(vmimglist, sort_keys=False, indent=2, separators=(',', ': '))) for vm_image in vmimglist['value']: count += 1 name = vm_image['name'] location = vm_image['location'] offer = vm_image['properties']['storageProfile']['imageReference']['offer'] sku = vm_image['properties']['storageProfile']['imageReference']['sku'] print(''.join([str(count), ': ', name, ', location: ', location, ', OS: ', offer, ' ', sku]))
python
def main(): '''Main routine.''' # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) count = 0 vmimglist = azurerm.list_vm_images_sub(access_token, subscription_id) print(json.dumps(vmimglist, sort_keys=False, indent=2, separators=(',', ': '))) for vm_image in vmimglist['value']: count += 1 name = vm_image['name'] location = vm_image['location'] offer = vm_image['properties']['storageProfile']['imageReference']['offer'] sku = vm_image['properties']['storageProfile']['imageReference']['sku'] print(''.join([str(count), ': ', name, ', location: ', location, ', OS: ', offer, ' ', sku]))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vm_images.py#L8-L36
gbowerman/azurerm
azurerm/vmimages.py
list_offers
def list_offers(access_token, subscription_id, location, publisher): '''List available VM image offers from a publisher. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): Publisher name, e.g. Canonical. Returns: HTTP response with JSON list of image offers. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers?api-version=', COMP_API]) return do_get(endpoint, access_token)
python
def list_offers(access_token, subscription_id, location, publisher): '''List available VM image offers from a publisher. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): Publisher name, e.g. Canonical. Returns: HTTP response with JSON list of image offers. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers?api-version=', COMP_API]) return do_get(endpoint, access_token)
List available VM image offers from a publisher. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): Publisher name, e.g. Canonical. Returns: HTTP response with JSON list of image offers.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/vmimages.py#L7-L25
gbowerman/azurerm
azurerm/vmimages.py
list_skus
def list_skus(access_token, subscription_id, location, publisher, offer): '''List available VM image skus for a publisher offer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. Returns: HTTP response with JSON list of skus. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus?api-version=', COMP_API]) return do_get(endpoint, access_token)
python
def list_skus(access_token, subscription_id, location, publisher, offer): '''List available VM image skus for a publisher offer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. Returns: HTTP response with JSON list of skus. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus?api-version=', COMP_API]) return do_get(endpoint, access_token)
List available VM image skus for a publisher offer. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. Returns: HTTP response with JSON list of skus.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/vmimages.py#L47-L67
gbowerman/azurerm
azurerm/vmimages.py
list_sku_versions
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku): '''List available versions for a given publisher's sku. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. sku (str): VM image sku. E.g. 2016-Datacenter. Returns: HTTP response with JSON list of versions. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus/', sku, '/versions?api-version=', COMP_API]) return do_get(endpoint, access_token)
python
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku): '''List available versions for a given publisher's sku. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. sku (str): VM image sku. E.g. 2016-Datacenter. Returns: HTTP response with JSON list of versions. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/', 'locations/', location, '/publishers/', publisher, '/artifacttypes/vmimage/offers/', offer, '/skus/', sku, '/versions?api-version=', COMP_API]) return do_get(endpoint, access_token)
List available versions for a given publisher's sku. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. sku (str): VM image sku. E.g. 2016-Datacenter. Returns: HTTP response with JSON list of versions.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/vmimages.py#L70-L92
gbowerman/azurerm
examples/vmssvmdisk.py
main
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) else: if operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201 and rmreturn.status_code != 202: 
sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
python
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) else: if operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201 and rmreturn.status_code != 202: 
sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssvmdisk.py#L27-L90
gbowerman/azurerm
examples/jumpbox.py
main
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure', help='Optional username') arg_parser.add_argument('--password', '-p', required=False, action='store', help='Optional password') arg_parser.add_argument('--sshkey', '-k', required=False, action='store', help='SSH public key') arg_parser.add_argument('--sshpath', '-s', required=False, action='store', help='SSH public key file path') arg_parser.add_argument('--location', '-l', required=False, action='store', help='Location, e.g. eastus') arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2', help='VM size, defaults to Standard_D1_V2') arg_parser.add_argument('--dns', '-d', required=False, action='store', help='DNS, e.g. 
myuniquename') arg_parser.add_argument('--vnet', required=False, action='store', help='Optional VNET Name (else first VNET in resource group used)') arg_parser.add_argument('--nowait', action='store_true', default=False, help='Do not wait for VM to finish provisioning') arg_parser.add_argument('--nonsg', action='store_true', default=False, help='Do not create a network security group on the NIC') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Print operational details') args = arg_parser.parse_args() name = args.vmname rgname = args.rgname vnet = args.vnet location = args.location username = args.user password = args.password sshkey = args.sshkey sshpath = args.sshpath verbose = args.verbose dns_label = args.dns no_wait = args.nowait no_nsg = args.nonsg vmsize = args.vmsize # make sure all authentication scenarios are handled if sshkey is not None and sshpath is not None: sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.') if password is not None and (sshkey is not None or sshpath is not None): sys.exit('Error: provide a password or SSH key (or nothing), not both') use_password = False if password is not None: use_password = True else: if sshkey is None and sshpath is None: # no auth parameters were provided # look for ~/id_rsa.pub home = os.path.expanduser('~') sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub' if os.path.isfile(sshpath) is False: print('Default public key file not found.') use_password = True password = Haikunator().haikunate(delimiter=',') # creates random password print('Created new password = ' + password) else: print('Default public key file found') if use_password is False: print('Reading public key..') if sshkey is None: # at this point sshpath should have a valid Value with open(sshpath, 'r') as pub_ssh_file_fd: sshkey = pub_ssh_file_fd.read() # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) 
except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # if no location parameter was specified now would be a good time to figure out the location if location is None: try: rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname) location = rgroup['location'] except KeyError: print('Cannot find resource group ' + rgname + '. Check connection/authorization.') print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': '))) sys.exit() print('location = ' + location) # get VNET print('Getting VNet') vnet_not_found = False if vnet is None: print('VNet not set, checking resource group') # get first VNET in resource group try: vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname) # print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': '))) vnetresource = vnets['value'][0] except IndexError: print('No VNET found in resource group.') vnet_not_found = True vnet = name + 'vnet' else: print('Getting VNet: ' + vnet) vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet) if 'properties' not in vnetresource: print('VNet ' + vnet + ' not found in resource group ' + rgname) vnet_not_found = True if vnet_not_found is True: # create a vnet print('Creating vnet: ' + vnet) rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location, \ address_prefix='10.0.0.0/16', nsg_id=None) if rmresource.status_code != 201: print('Error ' + str(vnetresource.status_code) + ' creating VNET. 
' + vnetresource.text) sys.exit() vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet) try: subnet_id = vnetresource['properties']['subnets'][0]['id'] except KeyError: print('Subnet not found for VNet ' + vnet) sys.exit() if verbose is True: print('subnet_id = ' + subnet_id) public_ip_name = name + 'ip' if dns_label is None: dns_label = name + 'dns' print('Creating public ipaddr') rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name, dns_label, location) if rmreturn.status_code not in [200, 201]: print(rmreturn.text) sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()') ip_id = rmreturn.json()['id'] if verbose is True: print('ip_id = ' + ip_id) print('Waiting for IP provisioning..') waiting = True while waiting: pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name) if pip['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) if no_nsg is True: nsg_id = None else: # create NSG nsg_name = name + 'nsg' print('Creating NSG: ' + nsg_name) rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location) if rmreturn.status_code not in [200, 201]: print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text) sys.exit() nsg_id = rmreturn.json()['id'] # create NSG rule for ssh, scp nsg_rule = 'ssh' print('Creating NSG rule: ' + nsg_rule) rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule, description='ssh rule', destination_range='22') if rmreturn.status_code not in [200, 201]: print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. 
' + rmreturn.text) sys.exit() # create NIC nic_name = name + 'nic' print('Creating NIC: ' + nic_name) rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id, subnet_id, location, nsg_id=nsg_id) if rmreturn.status_code not in [200, 201]: print('Error ' + rmreturn.status_code + ' creating NSG rule. ' + rmreturn.text) sys.exit() nic_id = rmreturn.json()['id'] print('Waiting for NIC provisioning..') waiting = True while waiting: nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name) if nic['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create VM vm_name = name #publisher = 'CoreOS' #offer = 'CoreOS' #sku = 'Stable' publisher = 'Canonical' offer = 'UbuntuServer' sku = '16.04-LTS' version = 'latest' print('Creating VM: ' + vm_name) if use_password is True: rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize, publisher, offer, sku, version, nic_id, location, username=username, password=password) else: rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize, publisher, offer, sku, version, nic_id, location, username=username, public_key=sshkey) if rmreturn.status_code != 201: sys.exit('Error ' + rmreturn.status_code + ' creating VM. ' + rmreturn.text) if no_wait is False: print('Waiting for VM provisioning..') waiting = True while waiting: vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name) if vm_model['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(5) print('VM provisioning complete.') print('Connect with:') print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username)
python
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure', help='Optional username') arg_parser.add_argument('--password', '-p', required=False, action='store', help='Optional password') arg_parser.add_argument('--sshkey', '-k', required=False, action='store', help='SSH public key') arg_parser.add_argument('--sshpath', '-s', required=False, action='store', help='SSH public key file path') arg_parser.add_argument('--location', '-l', required=False, action='store', help='Location, e.g. eastus') arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2', help='VM size, defaults to Standard_D1_V2') arg_parser.add_argument('--dns', '-d', required=False, action='store', help='DNS, e.g. 
myuniquename') arg_parser.add_argument('--vnet', required=False, action='store', help='Optional VNET Name (else first VNET in resource group used)') arg_parser.add_argument('--nowait', action='store_true', default=False, help='Do not wait for VM to finish provisioning') arg_parser.add_argument('--nonsg', action='store_true', default=False, help='Do not create a network security group on the NIC') arg_parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Print operational details') args = arg_parser.parse_args() name = args.vmname rgname = args.rgname vnet = args.vnet location = args.location username = args.user password = args.password sshkey = args.sshkey sshpath = args.sshpath verbose = args.verbose dns_label = args.dns no_wait = args.nowait no_nsg = args.nonsg vmsize = args.vmsize # make sure all authentication scenarios are handled if sshkey is not None and sshpath is not None: sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.') if password is not None and (sshkey is not None or sshpath is not None): sys.exit('Error: provide a password or SSH key (or nothing), not both') use_password = False if password is not None: use_password = True else: if sshkey is None and sshpath is None: # no auth parameters were provided # look for ~/id_rsa.pub home = os.path.expanduser('~') sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub' if os.path.isfile(sshpath) is False: print('Default public key file not found.') use_password = True password = Haikunator().haikunate(delimiter=',') # creates random password print('Created new password = ' + password) else: print('Default public key file found') if use_password is False: print('Reading public key..') if sshkey is None: # at this point sshpath should have a valid Value with open(sshpath, 'r') as pub_ssh_file_fd: sshkey = pub_ssh_file_fd.read() # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) 
except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # if no location parameter was specified now would be a good time to figure out the location if location is None: try: rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname) location = rgroup['location'] except KeyError: print('Cannot find resource group ' + rgname + '. Check connection/authorization.') print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': '))) sys.exit() print('location = ' + location) # get VNET print('Getting VNet') vnet_not_found = False if vnet is None: print('VNet not set, checking resource group') # get first VNET in resource group try: vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname) # print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': '))) vnetresource = vnets['value'][0] except IndexError: print('No VNET found in resource group.') vnet_not_found = True vnet = name + 'vnet' else: print('Getting VNet: ' + vnet) vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet) if 'properties' not in vnetresource: print('VNet ' + vnet + ' not found in resource group ' + rgname) vnet_not_found = True if vnet_not_found is True: # create a vnet print('Creating vnet: ' + vnet) rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location, \ address_prefix='10.0.0.0/16', nsg_id=None) if rmresource.status_code != 201: print('Error ' + str(vnetresource.status_code) + ' creating VNET. 
' + vnetresource.text) sys.exit() vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet) try: subnet_id = vnetresource['properties']['subnets'][0]['id'] except KeyError: print('Subnet not found for VNet ' + vnet) sys.exit() if verbose is True: print('subnet_id = ' + subnet_id) public_ip_name = name + 'ip' if dns_label is None: dns_label = name + 'dns' print('Creating public ipaddr') rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name, dns_label, location) if rmreturn.status_code not in [200, 201]: print(rmreturn.text) sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()') ip_id = rmreturn.json()['id'] if verbose is True: print('ip_id = ' + ip_id) print('Waiting for IP provisioning..') waiting = True while waiting: pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name) if pip['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) if no_nsg is True: nsg_id = None else: # create NSG nsg_name = name + 'nsg' print('Creating NSG: ' + nsg_name) rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location) if rmreturn.status_code not in [200, 201]: print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text) sys.exit() nsg_id = rmreturn.json()['id'] # create NSG rule for ssh, scp nsg_rule = 'ssh' print('Creating NSG rule: ' + nsg_rule) rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name, nsg_rule, description='ssh rule', destination_range='22') if rmreturn.status_code not in [200, 201]: print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. 
' + rmreturn.text) sys.exit() # create NIC nic_name = name + 'nic' print('Creating NIC: ' + nic_name) rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id, subnet_id, location, nsg_id=nsg_id) if rmreturn.status_code not in [200, 201]: print('Error ' + rmreturn.status_code + ' creating NSG rule. ' + rmreturn.text) sys.exit() nic_id = rmreturn.json()['id'] print('Waiting for NIC provisioning..') waiting = True while waiting: nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name) if nic['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(1) # create VM vm_name = name #publisher = 'CoreOS' #offer = 'CoreOS' #sku = 'Stable' publisher = 'Canonical' offer = 'UbuntuServer' sku = '16.04-LTS' version = 'latest' print('Creating VM: ' + vm_name) if use_password is True: rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize, publisher, offer, sku, version, nic_id, location, username=username, password=password) else: rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize, publisher, offer, sku, version, nic_id, location, username=username, public_key=sshkey) if rmreturn.status_code != 201: sys.exit('Error ' + rmreturn.status_code + ' creating VM. ' + rmreturn.text) if no_wait is False: print('Waiting for VM provisioning..') waiting = True while waiting: vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name) if vm_model['properties']['provisioningState'] == 'Succeeded': waiting = False time.sleep(5) print('VM provisioning complete.') print('Connect with:') print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username)
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/jumpbox.py#L12-L245
gbowerman/azurerm
examples/insights_metrics.py
main
def main(): '''Main routine.''' # process arguments if len(sys.argv) < 3: usage() rgname = sys.argv[1] vmss = sys.argv[2] # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] sub_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get metric definitions provider = 'Microsoft.Compute' resource_type = 'virtualMachineScaleSets' metric_definitions = azurerm.list_metric_defs_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metric_definitions, sort_keys=False, indent=2, separators=(',', ': '))) metrics = azurerm.get_metrics_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))
python
def main(): '''Main routine.''' # process arguments if len(sys.argv) < 3: usage() rgname = sys.argv[1] vmss = sys.argv[2] # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit("Error: Expecting azurermconfig.json in current folder") tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] sub_id = config_data['subscriptionId'] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get metric definitions provider = 'Microsoft.Compute' resource_type = 'virtualMachineScaleSets' metric_definitions = azurerm.list_metric_defs_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metric_definitions, sort_keys=False, indent=2, separators=(',', ': '))) metrics = azurerm.get_metrics_for_resource(access_token, sub_id, rgname, provider, resource_type, vmss) print(json.dumps(metrics, sort_keys=False, indent=2, separators=(',', ': ')))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/insights_metrics.py#L12-L48
gbowerman/azurerm
examples/list_vmss_pips.py
main
def main(): '''main reoutine''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='VMSS Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--details', '-a', required=False, action='store', help='Print all details') args = arg_parser.parse_args() name = args.vmssname rgname = args.rgname details = args.details # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: print("Error: Expecting azurermconfig.json in current folder") sys.exit() tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get public IPs public_ips = azurerm.get_vmss_public_ips(access_token, subscription_id, rgname, name) # print details if details is True: print(json.dumps(public_ips, sort_keys=False, indent=2, separators=(',', ': '))) else: for pip in public_ips['value']: vm_id = re.search('Machines/(.*)/networkInt', pip['id']).group(1) ipaddr = pip['properties']['ipAddress'] print('VM id: ' + vm_id + ', IP: ' + ipaddr)
python
def main(): '''main reoutine''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='VMSS Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--details', '-a', required=False, action='store', help='Print all details') args = arg_parser.parse_args() name = args.vmssname rgname = args.rgname details = args.details # Load Azure app defaults try: with open('azurermconfig.json') as config_file: config_data = json.load(config_file) except FileNotFoundError: print("Error: Expecting azurermconfig.json in current folder") sys.exit() tenant_id = config_data['tenantId'] app_id = config_data['appId'] app_secret = config_data['appSecret'] subscription_id = config_data['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # get public IPs public_ips = azurerm.get_vmss_public_ips(access_token, subscription_id, rgname, name) # print details if details is True: print(json.dumps(public_ips, sort_keys=False, indent=2, separators=(',', ': '))) else: for pip in public_ips['value']: vm_id = re.search('Machines/(.*)/networkInt', pip['id']).group(1) ipaddr = pip['properties']['ipAddress'] print('VM id: ' + vm_id + ', IP: ' + ipaddr)
main reoutine
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vmss_pips.py#L10-L51
gbowerman/azurerm
examples/list_rgs.py
main
def main(): '''Main routine.''' # if in Azure cloud shell, authenticate using the MSI endpoint if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: access_token = azurerm.get_access_token_from_cli() subscription_id = azurerm.get_subscription_from_cli() else: # load service principal details from a config file try: with open('azurermconfig.json') as configfile: configdata = json.load(configfile) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = configdata['tenantId'] app_id = configdata['appId'] app_secret = configdata['appSecret'] subscription_id = configdata['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # list resource groups resource_groups = azurerm.list_resource_groups(access_token, subscription_id) for rgname in resource_groups['value']: print(rgname['name'] + ', ' + rgname['location']) ''' rg_details = azurerm.get_resource_group(access_token, subscription_id, rgname['name']) print(json.dumps(rg_details, sort_keys=False, indent=2, separators=(',', ': '))) '''
python
def main(): '''Main routine.''' # if in Azure cloud shell, authenticate using the MSI endpoint if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: access_token = azurerm.get_access_token_from_cli() subscription_id = azurerm.get_subscription_from_cli() else: # load service principal details from a config file try: with open('azurermconfig.json') as configfile: configdata = json.load(configfile) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = configdata['tenantId'] app_id = configdata['appId'] app_secret = configdata['appSecret'] subscription_id = configdata['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # list resource groups resource_groups = azurerm.list_resource_groups(access_token, subscription_id) for rgname in resource_groups['value']: print(rgname['name'] + ', ' + rgname['location']) ''' rg_details = azurerm.get_resource_group(access_token, subscription_id, rgname['name']) print(json.dumps(rg_details, sort_keys=False, indent=2, separators=(',', ': '))) '''
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_rgs.py#L8-L36
gbowerman/azurerm
azurerm/container.py
create_container_definition
def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5,
                                environment=None):
    '''Build a Python dictionary describing a single container.

    Args:
        container_name: The name of the container.
        image (str): Container image string. E.g. nginx.
        port (int): TCP port number. E.g. 8080.
        cpu (float): Amount of CPU to allocate to container. E.g. 1.0.
        memgb (float): Memory in GB to allocate to container. E.g. 1.5.
        environment (list): A list of [{'name':'envname', 'value':'envvalue'}].
            Sets environment variables in the container.

    Returns:
        A Python dictionary of container properties, pass a list of these to
        create_container_group().
    '''
    # assemble the per-container properties in the shape the ACI API expects
    properties = {
        'image': image,
        'ports': [{'port': port}],
        'resources': {'requests': {'cpu': cpu, 'memoryInGB': memgb}},
    }
    if environment is not None:
        properties['environmentVariables'] = environment
    return {'name': container_name, 'properties': properties}
python
def create_container_definition(container_name, image, port=80, cpu=1.0, memgb=1.5, environment=None): '''Makes a python dictionary of container properties. Args: container_name: The name of the container. image (str): Container image string. E.g. nginx. port (int): TCP port number. E.g. 8080. cpu (float): Amount of CPU to allocate to container. E.g. 1.0. memgb (float): Memory in GB to allocate to container. E.g. 1.5. environment (list): A list of [{'name':'envname', 'value':'envvalue'}]. Sets environment variables in the container. Returns: A Python dictionary of container properties, pass a list of these to create_container_group(). ''' container = {'name': container_name} container_properties = {'image': image} container_properties['ports'] = [{'port': port}] container_properties['resources'] = { 'requests': {'cpu': cpu, 'memoryInGB': memgb}} container['properties'] = container_properties if environment is not None: container_properties['environmentVariables'] = environment return container
Makes a python dictionary of container properties. Args: container_name: The name of the container. image (str): Container image string. E.g. nginx. port (int): TCP port number. E.g. 8080. cpu (float): Amount of CPU to allocate to container. E.g. 1.0. memgb (float): Memory in GB to allocate to container. E.g. 1.5. environment (list): A list of [{'name':'envname', 'value':'envvalue'}]. Sets environment variables in the container. Returns: A Python dictionary of container properties, pass a list of these to create_container_group().
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L7-L33
gbowerman/azurerm
azurerm/container.py
create_container_instance_group
def create_container_instance_group(access_token, subscription_id, resource_group,
                                    container_group_name, container_list, location,
                                    ostype='Linux', port=80, iptype='public'):
    '''Create a new container group from a list of container definitions.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.
        container_list (list): A list of container properties. Use
            create_container_definition to create each container property set.
        location (str): Azure data center location. E.g. westus.
        ostype (str): Container operating system type. Linux or Windows.
        port (int): TCP port number. E.g. 8080.
        iptype (str): Type of IP address. E.g. public.

    Returns:
        HTTP response with JSON body of container group.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          resource_group, container_group_name,
                                          CONTAINER_API)
    # request body: location plus the container group properties
    container_group = {
        'location': location,
        'properties': {
            'osType': ostype,
            'containers': container_list,
            'ipAddress': {
                'ports': [{'protocol': 'TCP', 'port': port}],
                'type': iptype,
            },
        },
    }
    return do_put(endpoint, json.dumps(container_group), access_token)
python
def create_container_instance_group(access_token, subscription_id, resource_group, container_group_name, container_list, location, ostype='Linux', port=80, iptype='public'): '''Create a new container group with a list of containers specifified by container_list. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_list (list): A list of container properties. Use create_container_definition to create each container property set. location (str): Azure data center location. E.g. westus. ostype (str): Container operating system type. Linux or Windows. port (int): TCP port number. E.g. 8080. iptype (str): Type of IP address. E.g. public. Returns: HTTP response with JSON body of container group. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) container_group_body = {'location': location} properties = {'osType': ostype} properties['containers'] = container_list ipport = {'protocol': 'TCP'} ipport['port'] = port ipaddress = {'ports': [ipport]} ipaddress['type'] = iptype properties['ipAddress'] = ipaddress container_group_body['properties'] = properties body = json.dumps(container_group_body) return do_put(endpoint, body, access_token)
Create a new container group with a list of containers specified by container_list. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_list (list): A list of container properties. Use create_container_definition to create each container property set. location (str): Azure data center location. E.g. westus. ostype (str): Container operating system type. Linux or Windows. port (int): TCP port number. E.g. 8080. iptype (str): Type of IP address. E.g. public. Returns: HTTP response with JSON body of container group.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L36-L72
gbowerman/azurerm
azurerm/container.py
delete_container_instance_group
def delete_container_instance_group(access_token, subscription_id, resource_group,
                                    container_group_name):
    '''Delete a container group from a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.

    Returns:
        HTTP response.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          resource_group, container_group_name,
                                          CONTAINER_API)
    return do_delete(endpoint, access_token)
python
def delete_container_instance_group(access_token, subscription_id, resource_group, container_group_name): '''Delete a container group from a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) return do_delete(endpoint, access_token)
Delete a container group from a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L75-L94
gbowerman/azurerm
azurerm/container.py
get_container_instance_group
def get_container_instance_group(access_token, subscription_id, resource_group,
                                 container_group_name):
    '''Get the JSON definition of a container group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.

    Returns:
        HTTP response. JSON body of container group.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          resource_group, container_group_name,
                                          CONTAINER_API)
    return do_get(endpoint, access_token)
python
def get_container_instance_group(access_token, subscription_id, resource_group, container_group_name): '''Get the JSON definition of a container group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response. JSON body of container group. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '?api-version=', CONTAINER_API]) return do_get(endpoint, access_token)
Get the JSON definition of a container group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. Returns: HTTP response. JSON body of container group.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L97-L116
gbowerman/azurerm
azurerm/container.py
get_container_instance_logs
def get_container_instance_logs(access_token, subscription_id, resource_group,
                                container_group_name, container_name=None):
    '''Get the container logs for containers in a container group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.
        container_name (str): Optional name of a container in the group.

    Returns:
        HTTP response. Container logs.
    '''
    # when no container is named, default to one named after its group
    target_container = container_group_name if container_name is None else container_name
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
                '/containers/{}/logs?api-version={}').format(
                    get_rm_endpoint(), subscription_id, resource_group,
                    container_group_name, target_container, CONTAINER_API)
    return do_get(endpoint, access_token)
python
def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name, container_name=None): '''Get the container logs for containers in a container group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_name (str): Optional name of a container in the group. Returns: HTTP response. Container logs. ''' if container_name is None: container_name = container_group_name endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '/containers/', container_name, '/logs?api-version=', CONTAINER_API]) return do_get(endpoint, access_token)
Get the container logs for containers in a container group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. container_group_name (str): Name of container instance group. container_name (str): Optional name of a container in the group. Returns: HTTP response. Container logs.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L119-L141
gbowerman/azurerm
azurerm/container.py
list_container_instance_groups
def list_container_instance_groups(access_token, subscription_id, resource_group):
    '''List the container groups in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON list of container groups and their properties.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          resource_group, CONTAINER_API)
    return do_get(endpoint, access_token)
python
def list_container_instance_groups(access_token, subscription_id, resource_group): '''List the container groups in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON list of container groups and their properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups', '?api-version=', CONTAINER_API]) return do_get(endpoint, access_token)
List the container groups in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON list of container groups and their properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L144-L160
gbowerman/azurerm
azurerm/container.py
list_container_instance_groups_sub
def list_container_instance_groups_sub(access_token, subscription_id):
    '''List the container groups in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON list of container groups and their properties.
    '''
    endpoint = ('{}/subscriptions/{}'
                '/providers/Microsoft.ContainerInstance/ContainerGroups'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          CONTAINER_API)
    return do_get(endpoint, access_token)
python
def list_container_instance_groups_sub(access_token, subscription_id): '''List the container groups in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of container groups and their properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.ContainerInstance/ContainerGroups', '?api-version=', CONTAINER_API]) return do_get(endpoint, access_token)
List the container groups in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON list of container groups and their properties.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/container.py#L163-L177
gbowerman/azurerm
azurerm/resourcegroups.py
create_resource_group
def create_resource_group(access_token, subscription_id, rgname, location):
    '''Create a resource group in the specified location.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        location (str): Azure data center location. E.g. westus.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = '{}/subscriptions/{}/resourcegroups/{}?api-version={}'.format(
        get_rm_endpoint(), subscription_id, rgname, RESOURCE_API)
    # only the location is required to create an empty resource group
    body = json.dumps({'location': location})
    return do_put(endpoint, body, access_token)
python
def create_resource_group(access_token, subscription_id, rgname, location): '''Create a resource group in the specified location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. location (str): Azure data center location. E.g. westus. Returns: HTTP response. JSON body. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API]) rg_body = {'location': location} body = json.dumps(rg_body) return do_put(endpoint, body, access_token)
Create a resource group in the specified location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. location (str): Azure data center location. E.g. westus. Returns: HTTP response. JSON body.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/resourcegroups.py#L7-L25
gbowerman/azurerm
azurerm/resourcegroups.py
delete_resource_group
def delete_resource_group(access_token, subscription_id, rgname):
    '''Delete the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response.
    '''
    endpoint = '{}/subscriptions/{}/resourcegroups/{}?api-version={}'.format(
        get_rm_endpoint(), subscription_id, rgname, RESOURCE_API)
    return do_delete(endpoint, access_token)
python
def delete_resource_group(access_token, subscription_id, rgname): '''Delete the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '?api-version=', RESOURCE_API]) return do_delete(endpoint, access_token)
Delete the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/resourcegroups.py#L28-L43
gbowerman/azurerm
azurerm/resourcegroups.py
export_template
def export_template(access_token, subscription_id, rgname):
    '''Capture the specified resource group as a template.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = ('{}/subscriptions/{}/resourcegroups/{}/exportTemplate'
                '?api-version={}').format(get_rm_endpoint(), subscription_id,
                                          rgname, RESOURCE_API)
    # export every resource and include parameter defaults in the template
    export_spec = {'options': 'IncludeParameterDefaultValue', 'resources': ['*']}
    return do_post(endpoint, json.dumps(export_spec), access_token)
python
def export_template(access_token, subscription_id, rgname): '''Capture the specified resource group as a template Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/exportTemplate', '?api-version=', RESOURCE_API]) rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']} body = json.dumps(rg_body) return do_post(endpoint, body, access_token)
Capture the specified resource group as a template Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/resourcegroups.py#L46-L64
gbowerman/azurerm
azurerm/resourcegroups.py
get_resource_group
def get_resource_group(access_token, subscription_id, rgname):
    '''Get details about the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = '{}/subscriptions/{}/resourceGroups/{}?api-version={}'.format(
        get_rm_endpoint(), subscription_id, rgname, RESOURCE_API)
    return do_get(endpoint, access_token)
python
def get_resource_group(access_token, subscription_id, rgname): '''Get details about the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', rgname, '?api-version=', RESOURCE_API]) return do_get(endpoint, access_token)
Get details about the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/resourcegroups.py#L67-L82
gbowerman/azurerm
azurerm/resourcegroups.py
list_resource_groups
def list_resource_groups(access_token, subscription_id):
    '''List the resource groups in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response.
    '''
    endpoint = '{}/subscriptions/{}/resourceGroups/?api-version={}'.format(
        get_rm_endpoint(), subscription_id, RESOURCE_API)
    return do_get(endpoint, access_token)
python
def list_resource_groups(access_token, subscription_id): '''List the resource groups in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', '?api-version=', RESOURCE_API]) return do_get(endpoint, access_token)
List the resource groups in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/resourcegroups.py#L103-L117
gbowerman/azurerm
examples/get_vmss_rolling_cloudshell.py
main
def main():
    '''Main routine: show rolling-upgrade status for a VM scale set.'''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--vmssname', '-n', required=True, action='store', help='VMSS Name')
    arg_parser.add_argument('--rgname', '-g', required=True, action='store',
                            help='Resource Group Name')
    arg_parser.add_argument('--details', '-a', required=False,
                            action='store_true', default=False,
                            help='Print all details')
    args = arg_parser.parse_args()

    name = args.vmssname
    rgname = args.rgname

    # authenticate with Azure CLI credentials (cloud shell)
    access_token = azurerm.get_access_token_from_cli()
    subscription_id = azurerm.get_subscription_from_cli()

    # get rolling upgrade latest status
    upgrade_status = azurerm.get_vmss_rolling_upgrades(
        access_token, subscription_id, rgname, name)

    # Bug fix: the original if/else printed the identical JSON dump in both
    # branches, so the --details flag had no effect; print the status once.
    print(json.dumps(upgrade_status, sort_keys=False,
                     indent=2, separators=(',', ': ')))
python
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument( '--vmssname', '-n', required=True, action='store', help='VMSS Name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--details', '-a', required=False, action='store_true', default=False, help='Print all details') args = arg_parser.parse_args() name = args.vmssname rgname = args.rgname details = args.details # authenticate access_token = azurerm.get_access_token_from_cli() subscription_id = azurerm.get_subscription_from_cli() # get rolling upgrade latest status upgrade_status = azurerm.get_vmss_rolling_upgrades( access_token, subscription_id, rgname, name) # print details if details is True: print(json.dumps(upgrade_status, sort_keys=False, indent=2, separators=(',', ': '))) else: print(json.dumps(upgrade_status, sort_keys=False, indent=2, separators=(',', ': ')))
Main routine.
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/get_vmss_rolling_cloudshell.py#L9-L39
byteweaver/django-coupons
coupons/south_migrations/0005_coupon_users.py
Migration.forwards
def forwards(self, orm):
    "Create a CouponUser row for every coupon that was assigned or redeemed."
    # Note: Don't use "from appname.models import ModelName".
    # Use orm.ModelName to refer to models in this application,
    # and orm['appname.ModelName'] for models in other applications.
    coupons = orm['coupons.Coupon'].objects.all()
    coupon_users = orm['coupons.CouponUser'].objects
    for coupon in coupons:
        # skip coupons that were never assigned to a user nor redeemed
        if coupon.user is None and coupon.redeemed_at is None:
            continue
        coupon_users.create(
            coupon=coupon,
            user=coupon.user,
            redeemed_at=coupon.redeemed_at,
        )
python
def forwards(self, orm): "Write your forwards methods here." # Note: Don't use "from appname.models import ModelName". # Use orm.ModelName to refer to models in this application, # and orm['appname.ModelName'] for models in other applications. for coupon in orm['coupons.Coupon'].objects.all(): if coupon.user is not None or coupon.redeemed_at is not None: orm['coupons.CouponUser'].objects.create( coupon=coupon, user=coupon.user, redeemed_at=coupon.redeemed_at )
Write your forwards methods here.
https://github.com/byteweaver/django-coupons/blob/27e15403b6aa99997a9e5239949b4c462c0ed2c2/coupons/south_migrations/0005_coupon_users.py#L9-L20
paylogic/pip-accel
pip_accel/caches/__init__.py
CacheManager.get
def get(self, requirement):
    """
    Get a distribution archive from any of the available caches.

    :param requirement: A :class:`.Requirement` object.
    :returns: The absolute pathname of a local file or :data:`None` when the
              distribution archive is missing from all available caches.
    """
    archive_name = self.generate_filename(requirement)
    # Iterate over a snapshot so failing backends can be removed mid-loop.
    for cache in list(self.backends):
        try:
            pathname = cache.get(archive_name)
        except CacheBackendDisabledError as error:
            logger.debug("Disabling %s because it requires configuration: %s", cache, error)
            self.backends.remove(cache)
        except Exception as error:
            logger.exception("Disabling %s because it failed: %s", cache, error)
            self.backends.remove(cache)
        else:
            if pathname is not None:
                return pathname
python
def get(self, requirement): """ Get a distribution archive from any of the available caches. :param requirement: A :class:`.Requirement` object. :returns: The absolute pathname of a local file or :data:`None` when the distribution archive is missing from all available caches. """ filename = self.generate_filename(requirement) for backend in list(self.backends): try: pathname = backend.get(filename) if pathname is not None: return pathname except CacheBackendDisabledError as e: logger.debug("Disabling %s because it requires configuration: %s", backend, e) self.backends.remove(backend) except Exception as e: logger.exception("Disabling %s because it failed: %s", backend, e) self.backends.remove(backend)
Get a distribution archive from any of the available caches. :param requirement: A :class:`.Requirement` object. :returns: The absolute pathname of a local file or :data:`None` when the distribution archive is missing from all available caches.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/__init__.py#L159-L178
paylogic/pip-accel
pip_accel/caches/__init__.py
CacheManager.put
def put(self, requirement, handle):
    """
    Store a distribution archive in all of the available caches.

    :param requirement: A :class:`.Requirement` object.
    :param handle: A file-like object that provides access to the
                   distribution archive.
    """
    archive_name = self.generate_filename(requirement)
    # Iterate over a snapshot so failing backends can be removed mid-loop.
    for cache in list(self.backends):
        # rewind so every backend reads the archive from the beginning
        handle.seek(0)
        try:
            cache.put(archive_name, handle)
        except CacheBackendDisabledError as error:
            logger.debug("Disabling %s because it requires configuration: %s", cache, error)
            self.backends.remove(cache)
        except Exception as error:
            logger.exception("Disabling %s because it failed: %s", cache, error)
            self.backends.remove(cache)
python
def put(self, requirement, handle): """ Store a distribution archive in all of the available caches. :param requirement: A :class:`.Requirement` object. :param handle: A file-like object that provides access to the distribution archive. """ filename = self.generate_filename(requirement) for backend in list(self.backends): handle.seek(0) try: backend.put(filename, handle) except CacheBackendDisabledError as e: logger.debug("Disabling %s because it requires configuration: %s", backend, e) self.backends.remove(backend) except Exception as e: logger.exception("Disabling %s because it failed: %s", backend, e) self.backends.remove(backend)
Store a distribution archive in all of the available caches. :param requirement: A :class:`.Requirement` object. :param handle: A file-like object that provides access to the distribution archive.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/__init__.py#L180-L198
paylogic/pip-accel
pip_accel/caches/__init__.py
CacheManager.generate_filename
def generate_filename(self, requirement):
    """
    Generate a distribution archive filename for a package.

    :param requirement: A :class:`.Requirement` object.
    :returns: The filename of the distribution archive (a string) including a
              single leading directory component to indicate the cache format
              revision.
    """
    # interpolation order must match FILENAME_PATTERN's placeholders
    interpolation_values = (self.config.cache_format_revision,
                            requirement.name,
                            requirement.version,
                            get_python_version())
    return FILENAME_PATTERN % interpolation_values
python
def generate_filename(self, requirement): """ Generate a distribution archive filename for a package. :param requirement: A :class:`.Requirement` object. :returns: The filename of the distribution archive (a string) including a single leading directory component to indicate the cache format revision. """ return FILENAME_PATTERN % (self.config.cache_format_revision, requirement.name, requirement.version, get_python_version())
Generate a distribution archive filename for a package. :param requirement: A :class:`.Requirement` object. :returns: The filename of the distribution archive (a string) including a single leading directory component to indicate the cache format revision.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/__init__.py#L200-L211
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.install_dependencies
def install_dependencies(self, requirement):
    """
    Install missing dependencies for the given requirement.

    :param requirement: A :class:`.Requirement` object.
    :returns: :data:`True` when missing system packages were installed,
              :data:`False` otherwise.
    :raises: :exc:`.DependencyInstallationRefused` when automatic installation
             is disabled or refused by the operator.
    :raises: :exc:`.DependencyInstallationFailed` when the installation of
             missing system packages fails.

    If `pip-accel` fails to build a binary distribution, it will call this
    method as a last chance to install missing dependencies. If this function
    does not raise an exception, `pip-accel` will retry the build once.
    """
    install_timer = Timer()
    missing_dependencies = self.find_missing_dependencies(requirement)
    if missing_dependencies:
        # Compose the command line for the install command.
        install_command = shlex.split(self.install_command) + missing_dependencies
        # Prepend `sudo' to the command line?
        if not WINDOWS and not is_root():
            # FIXME Ideally this should properly detect the presence of `sudo'.
            # Or maybe this should just be embedded in the *.ini files?
            install_command.insert(0, 'sudo')
        # Always suggest the installation command to the operator.
        logger.info("You seem to be missing %s: %s",
                    pluralize(len(missing_dependencies), "dependency", "dependencies"),
                    concatenate(missing_dependencies))
        logger.info("You can install %s with this command: %s",
                    "it" if len(missing_dependencies) == 1 else "them",
                    " ".join(install_command))
        if self.config.auto_install is False:
            # Refuse automatic installation and don't prompt the operator when the configuration says no.
            self.installation_refused(requirement, missing_dependencies, "automatic installation is disabled")
        # Get the operator's permission to install the missing package(s).
        if self.config.auto_install:
            logger.info("Got permission to install %s (via auto_install option).",
                        pluralize(len(missing_dependencies), "dependency", "dependencies"))
        elif self.confirm_installation(requirement, missing_dependencies, install_command):
            logger.info("Got permission to install %s (via interactive prompt).",
                        pluralize(len(missing_dependencies), "dependency", "dependencies"))
        else:
            logger.error("Refused installation of missing %s!",
                         "dependency" if len(missing_dependencies) == 1 else "dependencies")
            self.installation_refused(requirement, missing_dependencies, "manual installation was refused")
        # Run the actual install command; a zero exit status means success.
        if subprocess.call(install_command) == 0:
            logger.info("Successfully installed %s in %s.",
                        pluralize(len(missing_dependencies), "dependency", "dependencies"),
                        install_timer)
            return True
        else:
            logger.error("Failed to install %s.",
                         pluralize(len(missing_dependencies), "dependency", "dependencies"))
            msg = "Failed to install %s required by Python package %s! (%s)"
            raise DependencyInstallationFailed(msg % (pluralize(len(missing_dependencies), "system package", "system packages"),
                                                      requirement.name,
                                                      concatenate(missing_dependencies)))
    return False
python
def install_dependencies(self, requirement): """ Install missing dependencies for the given requirement. :param requirement: A :class:`.Requirement` object. :returns: :data:`True` when missing system packages were installed, :data:`False` otherwise. :raises: :exc:`.DependencyInstallationRefused` when automatic installation is disabled or refused by the operator. :raises: :exc:`.DependencyInstallationFailed` when the installation of missing system packages fails. If `pip-accel` fails to build a binary distribution, it will call this method as a last chance to install missing dependencies. If this function does not raise an exception, `pip-accel` will retry the build once. """ install_timer = Timer() missing_dependencies = self.find_missing_dependencies(requirement) if missing_dependencies: # Compose the command line for the install command. install_command = shlex.split(self.install_command) + missing_dependencies # Prepend `sudo' to the command line? if not WINDOWS and not is_root(): # FIXME Ideally this should properly detect the presence of `sudo'. # Or maybe this should just be embedded in the *.ini files? install_command.insert(0, 'sudo') # Always suggest the installation command to the operator. logger.info("You seem to be missing %s: %s", pluralize(len(missing_dependencies), "dependency", "dependencies"), concatenate(missing_dependencies)) logger.info("You can install %s with this command: %s", "it" if len(missing_dependencies) == 1 else "them", " ".join(install_command)) if self.config.auto_install is False: # Refuse automatic installation and don't prompt the operator when the configuration says no. self.installation_refused(requirement, missing_dependencies, "automatic installation is disabled") # Get the operator's permission to install the missing package(s). 
if self.config.auto_install: logger.info("Got permission to install %s (via auto_install option).", pluralize(len(missing_dependencies), "dependency", "dependencies")) elif self.confirm_installation(requirement, missing_dependencies, install_command): logger.info("Got permission to install %s (via interactive prompt).", pluralize(len(missing_dependencies), "dependency", "dependencies")) else: logger.error("Refused installation of missing %s!", "dependency" if len(missing_dependencies) == 1 else "dependencies") self.installation_refused(requirement, missing_dependencies, "manual installation was refused") if subprocess.call(install_command) == 0: logger.info("Successfully installed %s in %s.", pluralize(len(missing_dependencies), "dependency", "dependencies"), install_timer) return True else: logger.error("Failed to install %s.", pluralize(len(missing_dependencies), "dependency", "dependencies")) msg = "Failed to install %s required by Python package %s! (%s)" raise DependencyInstallationFailed(msg % (pluralize(len(missing_dependencies), "system package", "system packages"), requirement.name, concatenate(missing_dependencies))) return False
Install missing dependencies for the given requirement. :param requirement: A :class:`.Requirement` object. :returns: :data:`True` when missing system packages were installed, :data:`False` otherwise. :raises: :exc:`.DependencyInstallationRefused` when automatic installation is disabled or refused by the operator. :raises: :exc:`.DependencyInstallationFailed` when the installation of missing system packages fails. If `pip-accel` fails to build a binary distribution, it will call this method as a last chance to install missing dependencies. If this function does not raise an exception, `pip-accel` will retry the build once.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L84-L144
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.find_missing_dependencies
def find_missing_dependencies(self, requirement):
    """
    Find missing dependencies of a Python package.

    :param requirement: A :class:`.Requirement` object.
    :returns: A list of strings with system package names (an empty list when
              nothing is missing or no dependencies are known).
    """
    # Initialize up front so a list is always returned, even when no
    # dependencies are known for the package (the docstring promises a list).
    missing_dependencies = []
    known_dependencies = self.find_known_dependencies(requirement)
    if known_dependencies:
        installed_packages = self.find_installed_packages()
        logger.debug("Checking for missing dependencies of %s ..", requirement.name)
        missing_dependencies = sorted(set(known_dependencies).difference(installed_packages))
        if missing_dependencies:
            logger.debug("Found %s: %s",
                         pluralize(len(missing_dependencies), "missing dependency", "missing dependencies"),
                         concatenate(missing_dependencies))
        else:
            logger.info("All known dependencies are already installed.")
    return missing_dependencies
python
def find_missing_dependencies(self, requirement): """ Find missing dependencies of a Python package. :param requirement: A :class:`.Requirement` object. :returns: A list of strings with system package names. """ known_dependencies = self.find_known_dependencies(requirement) if known_dependencies: installed_packages = self.find_installed_packages() logger.debug("Checking for missing dependencies of %s ..", requirement.name) missing_dependencies = sorted(set(known_dependencies).difference(installed_packages)) if missing_dependencies: logger.debug("Found %s: %s", pluralize(len(missing_dependencies), "missing dependency", "missing dependencies"), concatenate(missing_dependencies)) else: logger.info("All known dependencies are already installed.") return missing_dependencies
Find missing dependencies of a Python package. :param requirement: A :class:`.Requirement` object. :returns: A list of strings with system package names.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L146-L164
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.find_known_dependencies
def find_known_dependencies(self, requirement):
    """
    Find the known dependencies of a Python package.

    :param requirement: A :class:`.Requirement` object.
    :returns: A list of strings with system package names.
    """
    logger.info("Checking for known dependencies of %s ..", requirement.name)
    # Dependency names are registered under the lowercased package name.
    registered = self.dependencies.get(requirement.name.lower(), [])
    known_dependencies = sorted(registered)
    if not known_dependencies:
        logger.info("No known dependencies... Maybe you have a suggestion?")
    else:
        logger.info("Found %s: %s",
                    pluralize(len(known_dependencies), "known dependency", "known dependencies"),
                    concatenate(known_dependencies))
    return known_dependencies
python
def find_known_dependencies(self, requirement): """ Find the known dependencies of a Python package. :param requirement: A :class:`.Requirement` object. :returns: A list of strings with system package names. """ logger.info("Checking for known dependencies of %s ..", requirement.name) known_dependencies = sorted(self.dependencies.get(requirement.name.lower(), [])) if known_dependencies: logger.info("Found %s: %s", pluralize(len(known_dependencies), "known dependency", "known dependencies"), concatenate(known_dependencies)) else: logger.info("No known dependencies... Maybe you have a suggestion?") return known_dependencies
Find the known dependencies of a Python package. :param requirement: A :class:`.Requirement` object. :returns: A list of strings with system package names.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L166-L180
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.find_installed_packages
def find_installed_packages(self):
    """
    Find the installed system packages.

    :returns: A list of strings with system package names.
    :raises: :exc:`.SystemDependencyError` when the command to list the
             installed system packages fails.
    """
    # The list command is a shell command line configured per platform.
    process = subprocess.Popen(self.list_command, shell=True, stdout=subprocess.PIPE)
    output, _ = process.communicate()
    if process.returncode != 0:
        raise SystemDependencyError("The command to list the installed system packages failed! ({command})",
                                    command=self.list_command)
    installed_packages = sorted(output.decode().split())
    logger.debug("Found %i installed system package(s): %s", len(installed_packages), installed_packages)
    return installed_packages
python
def find_installed_packages(self): """ Find the installed system packages. :returns: A list of strings with system package names. :raises: :exc:`.SystemDependencyError` when the command to list the installed system packages fails. """ list_command = subprocess.Popen(self.list_command, shell=True, stdout=subprocess.PIPE) stdout, stderr = list_command.communicate() if list_command.returncode != 0: raise SystemDependencyError("The command to list the installed system packages failed! ({command})", command=self.list_command) installed_packages = sorted(stdout.decode().split()) logger.debug("Found %i installed system package(s): %s", len(installed_packages), installed_packages) return installed_packages
Find the installed system packages. :returns: A list of strings with system package names. :raises: :exc:`.SystemDependencyError` when the command to list the installed system packages fails.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L182-L197
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.installation_refused
def installation_refused(self, requirement, missing_dependencies, reason):
    """
    Raise :exc:`.DependencyInstallationRefused` with a user friendly message.

    :param requirement: A :class:`.Requirement` object.
    :param missing_dependencies: A list of strings with missing dependencies.
    :param reason: The reason why installation was refused (a string).
    """
    template = "Missing %s (%s) required by Python package %s (%s) but %s!"
    details = (pluralize(len(missing_dependencies), "system package", "system packages"),
               concatenate(missing_dependencies),
               requirement.name,
               requirement.version,
               reason)
    raise DependencyInstallationRefused(template % details)
python
def installation_refused(self, requirement, missing_dependencies, reason): """ Raise :exc:`.DependencyInstallationRefused` with a user friendly message. :param requirement: A :class:`.Requirement` object. :param missing_dependencies: A list of strings with missing dependencies. :param reason: The reason why installation was refused (a string). """ msg = "Missing %s (%s) required by Python package %s (%s) but %s!" raise DependencyInstallationRefused( msg % (pluralize(len(missing_dependencies), "system package", "system packages"), concatenate(missing_dependencies), requirement.name, requirement.version, reason) )
Raise :exc:`.DependencyInstallationRefused` with a user friendly message. :param requirement: A :class:`.Requirement` object. :param missing_dependencies: A list of strings with missing dependencies. :param reason: The reason why installation was refused (a string).
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L199-L214
paylogic/pip-accel
pip_accel/deps/__init__.py
SystemPackageManager.confirm_installation
def confirm_installation(self, requirement, missing_dependencies, install_command):
    """
    Ask the operator's permission to install missing system packages.

    :param requirement: A :class:`.Requirement` object (not used by this
                        implementation).
    :param missing_dependencies: A list of strings with missing dependencies
                                 (only the count is used, to pick singular or
                                 plural wording).
    :param install_command: A list of strings with the command line needed to
                            install the missing dependencies (not used by this
                            implementation).
    :returns: :data:`True` when the operator confirms, :data:`False` when the
              operator declines (Control-C counts as a negative answer).
    """
    # NOTE(review): `format` here is presumably humanfriendly's printf-style
    # helper (it takes a template plus positional arguments), not the builtin
    # format() -- confirm against this module's imports.
    try:
        return prompt_for_confirmation(format(
            "Do you want me to install %s %s?",
            "this" if len(missing_dependencies) == 1 else "these",
            "dependency" if len(missing_dependencies) == 1 else "dependencies",
        ), default=True)
    except KeyboardInterrupt:
        # Control-C is a negative response but doesn't
        # otherwise interrupt the program flow.
        return False
python
def confirm_installation(self, requirement, missing_dependencies, install_command): """ Ask the operator's permission to install missing system packages. :param requirement: A :class:`.Requirement` object. :param missing_dependencies: A list of strings with missing dependencies. :param install_command: A list of strings with the command line needed to install the missing dependencies. :raises: :exc:`.DependencyInstallationRefused` when the operator refuses. """ try: return prompt_for_confirmation(format( "Do you want me to install %s %s?", "this" if len(missing_dependencies) == 1 else "these", "dependency" if len(missing_dependencies) == 1 else "dependencies", ), default=True) except KeyboardInterrupt: # Control-C is a negative response but doesn't # otherwise interrupt the program flow. return False
Ask the operator's permission to install missing system packages. :param requirement: A :class:`.Requirement` object. :param missing_dependencies: A list of strings with missing dependencies. :param install_command: A list of strings with the command line needed to install the missing dependencies. :raises: :exc:`.DependencyInstallationRefused` when the operator refuses.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/deps/__init__.py#L216-L235
paylogic/pip-accel
pip_accel/utils.py
compact
def compact(text, **kw):
    """
    Compact whitespace in a string and format any keyword arguments into the string.

    :param text: The text to compact (a string).
    :param kw: Any keyword arguments to apply using :func:`str.format()`.
    :returns: The compacted, formatted string.

    The whitespace compaction preserves paragraphs (runs of text separated by
    blank lines).
    """
    compacted_paragraphs = []
    for paragraph in text.split('\n\n'):
        # Collapse all internal whitespace runs to single spaces.
        compacted_paragraphs.append(' '.join(paragraph.split()))
    return '\n\n'.join(compacted_paragraphs).format(**kw)
python
def compact(text, **kw): """ Compact whitespace in a string and format any keyword arguments into the string. :param text: The text to compact (a string). :param kw: Any keyword arguments to apply using :func:`str.format()`. :returns: The compacted, formatted string. The whitespace compaction preserves paragraphs. """ return '\n\n'.join(' '.join(p.split()) for p in text.split('\n\n')).format(**kw)
Compact whitespace in a string and format any keyword arguments into the string. :param text: The text to compact (a string). :param kw: Any keyword arguments to apply using :func:`str.format()`. :returns: The compacted, formatted string. The whitespace compaction preserves paragraphs.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L41-L51
paylogic/pip-accel
pip_accel/utils.py
expand_path
def expand_path(pathname):
    """
    Expand the home directory in a pathname based on the effective user id.

    :param pathname: A pathname that may start with ``~/``, indicating the path
                     should be interpreted as being relative to the home
                     directory of the current (effective) user.
    :returns: The (modified) pathname.

    This function is a variant of :func:`os.path.expanduser()` that doesn't use
    ``$HOME`` but instead uses the home directory of the effective user id.
    This is basically a workaround for ``sudo -s`` not resetting ``$HOME``.
    """
    # This was once implemented with regular expressions, which proved error
    # prone, hence the direct string manipulation below.
    home_directory = find_home_directory()
    # os.altsep is None on POSIX, hence the conditional add.
    known_separators = {os.sep}
    if os.altsep is not None:
        known_separators.add(os.altsep)
    # Only rewrite pathnames of the form `~/...' (or `~\...' on Windows).
    if pathname.startswith('~') and len(pathname) >= 2 and pathname[1] in known_separators:
        pathname = os.path.join(home_directory, pathname[2:])
    # Also expand environment variables.
    return parse_path(pathname)
python
def expand_path(pathname): """ Expand the home directory in a pathname based on the effective user id. :param pathname: A pathname that may start with ``~/``, indicating the path should be interpreted as being relative to the home directory of the current (effective) user. :returns: The (modified) pathname. This function is a variant of :func:`os.path.expanduser()` that doesn't use ``$HOME`` but instead uses the home directory of the effective user id. This is basically a workaround for ``sudo -s`` not resetting ``$HOME``. """ # The following logic previously used regular expressions but that approach # turned out to be very error prone, hence the current contraption based on # direct string manipulation :-). home_directory = find_home_directory() separators = set([os.sep]) if os.altsep is not None: separators.add(os.altsep) if len(pathname) >= 2 and pathname[0] == '~' and pathname[1] in separators: pathname = os.path.join(home_directory, pathname[2:]) # Also expand environment variables. return parse_path(pathname)
Expand the home directory in a pathname based on the effective user id. :param pathname: A pathname that may start with ``~/``, indicating the path should be interpreted as being relative to the home directory of the current (effective) user. :returns: The (modified) pathname. This function is a variant of :func:`os.path.expanduser()` that doesn't use ``$HOME`` but instead uses the home directory of the effective user id. This is basically a workaround for ``sudo -s`` not resetting ``$HOME``.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L54-L77
paylogic/pip-accel
pip_accel/utils.py
find_home_directory
def find_home_directory():
    """
    Look up the home directory of the effective user id.

    :returns: The pathname of the home directory (a string).

    .. note:: On Windows this uses the ``%APPDATA%`` environment variable (if
              available) and otherwise falls back to ``~/Application Data``.
    """
    if not WINDOWS:
        # The pwd module isn't available on Windows, hence the lazy import.
        import pwd
        # Resolve the home directory of the *effective* user id so pathnames
        # remain correct under e.g. `sudo -s'.
        return pwd.getpwuid(os.getuid()).pw_dir
    return os.environ.get('APPDATA') or os.path.expanduser(r'~\Application Data')
python
def find_home_directory(): """ Look up the home directory of the effective user id. :returns: The pathname of the home directory (a string). .. note:: On Windows this uses the ``%APPDATA%`` environment variable (if available) and otherwise falls back to ``~/Application Data``. """ if WINDOWS: directory = os.environ.get('APPDATA') if not directory: directory = os.path.expanduser(r'~\Application Data') else: # This module isn't available on Windows so we have to import it here. import pwd # Look up the home directory of the effective user id so we can # generate pathnames relative to the home directory. entry = pwd.getpwuid(os.getuid()) directory = entry.pw_dir return directory
Look up the home directory of the effective user id. :returns: The pathname of the home directory (a string). .. note:: On Windows this uses the ``%APPDATA%`` environment variable (if available) and otherwise falls back to ``~/Application Data``.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L90-L110
paylogic/pip-accel
pip_accel/utils.py
makedirs
def makedirs(path, mode=0o777):
    """
    Create a directory if it doesn't already exist (keeping concurrency in mind).

    :param path: The pathname of the directory to create (a string).
    :param mode: The mode to apply to newly created directories (an integer,
                 defaults to the octal number ``0777``).
    :returns: :data:`True` when the directory was created, :data:`False` if it
              already existed.
    :raises: Any exceptions raised by :func:`os.makedirs()` except for
             :data:`errno.EEXIST` (this error is swallowed and :data:`False`
             is returned instead).
    """
    try:
        os.makedirs(path, mode)
    except OSError as error:
        if error.errno == errno.EEXIST:
            # Another process may have created the directory in the mean time;
            # that outcome is equivalent to "it already existed".
            return False
        # Propagate any other error because it could be hiding a real problem.
        raise
    return True
python
def makedirs(path, mode=0o777): """ Create a directory if it doesn't already exist (keeping concurrency in mind). :param path: The pathname of the directory to create (a string). :param mode: The mode to apply to newly created directories (an integer, defaults to the octal number ``0777``). :returns: :data:`True` when the directory was created, :data:`False` if it already existed. :raises: Any exceptions raised by :func:`os.makedirs()` except for :data:`errno.EEXIST` (this error is swallowed and :data:`False` is returned instead). """ try: os.makedirs(path, mode) return True except OSError as e: if e.errno != errno.EEXIST: # We don't want to swallow errors other than EEXIST, # because we could be obscuring a real problem. raise return False
Create a directory if it doesn't already exist (keeping concurrency in mind). :param path: The pathname of the directory to create (a string). :param mode: The mode to apply to newly created directories (an integer, defaults to the octal number ``0777``). :returns: :data:`True` when the directory was created, :data:`False` if it already existed. :raises: Any exceptions raised by :func:`os.makedirs()` except for :data:`errno.EEXIST` (this error is swallowed and :data:`False` is returned instead).
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L141-L162
paylogic/pip-accel
pip_accel/utils.py
same_directories
def same_directories(path1, path2):
    """
    Check if two pathnames refer to the same directory.

    :param path1: The first pathname (a string).
    :param path2: The second pathname (a string).
    :returns: :data:`True` if both pathnames refer to the same directory,
              :data:`False` otherwise.
    """
    # Guard clause: anything that isn't a directory can't match.
    if not (os.path.isdir(path1) and os.path.isdir(path2)):
        return False
    try:
        return os.path.samefile(path1, path2)
    except AttributeError:
        # On Windows and Python 2 os.path.samefile() is unavailable.
        return os.path.realpath(path1) == os.path.realpath(path2)
python
def same_directories(path1, path2): """ Check if two pathnames refer to the same directory. :param path1: The first pathname (a string). :param path2: The second pathname (a string). :returns: :data:`True` if both pathnames refer to the same directory, :data:`False` otherwise. """ if all(os.path.isdir(p) for p in (path1, path2)): try: return os.path.samefile(path1, path2) except AttributeError: # On Windows and Python 2 os.path.samefile() is unavailable. return os.path.realpath(path1) == os.path.realpath(path2) else: return False
Check if two pathnames refer to the same directory. :param path1: The first pathname (a string). :param path2: The second pathname (a string). :returns: :data:`True` if both pathnames refer to the same directory, :data:`False` otherwise.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L165-L181
paylogic/pip-accel
pip_accel/utils.py
hash_files
def hash_files(method, *files):
    """
    Calculate the hexadecimal digest of one or more local files.

    :param method: The hash method (a string, given to :func:`hashlib.new()`).
    :param files: The pathname(s) of file(s) to hash (zero or more strings).
    :returns: The calculated hex digest (a string).
    """
    digest = hashlib.new(method)
    for pathname in files:
        with open(pathname, 'rb') as handle:
            # Feed the hash in fixed size chunks so arbitrarily large files
            # can be processed without excessive memory use.
            for chunk in iter(lambda: handle.read(4096), b''):
                digest.update(chunk)
    return digest.hexdigest()
python
def hash_files(method, *files): """ Calculate the hexadecimal digest of one or more local files. :param method: The hash method (a string, given to :func:`hashlib.new()`). :param files: The pathname(s) of file(s) to hash (zero or more strings). :returns: The calculated hex digest (a string). """ context = hashlib.new(method) for filename in files: with open(filename, 'rb') as handle: while True: chunk = handle.read(4096) if not chunk: break context.update(chunk) return context.hexdigest()
Calculate the hexadecimal digest of one or more local files. :param method: The hash method (a string, given to :func:`hashlib.new()`). :param files: The pathname(s) of file(s) to hash (zero or more strings). :returns: The calculated hex digest (a string).
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L184-L200
paylogic/pip-accel
pip_accel/utils.py
replace_file
def replace_file(src, dst):
    """
    Overwrite a file (in an atomic fashion when possible).

    :param src: The pathname of the source file (a string).
    :param dst: The pathname of the destination file (a string).
    """
    # os.replace() was introduced in Python 3.3 and overwrites atomically on
    # POSIX as well as Windows systems.
    try:
        os.replace(src, dst)
    except AttributeError:
        pass
    else:
        return
    # os.rename() is atomic on UNIX but refuses to overwrite
    # existing files on Windows.
    try:
        os.rename(src, dst)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise
    else:
        return
    # Finally we fall back to the dumb approach required only on Windows.
    # See https://bugs.python.org/issue8828 for a long winded discussion.
    os.remove(dst)
    os.rename(src, dst)
python
def replace_file(src, dst): """ Overwrite a file (in an atomic fashion when possible). :param src: The pathname of the source file (a string). :param dst: The pathname of the destination file (a string). """ # Try os.replace() which was introduced in Python 3.3 # (this should work on POSIX as well as Windows systems). try: os.replace(src, dst) return except AttributeError: pass # Try os.rename() which is atomic on UNIX but refuses to overwrite existing # files on Windows. try: os.rename(src, dst) return except OSError as e: if e.errno != errno.EEXIST: raise # Finally we fall back to the dumb approach required only on Windows. # See https://bugs.python.org/issue8828 for a long winded discussion. os.remove(dst) os.rename(src, dst)
Overwrite a file (in an atomic fashion when possible). :param src: The pathname of the source file (a string). :param dst: The pathname of the destination file (a string).
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L203-L228
paylogic/pip-accel
pip_accel/utils.py
requirement_is_installed
def requirement_is_installed(expr):
    """
    Check whether a requirement is installed.

    :param expr: A requirement specification similar to those used in pip
                 requirement files (a string).
    :returns: :data:`True` if the requirement is available (installed),
              :data:`False` otherwise.
    """
    required_dist = next(parse_requirements(expr))
    try:
        installed_dist = get_distribution(required_dist.key)
    except DistributionNotFound:
        return False
    # Containment checks that the installed version satisfies the specifier.
    return installed_dist in required_dist
python
def requirement_is_installed(expr): """ Check whether a requirement is installed. :param expr: A requirement specification similar to those used in pip requirement files (a string). :returns: :data:`True` if the requirement is available (installed), :data:`False` otherwise. """ required_dist = next(parse_requirements(expr)) try: installed_dist = get_distribution(required_dist.key) return installed_dist in required_dist except DistributionNotFound: return False
Check whether a requirement is installed. :param expr: A requirement specification similar to those used in pip requirement files (a string). :returns: :data:`True` if the requirement is available (installed), :data:`False` otherwise.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L263-L277
paylogic/pip-accel
pip_accel/utils.py
uninstall
def uninstall(*package_names):
    """
    Uninstall one or more packages using the Python equivalent of ``pip uninstall --yes``.

    The package(s) to uninstall must be installed, otherwise pip will raise an
    ``UninstallationError``. You can check for installed packages using
    :func:`is_installed()`.

    :param package_names: The names of one or more Python packages (strings).
    """
    # `--yes' suppresses pip's interactive confirmation prompt.
    arguments = ['--yes']
    arguments.extend(package_names)
    command = UninstallCommand()
    options, args = command.parse_args(arguments)
    command.run(options, args)
python
def uninstall(*package_names): """ Uninstall one or more packages using the Python equivalent of ``pip uninstall --yes``. The package(s) to uninstall must be installed, otherwise pip will raise an ``UninstallationError``. You can check for installed packages using :func:`is_installed()`. :param package_names: The names of one or more Python packages (strings). """ command = UninstallCommand() opts, args = command.parse_args(['--yes'] + list(package_names)) command.run(opts, args)
Uninstall one or more packages using the Python equivalent of ``pip uninstall --yes``. The package(s) to uninstall must be installed, otherwise pip will raise an ``UninstallationError``. You can check for installed packages using :func:`is_installed()`. :param package_names: The names of one or more Python packages (strings).
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L290-L302
paylogic/pip-accel
pip_accel/utils.py
match_option
def match_option(argument, short_option, long_option): """ Match a command line argument against a short and long option. :param argument: The command line argument (a string). :param short_option: The short option (a string). :param long_option: The long option (a string). :returns: :data:`True` if the argument matches, :data:`False` otherwise. """ return short_option[1] in argument[1:] if is_short_option(argument) else argument == long_option
python
def match_option(argument, short_option, long_option): """ Match a command line argument against a short and long option. :param argument: The command line argument (a string). :param short_option: The short option (a string). :param long_option: The long option (a string). :returns: :data:`True` if the argument matches, :data:`False` otherwise. """ return short_option[1] in argument[1:] if is_short_option(argument) else argument == long_option
Match a command line argument against a short and long option. :param argument: The command line argument (a string). :param short_option: The short option (a string). :param long_option: The long option (a string). :returns: :data:`True` if the argument matches, :data:`False` otherwise.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L305-L314
paylogic/pip-accel
pip_accel/utils.py
match_option_with_value
def match_option_with_value(arguments, option, value): """ Check if a list of command line options contains an option with a value. :param arguments: The command line arguments (a list of strings). :param option: The long option (a string). :param value: The expected value (a string). :returns: :data:`True` if the command line contains the option/value pair, :data:`False` otherwise. """ return ('%s=%s' % (option, value) in arguments or contains_sublist(arguments, [option, value]))
python
def match_option_with_value(arguments, option, value): """ Check if a list of command line options contains an option with a value. :param arguments: The command line arguments (a list of strings). :param option: The long option (a string). :param value: The expected value (a string). :returns: :data:`True` if the command line contains the option/value pair, :data:`False` otherwise. """ return ('%s=%s' % (option, value) in arguments or contains_sublist(arguments, [option, value]))
Check if a list of command line options contains an option with a value. :param arguments: The command line arguments (a list of strings). :param option: The long option (a string). :param value: The expected value (a string). :returns: :data:`True` if the command line contains the option/value pair, :data:`False` otherwise.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L327-L338
paylogic/pip-accel
pip_accel/utils.py
contains_sublist
def contains_sublist(lst, sublst): """ Check if one list contains the items from another list (in the same order). :param lst: The main list. :param sublist: The sublist to check for. :returns: :data:`True` if the main list contains the items from the sublist in the same order, :data:`False` otherwise. Based on `this StackOverflow answer <http://stackoverflow.com/a/3314913>`_. """ n = len(sublst) return any((sublst == lst[i:i + n]) for i in range(len(lst) - n + 1))
python
def contains_sublist(lst, sublst): """ Check if one list contains the items from another list (in the same order). :param lst: The main list. :param sublist: The sublist to check for. :returns: :data:`True` if the main list contains the items from the sublist in the same order, :data:`False` otherwise. Based on `this StackOverflow answer <http://stackoverflow.com/a/3314913>`_. """ n = len(sublst) return any((sublst == lst[i:i + n]) for i in range(len(lst) - n + 1))
Check if one list contains the items from another list (in the same order). :param lst: The main list. :param sublist: The sublist to check for. :returns: :data:`True` if the main list contains the items from the sublist in the same order, :data:`False` otherwise. Based on `this StackOverflow answer <http://stackoverflow.com/a/3314913>`_.
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/utils.py#L341-L353
eyurtsev/fcsparser
fcsparser/api.py
fromfile
def fromfile(file, dtype, count, *args, **kwargs): """Wrapper around np.fromfile to support any file-like object.""" try: return numpy.fromfile(file, dtype=dtype, count=count, *args, **kwargs) except (TypeError, IOError): return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize), dtype=dtype, count=count, *args, **kwargs)
python
def fromfile(file, dtype, count, *args, **kwargs): """Wrapper around np.fromfile to support any file-like object.""" try: return numpy.fromfile(file, dtype=dtype, count=count, *args, **kwargs) except (TypeError, IOError): return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize), dtype=dtype, count=count, *args, **kwargs)
Wrapper around np.fromfile to support any file-like object.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L29-L35
eyurtsev/fcsparser
fcsparser/api.py
parse
def parse(path, meta_data_only=False, compensate=False, channel_naming='$PnS', reformat_meta=False, data_set=0, dtype='float32', encoding="utf-8"): """Parse an fcs file at the location specified by the path. Parameters ---------- path: str Path of .fcs file meta_data_only: bool If True, the parse_fcs only returns the meta_data (the TEXT segment of the FCS file) output_format: 'DataFrame' | 'ndarray' If set to 'DataFrame' the returned channel_naming: '$PnS' | '$PnN' Determines which meta data field is used for naming the channels. The default should be $PnS (even though it is not guaranteed to be unique) $PnN stands for the short name (guaranteed to be unique). Will look like 'FL1-H' $PnS stands for the actual name (not guaranteed to be unique). Will look like 'FSC-H' (Forward scatter) The chosen field will be used to population self.channels Note: These names are not flipped in the implementation. It looks like they were swapped for some reason in the official FCS specification. reformat_meta: bool If true, the meta data is reformatted with the channel information organized into a DataFrame and moved into the '_channels_' key data_set: int Index of retrieved data set in the fcs file. This value specifies the data set being retrieved from an fcs file with multiple data sets. dtype: str | None If provided, will force convert all data into this dtype. This is set by default to auto-convert to float32 to deal with cases in which the original data has been stored using a smaller data type (e.g., unit8). This modifies the original data, but should make follow up analysis safer in basically all cases. encoding: str Provide encoding type of the text section. 
Returns ------- if meta_data_only is True: meta_data: dict Contains a dictionary with the meta data information Otherwise: a 2-tuple with the first element the meta_data (dictionary) the second element the data (in either DataFrame or numpy format) Examples -------- fname = '../tests/data/EY_2013-05-03_EID_214_PID_1120_Piperacillin_Well_B7.001.fcs' meta = parse_fcs(fname, meta_data_only=True) meta, data_pandas = parse_fcs(fname, meta_data_only=False) """ if compensate: raise ParserFeatureNotImplementedError(u'Compensation has not been implemented yet.') read_data = not meta_data_only fcs_parser = FCSParser(path, read_data=read_data, channel_naming=channel_naming, data_set=data_set, encoding=encoding) if reformat_meta: fcs_parser.reformat_meta() meta = fcs_parser.annotation if meta_data_only: return meta else: # Then include both meta and dataframe. df = fcs_parser.dataframe df = df.astype(dtype) if dtype else df return meta, df
python
def parse(path, meta_data_only=False, compensate=False, channel_naming='$PnS', reformat_meta=False, data_set=0, dtype='float32', encoding="utf-8"): """Parse an fcs file at the location specified by the path. Parameters ---------- path: str Path of .fcs file meta_data_only: bool If True, the parse_fcs only returns the meta_data (the TEXT segment of the FCS file) output_format: 'DataFrame' | 'ndarray' If set to 'DataFrame' the returned channel_naming: '$PnS' | '$PnN' Determines which meta data field is used for naming the channels. The default should be $PnS (even though it is not guaranteed to be unique) $PnN stands for the short name (guaranteed to be unique). Will look like 'FL1-H' $PnS stands for the actual name (not guaranteed to be unique). Will look like 'FSC-H' (Forward scatter) The chosen field will be used to population self.channels Note: These names are not flipped in the implementation. It looks like they were swapped for some reason in the official FCS specification. reformat_meta: bool If true, the meta data is reformatted with the channel information organized into a DataFrame and moved into the '_channels_' key data_set: int Index of retrieved data set in the fcs file. This value specifies the data set being retrieved from an fcs file with multiple data sets. dtype: str | None If provided, will force convert all data into this dtype. This is set by default to auto-convert to float32 to deal with cases in which the original data has been stored using a smaller data type (e.g., unit8). This modifies the original data, but should make follow up analysis safer in basically all cases. encoding: str Provide encoding type of the text section. 
Returns ------- if meta_data_only is True: meta_data: dict Contains a dictionary with the meta data information Otherwise: a 2-tuple with the first element the meta_data (dictionary) the second element the data (in either DataFrame or numpy format) Examples -------- fname = '../tests/data/EY_2013-05-03_EID_214_PID_1120_Piperacillin_Well_B7.001.fcs' meta = parse_fcs(fname, meta_data_only=True) meta, data_pandas = parse_fcs(fname, meta_data_only=False) """ if compensate: raise ParserFeatureNotImplementedError(u'Compensation has not been implemented yet.') read_data = not meta_data_only fcs_parser = FCSParser(path, read_data=read_data, channel_naming=channel_naming, data_set=data_set, encoding=encoding) if reformat_meta: fcs_parser.reformat_meta() meta = fcs_parser.annotation if meta_data_only: return meta else: # Then include both meta and dataframe. df = fcs_parser.dataframe df = df.astype(dtype) if dtype else df return meta, df
Parse an fcs file at the location specified by the path. Parameters ---------- path: str Path of .fcs file meta_data_only: bool If True, the parse_fcs only returns the meta_data (the TEXT segment of the FCS file) output_format: 'DataFrame' | 'ndarray' If set to 'DataFrame' the returned channel_naming: '$PnS' | '$PnN' Determines which meta data field is used for naming the channels. The default should be $PnS (even though it is not guaranteed to be unique) $PnN stands for the short name (guaranteed to be unique). Will look like 'FL1-H' $PnS stands for the actual name (not guaranteed to be unique). Will look like 'FSC-H' (Forward scatter) The chosen field will be used to population self.channels Note: These names are not flipped in the implementation. It looks like they were swapped for some reason in the official FCS specification. reformat_meta: bool If true, the meta data is reformatted with the channel information organized into a DataFrame and moved into the '_channels_' key data_set: int Index of retrieved data set in the fcs file. This value specifies the data set being retrieved from an fcs file with multiple data sets. dtype: str | None If provided, will force convert all data into this dtype. This is set by default to auto-convert to float32 to deal with cases in which the original data has been stored using a smaller data type (e.g., unit8). This modifies the original data, but should make follow up analysis safer in basically all cases. encoding: str Provide encoding type of the text section. Returns ------- if meta_data_only is True: meta_data: dict Contains a dictionary with the meta data information Otherwise: a 2-tuple with the first element the meta_data (dictionary) the second element the data (in either DataFrame or numpy format) Examples -------- fname = '../tests/data/EY_2013-05-03_EID_214_PID_1120_Piperacillin_Well_B7.001.fcs' meta = parse_fcs(fname, meta_data_only=True) meta, data_pandas = parse_fcs(fname, meta_data_only=False)
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L499-L573
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.load_file
def load_file(self, file_handle, data_set=0, read_data=True): """Load the requested parts of the file into memory.""" file_handle.seek(0, 2) self._file_size = file_handle.tell() file_handle.seek(0) data_segments = 0 # seek the correct data set in fcs nextdata_offset = 0 while data_segments <= data_set: self.read_header(file_handle, nextdata_offset) self.read_text(file_handle) if '$NEXTDATA' in self.annotation: data_segments += 1 nextdata_offset = self.annotation['$NEXTDATA'] file_handle.seek(nextdata_offset) if nextdata_offset == 0 and data_segments < data_set: warnings.warn("File does not contain the number of data sets.") break else: if data_segments != 0: warnings.warn('File does not contain $NEXTDATA information.') break if read_data: self.read_data(file_handle)
python
def load_file(self, file_handle, data_set=0, read_data=True): """Load the requested parts of the file into memory.""" file_handle.seek(0, 2) self._file_size = file_handle.tell() file_handle.seek(0) data_segments = 0 # seek the correct data set in fcs nextdata_offset = 0 while data_segments <= data_set: self.read_header(file_handle, nextdata_offset) self.read_text(file_handle) if '$NEXTDATA' in self.annotation: data_segments += 1 nextdata_offset = self.annotation['$NEXTDATA'] file_handle.seek(nextdata_offset) if nextdata_offset == 0 and data_segments < data_set: warnings.warn("File does not contain the number of data sets.") break else: if data_segments != 0: warnings.warn('File does not contain $NEXTDATA information.') break if read_data: self.read_data(file_handle)
Load the requested parts of the file into memory.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L113-L136
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.from_data
def from_data(cls, data): """Load an FCS file from a bytes-like object. Args: data: buffer containing contents of an FCS file. Returns: FCSParser instance with data loaded """ obj = cls() with contextlib.closing(BytesIO(data)) as file_handle: obj.load_file(file_handle) return obj
python
def from_data(cls, data): """Load an FCS file from a bytes-like object. Args: data: buffer containing contents of an FCS file. Returns: FCSParser instance with data loaded """ obj = cls() with contextlib.closing(BytesIO(data)) as file_handle: obj.load_file(file_handle) return obj
Load an FCS file from a bytes-like object. Args: data: buffer containing contents of an FCS file. Returns: FCSParser instance with data loaded
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L139-L151
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.read_header
def read_header(self, file_handle, nextdata_offset=0): """Read the header of the FCS file. The header specifies where the annotation, data and analysis are located inside the binary file. Args: file_handle: buffer containing FCS file. nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA """ header = {'FCS format': file_handle.read(6)} file_handle.read(4) # 4 space characters after the FCS format for field in ('text start', 'text end', 'data start', 'data end', 'analysis start', 'analysis end'): s = file_handle.read(8) try: field_value = int(s) except ValueError: field_value = 0 header[field] = field_value + nextdata_offset # Checking that the location of the TEXT segment is specified for k in ('text start', 'text end'): if header[k] == 0: raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate ' u'information about the "{}" segment.)'.format(self.path, k)) elif header[k] > self._file_size: raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment ' u'is larger than file size'.format(self.path, k)) else: # All OK pass self._data_start = header['data start'] self._data_end = header['data start'] if header['analysis end'] - header['analysis start'] != 0: warnings.warn(u'There appears to be some information in the ANALYSIS segment of file ' u'{0}. However, it might not be read correctly.'.format(self.path)) self.annotation['__header__'] = header
python
def read_header(self, file_handle, nextdata_offset=0): """Read the header of the FCS file. The header specifies where the annotation, data and analysis are located inside the binary file. Args: file_handle: buffer containing FCS file. nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA """ header = {'FCS format': file_handle.read(6)} file_handle.read(4) # 4 space characters after the FCS format for field in ('text start', 'text end', 'data start', 'data end', 'analysis start', 'analysis end'): s = file_handle.read(8) try: field_value = int(s) except ValueError: field_value = 0 header[field] = field_value + nextdata_offset # Checking that the location of the TEXT segment is specified for k in ('text start', 'text end'): if header[k] == 0: raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate ' u'information about the "{}" segment.)'.format(self.path, k)) elif header[k] > self._file_size: raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment ' u'is larger than file size'.format(self.path, k)) else: # All OK pass self._data_start = header['data start'] self._data_end = header['data start'] if header['analysis end'] - header['analysis start'] != 0: warnings.warn(u'There appears to be some information in the ANALYSIS segment of file ' u'{0}. However, it might not be read correctly.'.format(self.path)) self.annotation['__header__'] = header
Read the header of the FCS file. The header specifies where the annotation, data and analysis are located inside the binary file. Args: file_handle: buffer containing FCS file. nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L153-L195
eyurtsev/fcsparser
fcsparser/api.py
FCSParser._extract_text_dict
def _extract_text_dict(self, raw_text): """Parse the TEXT segment of the FCS file into a python dictionary.""" delimiter = raw_text[0] if raw_text[-1] != delimiter: raw_text = raw_text.strip() if raw_text[-1] != delimiter: msg = (u'The first two characters were:\n {}. The last two characters were: {}\n' u'Parser expects the same delimiter character in beginning ' u'and end of TEXT segment'.format(raw_text[:2], raw_text[-2:])) raise ParserFeatureNotImplementedError(msg) # The delimiter is escaped by being repeated (two consecutive delimiters). This code splits # on the escaped delimiter first, so there is no need for extra logic to distinguish # actual delimiters from escaped delimiters. nested_split_list = [x.split(delimiter) for x in raw_text[1:-1].split(delimiter * 2)] # 1:-1 above removes the first and last characters which are reserved for the delimiter. # Flatten the nested list to a list of elements (alternating keys and values) raw_text_elements = nested_split_list[0] for partial_element_list in nested_split_list[1:]: # Rejoin two parts of an element that was split by an escaped delimiter (the end and # start of two successive sub-lists in nested_split_list) raw_text_elements[-1] += (delimiter + partial_element_list[0]) raw_text_elements.extend(partial_element_list[1:]) keys, values = raw_text_elements[0::2], raw_text_elements[1::2] return dict(zip(keys, values))
python
def _extract_text_dict(self, raw_text): """Parse the TEXT segment of the FCS file into a python dictionary.""" delimiter = raw_text[0] if raw_text[-1] != delimiter: raw_text = raw_text.strip() if raw_text[-1] != delimiter: msg = (u'The first two characters were:\n {}. The last two characters were: {}\n' u'Parser expects the same delimiter character in beginning ' u'and end of TEXT segment'.format(raw_text[:2], raw_text[-2:])) raise ParserFeatureNotImplementedError(msg) # The delimiter is escaped by being repeated (two consecutive delimiters). This code splits # on the escaped delimiter first, so there is no need for extra logic to distinguish # actual delimiters from escaped delimiters. nested_split_list = [x.split(delimiter) for x in raw_text[1:-1].split(delimiter * 2)] # 1:-1 above removes the first and last characters which are reserved for the delimiter. # Flatten the nested list to a list of elements (alternating keys and values) raw_text_elements = nested_split_list[0] for partial_element_list in nested_split_list[1:]: # Rejoin two parts of an element that was split by an escaped delimiter (the end and # start of two successive sub-lists in nested_split_list) raw_text_elements[-1] += (delimiter + partial_element_list[0]) raw_text_elements.extend(partial_element_list[1:]) keys, values = raw_text_elements[0::2], raw_text_elements[1::2] return dict(zip(keys, values))
Parse the TEXT segment of the FCS file into a python dictionary.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L197-L224
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.read_text
def read_text(self, file_handle): """Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case. """ header = self.annotation['__header__'] # For convenience ##### # Read in the TEXT segment of the FCS file # There are some differences in how the file_handle.seek(header['text start'], 0) raw_text = file_handle.read(header['text end'] - header['text start'] + 1) try: raw_text = raw_text.decode(self._encoding) except UnicodeDecodeError as e: # Catching the exception and logging it in this way kills the traceback, but # we can worry about this later. logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 ' u'characters will be ignored.\n{}'.format(e)) raw_text = raw_text.decode(self._encoding, errors='ignore') text = self._extract_text_dict(raw_text) ## # Extract channel names and convert some of the channel properties # and other fields into numeric data types (from string) # Note: do not use regular expressions for manipulations here. # Regular expressions are too heavy in terms of computation time. 
pars = int(text['$PAR']) if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1 self.channel_numbers = range(0, pars) # Channel number count starts from 0 else: self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1 # Extract parameter names try: names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers]) except KeyError: names_n = [] try: names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers]) except KeyError: names_s = [] self.channel_names_s = names_s self.channel_names_n = names_n # Convert some of the fields into integer values keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers] add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT'] keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int for key in keys_to_convert_to_int: value = text[key] text[key] = int(value) self.annotation.update(text) # Update data start segments if needed if self._data_start == 0: self._data_start = int(text['$BEGINDATA']) if self._data_end == 0: self._data_end = int(text['$ENDDATA'])
python
def read_text(self, file_handle): """Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case. """ header = self.annotation['__header__'] # For convenience ##### # Read in the TEXT segment of the FCS file # There are some differences in how the file_handle.seek(header['text start'], 0) raw_text = file_handle.read(header['text end'] - header['text start'] + 1) try: raw_text = raw_text.decode(self._encoding) except UnicodeDecodeError as e: # Catching the exception and logging it in this way kills the traceback, but # we can worry about this later. logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 ' u'characters will be ignored.\n{}'.format(e)) raw_text = raw_text.decode(self._encoding, errors='ignore') text = self._extract_text_dict(raw_text) ## # Extract channel names and convert some of the channel properties # and other fields into numeric data types (from string) # Note: do not use regular expressions for manipulations here. # Regular expressions are too heavy in terms of computation time. 
pars = int(text['$PAR']) if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1 self.channel_numbers = range(0, pars) # Channel number count starts from 0 else: self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1 # Extract parameter names try: names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers]) except KeyError: names_n = [] try: names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers]) except KeyError: names_s = [] self.channel_names_s = names_s self.channel_names_n = names_n # Convert some of the fields into integer values keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers] add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT'] keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int for key in keys_to_convert_to_int: value = text[key] text[key] = int(value) self.annotation.update(text) # Update data start segments if needed if self._data_start == 0: self._data_start = int(text['$BEGINDATA']) if self._data_end == 0: self._data_end = int(text['$ENDDATA'])
Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L226-L293
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.read_analysis
def read_analysis(self, file_handle): """Read the ANALYSIS segment of the FCS file and store it in self.analysis. Warning: This has never been tested with an actual fcs file that contains an analysis segment. Args: file_handle: buffer containing FCS data """ start = self.annotation['__header__']['analysis start'] end = self.annotation['__header__']['analysis end'] if start != 0 and end != 0: file_handle.seek(start, 0) self._analysis = file_handle.read(end - start) else: self._analysis = None
python
def read_analysis(self, file_handle): """Read the ANALYSIS segment of the FCS file and store it in self.analysis. Warning: This has never been tested with an actual fcs file that contains an analysis segment. Args: file_handle: buffer containing FCS data """ start = self.annotation['__header__']['analysis start'] end = self.annotation['__header__']['analysis end'] if start != 0 and end != 0: file_handle.seek(start, 0) self._analysis = file_handle.read(end - start) else: self._analysis = None
Read the ANALYSIS segment of the FCS file and store it in self.analysis. Warning: This has never been tested with an actual fcs file that contains an analysis segment. Args: file_handle: buffer containing FCS data
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L295-L310
eyurtsev/fcsparser
fcsparser/api.py
FCSParser._verify_assumptions
def _verify_assumptions(self): """Verify that all assumptions made by the parser hold.""" text = self.annotation keys = text.keys() if '$MODE' not in text or text['$MODE'] != 'L': raise ParserFeatureNotImplementedError(u'Mode not implemented') if '$P0B' in keys: raise ParserFeatureNotImplementedError(u'Not expecting a parameter starting at 0') if text['$BYTEORD'] not in ['1,2,3,4', '4,3,2,1', '1,2', '2,1']: raise ParserFeatureNotImplementedError(u'$BYTEORD {} ' u'not implemented'.format(text['$BYTEORD']))
python
def _verify_assumptions(self): """Verify that all assumptions made by the parser hold.""" text = self.annotation keys = text.keys() if '$MODE' not in text or text['$MODE'] != 'L': raise ParserFeatureNotImplementedError(u'Mode not implemented') if '$P0B' in keys: raise ParserFeatureNotImplementedError(u'Not expecting a parameter starting at 0') if text['$BYTEORD'] not in ['1,2,3,4', '4,3,2,1', '1,2', '2,1']: raise ParserFeatureNotImplementedError(u'$BYTEORD {} ' u'not implemented'.format(text['$BYTEORD']))
Verify that all assumptions made by the parser hold.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L312-L325
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.get_channel_names
def get_channel_names(self): """Get list of channel names. Raises a warning if the names are not unique.""" names_s, names_n = self.channel_names_s, self.channel_names_n # Figure out which channel names to use if self._channel_naming == '$PnS': channel_names, channel_names_alternate = names_s, names_n else: channel_names, channel_names_alternate = names_n, names_s if len(channel_names) == 0: channel_names = channel_names_alternate if len(set(channel_names)) != len(channel_names): msg = (u'The default channel names (defined by the {} ' u'parameter in the FCS file) were not unique. To avoid ' u'problems in downstream analysis, the channel names ' u'have been switched to the alternate channel names ' u'defined in the FCS file. To avoid ' u'seeing this warning message, explicitly instruct ' u'the FCS parser to use the alternate channel names by ' u'specifying the channel_naming parameter.') msg = msg.format(self._channel_naming) warnings.warn(msg) channel_names = channel_names_alternate return channel_names
python
def get_channel_names(self):
    """Get list of channel names. Raises a warning if the names are not unique.

    Returns:
        list: channel names taken from $PnS or $PnN (per the parser's
        channel_naming setting), falling back to the alternate set when
        the preferred set is empty or contains duplicates.
    """
    names_s, names_n = self.channel_names_s, self.channel_names_n

    # Figure out which channel names to use
    if self._channel_naming == '$PnS':
        channel_names, channel_names_alternate = names_s, names_n
    else:
        channel_names, channel_names_alternate = names_n, names_s

    # Preferred set may be entirely absent from the file.
    if not channel_names:
        channel_names = channel_names_alternate

    # Duplicate names would break downstream DataFrame column lookup, so
    # switch to the alternate set and warn the caller.
    if len(set(channel_names)) != len(channel_names):
        msg = (u'The default channel names (defined by the {} '
               u'parameter in the FCS file) were not unique. To avoid '
               u'problems in downstream analysis, the channel names '
               u'have been switched to the alternate channel names '
               u'defined in the FCS file. To avoid '
               u'seeing this warning message, explicitly instruct '
               u'the FCS parser to use the alternate channel names by '
               u'specifying the channel_naming parameter.')
        msg = msg.format(self._channel_naming)
        warnings.warn(msg)
        channel_names = channel_names_alternate

    return channel_names
Get list of channel names. Raises a warning if the names are not unique.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L327-L353
eyurtsev/fcsparser
fcsparser/api.py
FCSParser.read_data
def read_data(self, file_handle):
    """Read the DATA segment of the FCS file.

    Decodes the raw event data into a numpy array of shape
    (num_events, num_parameters) — or a structured array when channels
    use mixed dtypes — and stores it on ``self._data``.

    Args:
        file_handle: binary file object opened on the FCS file.

    Raises:
        ValueError: if the data segment extends past the end of the file.
        ParserFeatureNotImplementedError: for unrecognized $BYTEORD or
            unsupported $DATATYPE values.
    """
    self._verify_assumptions()
    text = self.annotation

    if (self._data_start > self._file_size) or (self._data_end > self._file_size):
        raise ValueError(u'The FCS file "{}" is corrupted. Part of the data segment '
                         u'is missing.'.format(self.path))

    num_events = text['$TOT']  # Number of events recorded
    num_pars = text['$PAR']  # Number of parameters recorded

    # Hoist the repeated .strip() calls: decide endianness once.
    byte_order = text['$BYTEORD'].strip()
    if byte_order in ('1,2,3,4', '1,2'):
        endian = '<'
    elif byte_order in ('4,3,2,1', '2,1'):
        endian = '>'
    else:
        msg = 'Unrecognized byte order ({})'.format(text['$BYTEORD'])
        raise ParserFeatureNotImplementedError(msg)

    # dictionary to convert from FCS format to numpy convention
    conversion_dict = {'F': 'f', 'D': 'f', 'I': 'u'}

    if text['$DATATYPE'] not in conversion_dict:
        raise ParserFeatureNotImplementedError('$DATATYPE = {0} is not yet '
                                               'supported.'.format(text['$DATATYPE']))

    # Calculations to figure out data types of each of parameters.
    # $PnB specifies the number of bits reserved for a measurement of parameter n.
    bytes_per_par_list = [int(text['$P{0}B'.format(i)] / 8) for i in self.channel_numbers]
    par_numeric_type_list = [
        '{endian}{type}{size}'.format(endian=endian,
                                      type=conversion_dict[text['$DATATYPE']],
                                      size=bytes_per_par)
        for bytes_per_par in bytes_per_par_list
    ]

    # Parser for list mode: events are stored consecutively, one
    # measurement per parameter per event.
    file_handle.seek(self._data_start, 0)  # Go to the part of the file where data starts

    if len(set(par_numeric_type_list)) > 1:
        # Mixed data format: columns (channels) were encoded with
        # different types, so read a structured (record) array with one
        # compound record per event.
        dtype = ','.join(par_numeric_type_list)
        data = fromfile(file_handle, dtype, num_events)

        # numpy auto-assigns field names ('f0', 'f1', ...); rename them
        # to the channel names so the pandas DataFrame constructor
        # produces the expected columns.
        names = self.get_channel_names()
        if six.PY2:
            # Python 2: numpy field names must be byte strings.
            encoded_names = [name.encode('ascii', errors='replace') for name in names]
        else:
            # Python 3: field names are (unicode) str already.
            encoded_names = list(names)
        data.dtype.names = tuple(encoded_names)
    else:
        # All values share a single data format: read a flat array and
        # reshape to (events, parameters).
        dtype = par_numeric_type_list[0]
        data = fromfile(file_handle, dtype, num_events * num_pars)
        data = data.reshape((num_events, num_pars))

    # Convert to native byte order; pandas data structures require it.
    native_code = '<' if (sys.byteorder == 'little') else '>'
    if endian != native_code:
        # Swaps the actual bytes and also the endianness marker.
        # NOTE(review): ndarray.newbyteorder was removed in numpy 2.0;
        # there, use data.view(data.dtype.newbyteorder()) instead.
        data = data.byteswap().newbyteorder()

    self._data = data
python
def read_data(self, file_handle):
    """Read the DATA segment of the FCS file.

    Decodes the raw event data into a numpy array of shape
    (num_events, num_parameters) — or a structured array when channels
    use mixed dtypes — and stores it on ``self._data``.

    Args:
        file_handle: binary file object opened on the FCS file.

    Raises:
        ValueError: if the data segment extends past the end of the file.
        ParserFeatureNotImplementedError: for unrecognized $BYTEORD or
            unsupported $DATATYPE values.
    """
    self._verify_assumptions()
    text = self.annotation

    if (self._data_start > self._file_size) or (self._data_end > self._file_size):
        raise ValueError(u'The FCS file "{}" is corrupted. Part of the data segment '
                         u'is missing.'.format(self.path))

    num_events = text['$TOT']  # Number of events recorded
    num_pars = text['$PAR']  # Number of parameters recorded

    # Hoist the repeated .strip() calls: decide endianness once.
    byte_order = text['$BYTEORD'].strip()
    if byte_order in ('1,2,3,4', '1,2'):
        endian = '<'
    elif byte_order in ('4,3,2,1', '2,1'):
        endian = '>'
    else:
        msg = 'Unrecognized byte order ({})'.format(text['$BYTEORD'])
        raise ParserFeatureNotImplementedError(msg)

    # dictionary to convert from FCS format to numpy convention
    conversion_dict = {'F': 'f', 'D': 'f', 'I': 'u'}

    if text['$DATATYPE'] not in conversion_dict:
        raise ParserFeatureNotImplementedError('$DATATYPE = {0} is not yet '
                                               'supported.'.format(text['$DATATYPE']))

    # Calculations to figure out data types of each of parameters.
    # $PnB specifies the number of bits reserved for a measurement of parameter n.
    bytes_per_par_list = [int(text['$P{0}B'.format(i)] / 8) for i in self.channel_numbers]
    par_numeric_type_list = [
        '{endian}{type}{size}'.format(endian=endian,
                                      type=conversion_dict[text['$DATATYPE']],
                                      size=bytes_per_par)
        for bytes_per_par in bytes_per_par_list
    ]

    # Parser for list mode: events are stored consecutively, one
    # measurement per parameter per event.
    file_handle.seek(self._data_start, 0)  # Go to the part of the file where data starts

    if len(set(par_numeric_type_list)) > 1:
        # Mixed data format: columns (channels) were encoded with
        # different types, so read a structured (record) array with one
        # compound record per event.
        dtype = ','.join(par_numeric_type_list)
        data = fromfile(file_handle, dtype, num_events)

        # numpy auto-assigns field names ('f0', 'f1', ...); rename them
        # to the channel names so the pandas DataFrame constructor
        # produces the expected columns.
        names = self.get_channel_names()
        if six.PY2:
            # Python 2: numpy field names must be byte strings.
            encoded_names = [name.encode('ascii', errors='replace') for name in names]
        else:
            # Python 3: field names are (unicode) str already.
            encoded_names = list(names)
        data.dtype.names = tuple(encoded_names)
    else:
        # All values share a single data format: read a flat array and
        # reshape to (events, parameters).
        dtype = par_numeric_type_list[0]
        data = fromfile(file_handle, dtype, num_events * num_pars)
        data = data.reshape((num_events, num_pars))

    # Convert to native byte order; pandas data structures require it.
    native_code = '<' if (sys.byteorder == 'little') else '>'
    if endian != native_code:
        # Swaps the actual bytes and also the endianness marker.
        # NOTE(review): ndarray.newbyteorder was removed in numpy 2.0;
        # there, use data.view(data.dtype.newbyteorder()) instead.
        data = data.byteswap().newbyteorder()

    self._data = data
Read the DATA segment of the FCS file.
https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L355-L433