Python
def determine_destination_full_path(
        destination_folder_name,
        destination_file_name,
        source_full_path,
        destination_file_format,
        file_number=None,
):
    """
    Determine the final destination name of the file being uploaded.
    """
    destination_file_name = determine_destination_file_name(
        destination_file_name=destination_file_name,
        source_full_path=source_full_path,
        destination_file_format=destination_file_format,
        file_number=file_number)
    destination_full_path = combine_folder_and_file_name(
        destination_folder_name,
        destination_file_name)
    return destination_full_path
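A short usage sketch for the function above, assuming the helpers it calls (determine_destination_file_name, combine_folder_and_file_name) fall back to the source file's name when no destination name is given; all paths here are hypothetical:

full_path = determine_destination_full_path(
    destination_folder_name='exports/reports',
    destination_file_name=None,
    source_full_path='/tmp/data.csv',
    destination_file_format=None)
print(full_path)  # expected under these assumptions: 'exports/reports/data.csv'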
Python
import tarfile
from zipfile import ZipFile


def decompress_file(source_full_path, destination_full_path, compression):
    """
    Decompress a given file, using the specified compression method.
    """
    # Map each supported compression name to the matching tarfile read mode.
    tar_modes = {'tar.bz2': 'r:bz2', 'tar': 'r', 'tar.gz': 'r:gz'}
    if compression == 'zip':
        with ZipFile(source_full_path, 'r') as zip_file:
            zip_file.extractall(destination_full_path)
    elif compression in tar_modes:
        with tarfile.open(source_full_path, tar_modes[compression]) as tar_file:
            tar_file.extractall(path=destination_full_path)
    else:
        return
    print(
        f'Successfully extracted files from {source_full_path} to {destination_full_path}')
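A minimal usage sketch for decompress_file; the archive and output paths are hypothetical:

# Extract a hypothetical gzipped tarball into a local directory.
decompress_file('downloads/archive.tar.gz', 'downloads/extracted', 'tar.gz')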
Python
import os


def create_fallback_destination_file_name(source_file_name, compression):
    """
    If a destination_file_name is not provided, use the source_file_name
    with the compression extension removed.
    """
    file_name = os.path.basename(source_file_name)
    file_name = file_name.replace(f'.{compression}', '')
    return file_name
Python
def coef_file_parse(cfile, txt_var_map):
    '''
    A separate function because GE and Siemens .coef files have a similar
    structure. Modifies txt_var_map in place.
    '''
    # Parse the .coef file, stripping unneeded characters. A valid line in
    # the file is broken into validline_list.
    coef_re = re.compile(r'^[^#]')  # regex for first character not a '#'
    with open(cfile, 'r') as coef_file:
        for line in coef_file.readlines():
            if coef_re.match(line):
                validline_list = line.lstrip(' \t').rstrip(';\n').split()
                if validline_list:
                    log.info('Parsed : %s' % validline_list)
                    name = validline_list[0]
                    x = int(validline_list[1])
                    y = int(validline_list[2])
                    txt_var_map[name][x, y] = float(validline_list[3])
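A minimal sketch of how txt_var_map might be prepared before parsing, assuming the coefficient tables are dense NumPy arrays keyed by the variable names that appear in the file (the names mirror those used by grad_file_parse below; the array size and file name are hypothetical):

import numpy as np

coef_array_sz = 20  # hypothetical maximum order
txt_var_map = {
    name: np.zeros((coef_array_sz, coef_array_sz))
    for name in ('Alpha_x', 'Alpha_y', 'Alpha_z',
                 'Beta_x', 'Beta_y', 'Beta_z')
}
coef_file_parse('gradwarp.coef', txt_var_map)  # hypothetical file name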
Python
def grad_file_parse(gfile, txt_var_map):
    '''
    A separate function because GE and Siemens .coef files have a similar
    structure. Modifies txt_var_map in place.
    '''
    with open(gfile, 'r') as gf:
        line = next(gf)
        # skip the comments
        while not line.startswith('#*] END:'):
            line = next(gf)

        # get R0 (three lines after the end of the comment block)
        line = next(gf)
        line = next(gf)
        line = next(gf)
        R0_m = float(line.strip().split()[0])

        # skip ahead to the data
        for _ in range(7):
            line = next(gf)

        xmax = 0
        ymax = 0

        while True:
            lindex = line.find('(')
            rindex = line.find(')')
            if lindex == -1 and rindex == -1:
                break

            arrindex = line[lindex + 1:rindex]
            xs, ys = arrindex.split(',')
            x = int(xs)
            y = int(ys)
            if x > xmax:
                xmax = x
            if y > ymax:
                ymax = y

            # 'A'/'B' rows hold the alpha/beta coefficients for each axis;
            # the coefficient value is the second-to-last token on the line.
            if line.find('A') != -1 and line.find('x') != -1:
                txt_var_map['Alpha_x'][x, y] = float(line.split()[-2])
            if line.find('A') != -1 and line.find('y') != -1:
                txt_var_map['Alpha_y'][x, y] = float(line.split()[-2])
            if line.find('A') != -1 and line.find('z') != -1:
                txt_var_map['Alpha_z'][x, y] = float(line.split()[-2])
            if line.find('B') != -1 and line.find('x') != -1:
                txt_var_map['Beta_x'][x, y] = float(line.split()[-2])
            if line.find('B') != -1 and line.find('y') != -1:
                txt_var_map['Beta_y'][x, y] = float(line.split()[-2])
            if line.find('B') != -1 and line.find('z') != -1:
                txt_var_map['Beta_z'][x, y] = float(line.split()[-2])

            try:
                line = next(gf)
            except StopIteration:
                break

    # txt_var_map is filled in place; return R0_m and the largest indices seen.
    return R0_m, (xmax, ymax)
Python
def eval_spharm_grid(self, vendor, coeffs):
    '''
    We evaluate the spherical harmonics on a less sampled grid.
    This is a spacetime vs accuracy tradeoff.
    '''
    # init the grid first
    if not self.fovmin:
        fovmin = globals.siemens_fovmin
    else:
        fovmin = self.fovmin
    if not self.fovmax:
        fovmax = globals.siemens_fovmax
    else:
        fovmax = self.fovmax
    if not self.numpoints:
        numpoints = globals.siemens_numpoints
    else:
        numpoints = self.numpoints

    # convert the field of view from meters to mm
    fovmin = fovmin * 1000.
    fovmax = fovmax * 1000.

    # the grid in mm; this is needed for the spherical harmonics
    vec = np.linspace(fovmin, fovmax, numpoints)
    gvx, gvy, gvz = utils.meshgrid(vec, vec, vec)

    # mm per grid step
    cf = (fovmax - fovmin) / numpoints

    # deduce the transformation from RCS to grid
    g_rcs2xyz = np.array(
        [[0, cf, 0, fovmin],
         [cf, 0, 0, fovmin],
         [0, 0, cf, fovmin],
         [0, 0, 0, 1]], dtype=np.float32)

    # get the grid-to-RCS transformation as well
    g_xyz2rcs = np.linalg.inv(g_rcs2xyz)

    # indices into the gradient displacement volume
    gr, gc, gs = utils.meshgrid(np.arange(numpoints),
                                np.arange(numpoints),
                                np.arange(numpoints),
                                dtype=np.float32)

    log.info('Evaluating spherical harmonics')
    log.info('on a ' + str(numpoints) + '^3 grid')
    log.info('with extents ' + str(fovmin) + 'mm to ' + str(fovmax) + 'mm')

    gvxyz = CV(gvx, gvy, gvz)
    _dv, _dxyz = eval_spherical_harmonics(coeffs, vendor, gvxyz)

    return CV(_dv.x, _dv.y, _dv.z), g_xyz2rcs
Python
def eval_spherical_harmonics(coeffs, vendor, vxyz):
    '''
    Evaluate spherical harmonics

    Parameters
    ----------
    coeffs : Coeffs (namedtuple)
        the spherical harmonics coefficients obtained by parsing
    vendor : str
        either 'siemens' or 'ge'
    vxyz : CoordsVector (namedtuple)
        the x, y, z coordinates at which to evaluate the harmonics
    '''
    # convert radius into mm
    R0 = coeffs.R0_m * 1000

    x, y, z = vxyz

    if vendor == 'siemens':
        log.info('along x...')
        bx = siemens_B(coeffs.alpha_x, coeffs.beta_x, x, y, z, R0)
        log.info('along y...')
        by = siemens_B(coeffs.alpha_y, coeffs.beta_y, x, y, z, R0)
        log.info('along z...')
        bz = siemens_B(coeffs.alpha_z, coeffs.beta_z, x, y, z, R0)
    else:
        # GE
        log.info('along x...')
        bx = ge_D(coeffs.alpha_x, coeffs.beta_x, x, y, z)
        log.info('along y...')
        by = ge_D(coeffs.alpha_y, coeffs.beta_y, x, y, z)
        log.info('along z...')
        # The original computed a siemens_B value here and immediately
        # overwrote it with ge_D; the dead assignment has been dropped.
        bz = ge_D(coeffs.alpha_z, coeffs.beta_z, x, y, z)

    return CV(bx * R0, by * R0, bz * R0), CV(x, y, z)
Python
def siemens_B(alpha, beta, x1, y1, z1, R0):
    '''
    Calculate displacement field from Siemens coefficients
    '''
    nmax = alpha.shape[0] - 1
    x1 = x1 + 0.0001  # hack to avoid singularities at R=0

    # convert to spherical coordinates
    r = np.sqrt(x1 * x1 + y1 * y1 + z1 * z1)
    theta = np.arccos(z1 / r)
    phi = np.arctan2(y1 / r, x1 / r)

    b = np.zeros(x1.shape)
    for n in range(0, nmax + 1):
        f = np.power(r / R0, n)
        for m in range(0, n + 1):
            f2 = alpha[n, m] * np.cos(m * phi) + beta[n, m] * np.sin(m * phi)
            _ptemp = utils.legendre(n, m, np.cos(theta))
            # _ptemp = scipy.special.lpmv(m, n, np.cos(theta))
            normfact = 1
            # this is the Siemens normalization
            if m > 0:
                normfact = math.pow(-1, m) * \
                    math.sqrt(float((2 * n + 1) * factorial(n - m)) /
                              float(2 * factorial(n + m)))
            _p = normfact * _ptemp
            b = b + f * _p * f2
    return b
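For reference, a transcription of the double loop in siemens_B into a single formula, with $P_n^m$ the associated Legendre function computed by utils.legendre and $N_{nm}$ the Siemens normalization applied for $m > 0$:

\[
B(r,\theta,\phi) = \sum_{n=0}^{n_{\max}} \left(\frac{r}{R_0}\right)^{n} \sum_{m=0}^{n} N_{nm}\, P_n^m(\cos\theta)\,\bigl[\alpha_{nm}\cos(m\phi) + \beta_{nm}\sin(m\phi)\bigr],
\qquad
N_{nm} = (-1)^m \sqrt{\frac{(2n+1)\,(n-m)!}{2\,(n+m)!}},\quad N_{n0} = 1
\]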
Python
def ge_D(alpha, beta, x1, y1, z1):
    '''
    GE gradwarp coefficients define the error rather than the total
    gradient field.
    '''
    nmax = alpha.shape[0] - 1
    x1 = x1 + 0.0001  # hack to avoid singularities

    r = np.sqrt(x1 * x1 + y1 * y1 + z1 * z1)
    # For consistency with the GE papers, swap theta & phi -> phi & theta
    phi = np.arccos(z1 / r)
    theta = np.arctan2(y1 / r, x1 / r)

    r = r * 100.0  # GE wants cm, so meters -> cm

    d = np.zeros(x1.shape)
    for n in range(0, nmax + 1):
        # GE uses the usual unnormalized Legendre polynomials.
        f = np.power(r, n)
        for m in range(0, n + 1):
            f2 = alpha[n, m] * np.cos(m * theta) + beta[n, m] \
                * np.sin(m * theta)
            _p = utils.legendre(n, m, np.cos(phi))
            d = d + f * _p * f2
    d = d / 100.0  # cm back to meters
    return d
Python
def startApplicationOnServer(appName, serverName):
    """Start the named application on one server"""
    print "startApplicationOnServer: Entry. appname=%s servername=%s" % (appName, serverName)
    cellName = getCellName()
    nodeName = getNodeName()
    # Get the application manager
    appManager = AdminControl.queryNames(
        'cell=%s,node=%s,type=ApplicationManager,process=%s,*' %
        (cellName, nodeName, serverName))
    print "startApplicationOnServer: appManager=%s" % (repr(appManager))
    # Start the application
    rc = AdminControl.invoke(appManager, 'startApplication', appName)
    print "startApplicationOnServer: Exit. rc=%s" % (repr(rc))
Python
def stopApplicationOnServer(appName, serverName):
    """Stop the named application on one server"""
    print "stopApplicationOnServer: Entry. appname=%s servername=%s" % (appName, serverName)
    cellName = getCellName()
    nodeName = getNodeName()
    # Get the application manager
    appManager = AdminControl.queryNames(
        'cell=%s,node=%s,type=ApplicationManager,process=%s,*' %
        (cellName, nodeName, serverName))
    print "stopApplicationOnServer: appManager=%s" % (repr(appManager))
    # Stop the application (the original comment said "start it")
    rc = AdminControl.invoke(appManager, 'stopApplication', appName)
    print "stopApplicationOnServer: Exit. rc=%s" % (repr(rc))
Python
def installApplicationOnServer(fileName, appName, contextRoot, serverName):
    """Install given application on the named server using given context root"""
    print "installApplicationOnServer: fileName=%s appName=%s contextRoot=%s serverName=%s" % (fileName, appName, contextRoot, serverName)
    AdminApp.install(fileName, '[-appname ' + appName + ' -contextroot ' + contextRoot +
                     ' -server ' + serverName + ' -usedefaultbindings ]')
    AdminConfig.save()
    # Modify the classloader mode for the application
    deploymentID = AdminConfig.getid('/Deployment:' + appName + '/')
    deploymentObject = AdminConfig.showAttribute(deploymentID, 'deployedObject')
    classldr = AdminConfig.showAttribute(deploymentObject, 'classloader')
    print AdminConfig.showall(classldr)
    AdminConfig.modify(classldr, [['mode', 'PARENT_LAST']])
    # Modify the WAR class loader policy
    AdminConfig.show(deploymentObject, 'warClassLoaderPolicy')
    AdminConfig.modify(deploymentObject, [['warClassLoaderPolicy', 'SINGLE']])
    AdminConfig.save()
Python
def do_graphql_query(api_token, payload):
    """
    Do a GraphQL query. This base method is used by all other methods
    that do a GraphQL query.

    :param api_token: the API token to access the GraphQL API
    :param payload: the payload we want to send.
    :return: the returned JSON object
    """
    headers = {API_TOKEN_HEADER: api_token}
    response = requests.post(constants.GRAPHQL_ENDPOINT_URL,
                             json=payload, headers=headers)
    if response.status_code != 200:
        log.error('Failed to send GraphQL query to Codiga API')
        return None
    response_json = response.json()
    return response_json["data"]
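A short usage sketch, assuming the module-level constants, API_TOKEN_HEADER, and log referenced above are defined; the token and query string are hypothetical:

payload = {'query': '{ projects { name } }'}  # hypothetical GraphQL query
data = do_graphql_query('my-api-token', payload)
if data is not None:
    print(data)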
Python
def do_graphql_query_with_api_token(api_token, payload):
    """
    Do a GraphQL query. This base method is used by all other methods
    that do a GraphQL query.

    :param api_token: the API token to access the GraphQL API
    :param payload: the payload we want to send.
    :return: the returned JSON object
    """
    headers = {API_TOKEN_HEADER: api_token}
    response = requests.post(constants.GRAPHQL_ENDPOINT_URL,
                             json=payload, headers=headers)
    # (A stray debug print of response.json() has been removed.)
    if response.status_code != 200:
        log.error('Failed to send GraphQL query to Codiga API')
        return None
    response_json = response.json()
    return response_json["data"]
Python
def identi_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer
            of the main path
        filters: list of integers, the filters of the 3 conv layers of the
            main path
        stage: integer, current stage label, used for generating layer names
        block: 'a', 'b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    # conv and bn layers' names
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
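A minimal usage sketch, assuming the standalone Keras package provides the layers used by the block above (with tf.keras the import paths differ); the input shape is hypothetical, but its channel count must equal filters3 so the residual add is valid:

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(56, 56, 256))  # channels must equal filters3 (256)
outputs = identi_block(inputs, 3, [64, 64, 256], stage=2, block='b')
model = Model(inputs, outputs)
model.summary()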
Python
def display_instances(image, boxes, masks, names, scores, showScores=False):
    """
    Take the image and results and apply the mask, box, and label.
    """
    n_instances = boxes.shape[0]
    colors = random_colors(n_instances)

    if not n_instances:
        print('NO INSTANCES TO DISPLAY')

    for i, color in enumerate(colors):
        if not np.any(boxes[i]):
            continue

        y1, x1, y2, x2 = boxes[i]
        label = names[i]
        score = scores[i] if scores is not None else None
        if showScores:
            caption = '{} {:.2f}'.format(label, score) if score else label
        else:
            caption = '{}'.format(label) if score else label
        mask = masks[:, :, i]

        image = apply_mask(image, mask, color)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        image = cv2.putText(
            image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
        )

    return image
Python
def load_mask(self, image_id):
    """Generate instance masks for an image.

    Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
    """
    # If not a trash dataset image, delegate to parent class.
    image_info = self.image_info[image_id]
    if image_info["source"] != "trash":
        return super(self.__class__, self).load_mask(image_id)

    # Convert polygons to a bitmap mask of shape
    # [height, width, instance_count]
    info = self.image_info[image_id]
    mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                    dtype=np.uint8)
    for i, p in enumerate(info["polygons"]):
        # Get indexes of pixels inside the polygon and set them to 1
        rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
        mask[rr, cc, i] = 1

    # Return mask, and array of class IDs of each instance. Since we have
    # one class ID only, we return an array of 1s. (np.bool was removed in
    # NumPy 1.24; the builtin bool works in its place.)
    return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
Python
def image_reference(self, image_id):
    """Return the path of the image."""
    info = self.image_info[image_id]
    if info["source"] == "trash":
        return info["path"]
    else:
        # Return the parent's result rather than silently dropping it
        # (the original was missing this return).
        return super(self.__class__, self).image_reference(image_id)
Python
def highlightHex(self, world_x, world_y):
    """
    Method that highlights a selected hex. Algorithm from
    http://www.gamedev.net/reference/articles/article1800.asp

    world_x: X-coordinate of point in the hex to highlight
    world_y: Y-coordinate of point in the hex to highlight
    """
    global SQRT_3, SQRT_3_2, HEX_M
    x_section = int(world_x / 3)
    y_section = int(world_y / SQRT_3_2)
    x_sectpxl = world_x - x_section * 3
    y_sectpxl = world_y - y_section * SQRT_3_2

    if x_section % 2:
        # Type A
        if x_sectpxl < (1 - y_sectpxl * HEX_M):
            # Left Bottom
            hex = (x_section - 1, y_section - 1)
        elif x_sectpxl < (y_sectpxl * HEX_M - 1):
            # Left Top
            hex = (x_section - 1, y_section)
        else:
            # Right Area
            hex = (x_section, y_section)
    else:
        # Type B
        if y_sectpxl >= SQRT_3:
            # Top Half
            if x_sectpxl < (2 - y_sectpxl * HEX_M):
                hex = (x_section - 1, y_section)  # Top Left
            else:
                hex = (x_section, y_section)  # Top Right
        else:
            # Bottom Half
            if x_sectpxl < (y_sectpxl * HEX_M):
                hex = (x_section - 1, y_section)  # Bottom Left
            else:
                hex = (x_section, y_section - 1)  # Bottom Right

    # Only highlight inside the world
    if 0 <= hex[0] < self.x and 0 <= hex[1] < self.y:
        if not self.activeHex or hex != self.activeHex:
            self.highlightDict[hex] = 0.2
            self.activeHex = hex
    else:
        self.activeHex = None
Python
def paint(self):
    """
    Paints the world and the highlighted hexes
    """
    global SQRT_3, SQRT_3_2

    # Paint hex field
    glCallList(self.dlHex)

    # Paint all highlights
    for hex, hv in self.highlightDict.items():
        glPushMatrix()
        glColor3f(hv, hv, hv)
        if hex[0] % 2:
            glTranslatef(hex[0] * 3, hex[1] * SQRT_3_2, -0.05)
        else:
            glTranslatef(hex[0] * 3, SQRT_3 + hex[1] * SQRT_3_2, -0.05)
        glCallList(self.dlHLHex)
        glPopMatrix()

    # Paint selected hex
    if self.selectedHex:
        glPushMatrix()
        glColor3f(1, 1, 1)
        if self.selectedHex[0] % 2:
            glTranslatef(self.selectedHex[0] * 3,
                         self.selectedHex[1] * SQRT_3_2, 0.05)
        else:
            glTranslatef(self.selectedHex[0] * 3,
                         SQRT_3 + self.selectedHex[1] * SQRT_3_2, 0.05)
        glCallList(self.dlSelHex)
        if self.selectedShip:
            self.textg24.glPrint(str(self.selectedShip.x))
        glPopMatrix()

    # Paint ships
    for name, ship in self.shipDict.items():
        ship.paint()
Python
def handleKeyEvent(event):
    """
    Function handling key events.

    Returns: True as default, False when a quit key has been pressed
    """
    global CameraPos
    keystate = pygame.key.get_pressed()
    if keystate[K_ESCAPE]:  # ESC key
        return False
    if keystate[K_v]:  # "v" key
        CameraPos = [40.0, 50.0, 50.0]
    if keystate[K_PLUS] or keystate[K_KP_PLUS]:  # "+" key
        CameraPos[2] += CameraPos[2] * ZOOM_SPEED_CONST
    if keystate[K_MINUS] or keystate[K_KP_MINUS]:  # "-" key
        CameraPos[2] -= CameraPos[2] * ZOOM_SPEED_CONST
    if CameraPos[2] < 10:  # Limit zoom
        CameraPos[2] = 10
    return True
Python
def tick():
    """
    Function handling periodic updates. Called by the 30 Hz clock.
    """
    keystate = pygame.key.get_pressed()
    if keystate[K_UP]:  # up-arrow key
        CameraPos[1] += CameraPos[2] * KEY_MOVE_CONST
    if keystate[K_DOWN]:  # down-arrow key
        CameraPos[1] -= CameraPos[2] * KEY_MOVE_CONST
    if keystate[K_RIGHT]:  # right-arrow key
        CameraPos[0] += CameraPos[2] * KEY_MOVE_CONST
    if keystate[K_LEFT]:  # left-arrow key
        CameraPos[0] -= CameraPos[2] * KEY_MOVE_CONST
Python
def put(self, value):
    """Adds a new item to the end of the underlying Redis queue

    Arguments:
        value {str} -- The object to add
    """
    self.redis.rpush(self.key, json.dumps(value))
Python
def empty(self):
    """Returns a boolean indicating if the Redis queue is empty

    Returns:
        bool -- Whether or not the underlying Redis queue is empty
    """
    return self.redis.llen(self.key) == 0
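A self-contained sketch of the class these two methods belong to, assuming it holds a redis client and a list key (the class name RedisQueue is hypothetical); running it requires a reachable Redis server:

import json
import redis

class RedisQueue:
    """Hypothetical wrapper matching the put/empty methods above."""
    def __init__(self, key, client=None):
        self.redis = client or redis.Redis()  # defaults to localhost:6379
        self.key = key

    def put(self, value):
        self.redis.rpush(self.key, json.dumps(value))

    def empty(self):
        return self.redis.llen(self.key) == 0

queue = RedisQueue('jobs')
queue.put({'task': 'resize', 'id': 1})
print(queue.empty())  # False once an item has been queued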
Python
def from_dict(cls, account):
    """Loads an account from a valid JSON dict returned from the Twitter API

    Arguments:
        account {dict} -- The JSON formatted User object from the Twitter
            API; its "_tbsource" key carries the source of the profile
            (e.g. "tweets", "enum", etc.)

    Returns:
        cache.Account -- The Account instance representing this user
    """
    return Account(
        id=account.get('id'),
        id_str=account.get('id_str'),
        screen_name=account.get('screen_name'),
        created_date=datetime.strptime(
            account.get('created_at'), '%a %b %d %H:%M:%S %z %Y'),
        protected=account.get('protected'),
        tweet_count=account.get('statuses_count'),
        language=account.get('lang'),
        source=account.get('_tbsource'))
Python
def parse_args():
    """Parses the command line arguments."""
    parser = argparse.ArgumentParser(
        description='Enumerate public Twitter tweets from discovered accounts')
    parser.add_argument(
        '--min-tweets', '-mt',
        type=int,
        default=DEFAULT_MINIMUM_TWEETS,
        help='The minimum number of tweets needed before fetching the tweets')
    parser.add_argument(
        '--tweet-filename', '-tf',
        type=str,
        help='The filename to store compressed tweet JSON data',
        default='tweets.json.gz')
    parser.add_argument(
        '--no-lookup',
        dest='lookup',
        action='store_false',
        help='Disable looking up users found in tweets',
        default=True)
    parser.add_argument(
        '--stdout',
        action='store_true',
        dest='stdout',
        help='Print JSON to stdout instead of a file',
        default=False)
    return parser.parse_args()
Python
def parse_args():
    """Parses the command line arguments."""
    parser = argparse.ArgumentParser(
        description='Crawl a Twitter user\'s social network')
    parser.add_argument('user', type=str, help='User screen name to crawl')
    parser.add_argument(
        '--graph-file', '-g',
        type=str,
        help='Filename for the output GEXF graph',
        default=DEFAULT_GRAPH_FILENAME)
    parser.add_argument(
        '--raw', '-r',
        type=str,
        help='Filename for the raw JSON data',
        default=DEFAULT_RAW_RESULTS_FILENAME)
    parser.add_argument(
        '--degree', '-d',
        type=int,
        help='Max degree of crawl',
        default=DEFAULT_CRAWL_DEGREE)
    parser.add_argument(
        '--max-connections', '-c',
        type=int,
        help='Max number of connections per account to crawl',
        default=DEFAULT_MAX_CONNECTIONS)
    parser.add_argument(
        '--root-connections', '-rc',
        help='Only track connections connected to the original user',
        default=False,
        action='store_true')
    parser.add_argument(
        '--dynamic',
        help='Store the results as a dynamic graph instead of a static graph',
        default=False,
        action='store_true')
    return parser.parse_args()
Python
def write_graph(session, args):
    """Writes the entries in the database to a GEXF file

    Arguments:
        session {sqlalchemy.orm.Session} -- The database session
        args {argparse.Namespace} -- The parsed arguments; args.graph_file
            is the filename to write the graph to
    """
    with open(args.graph_file, 'w') as graph:
        graph_mode = 'mode="static"'
        if args.dynamic:
            graph_mode = 'mode="dynamic" timeformat="dateTime"'
        graph.write(
            "<?xml version='1.0' encoding='utf-8'?>"
            "<gexf version=\"1.2\" xmlns=\"http://www.gexf.net/1.2draft\" "
            "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
            "xsi:schemaLocation=\"http://www.w3.org/2001/XMLSchema-instance\">"
            "<graph defaultedgetype=\"directed\" {} name=\"\">\n".format(
                graph_mode))

        # Write the nodes
        graph.write('<nodes>\n')
        for node in session.query(Node).yield_per(1000):
            graph.write('<node id="{}" label="{}" start="{}"/>\n'.format(
                node.screen_name, node.screen_name,
                node.created_date.strftime('%Y-%m-%dT%H:%M:%S')))
        graph.write('</nodes>\n')

        # Write the edges
        graph.write('<edges>\n')
        query = session.query(Edge)
        for edge in query.yield_per(1000):
            graph.write(
                '<edge id="{}" source="{}" target="{}" start="{}"/>\n'.format(
                    edge.id, edge.source.screen_name, edge.target.screen_name,
                    edge.created_date.strftime('%Y-%m-%dT%H:%M:%S')))
        graph.write('</edges>\n')

        graph.write('</graph>\n')
        graph.write('</gexf>\n')
Python
def write_graph(session, args):
    """Writes the entries in the database to a GEXF file

    Arguments:
        session {sqlalchemy.orm.Session} -- The database session
        args {argparse.Namespace} -- The parsed arguments; args.graph_file
            is the filename to write the graph to
    """
    date_format = "%Y-%m-%dT%H:%M:%S"
    with open(args.graph_file, 'w') as graph:
        graph.write(
            "<?xml version='1.0' encoding='utf-8'?>"
            "<gexf version=\"1.2\" xmlns=\"http://www.gexf.net/1.2draft\" "
            "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
            "xsi:schemaLocation=\"http://www.w3.org/2001/XMLSchema-instance\">"
            "<graph defaultedgetype=\"directed\" mode=\"dynamic\" "
            "timeformat=\"datetime\" name=\"\">\n")

        # Write the nodes
        graph.write('<nodes>\n')
        for node_class in [TweetNode, AccountNode]:
            for node in session.query(node_class).yield_per(1000):
                graph.write('<node id="{}" label="{}" start="{}" />\n'.format(
                    node.id, node.screen_name,
                    node.date_crawled.strftime(date_format)))
        graph.write('</nodes>\n')

        # Write the edges
        graph.write('<edges>\n')
        query = session.query(Edge)
        for edge in query.yield_per(1000):
            graph.write('<edge id="{}" source="{}" target="{}" />\n'.format(
                edge.id, edge.source.id, edge.target.id))
        graph.write('</edges>\n')

        graph.write('</graph>\n')
        graph.write('</gexf>\n')
Python
def write_row(self, row):
    """Writes a new row to the file in ndjson format

    Arguments:
        row {dict} -- The dict to write to the file
    """
    record = '{}\n'.format(json.dumps(row))
    record = record.encode('utf-8')
    self.file_handle.write(record)
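A minimal sketch of the surrounding writer class, assuming it holds a binary file handle; the class name NdjsonWriter and the gzip usage are assumptions based on the compressed ".json.gz" defaults used elsewhere in this section:

import gzip
import json

class NdjsonWriter:
    """Hypothetical wrapper holding the file handle used by write_row."""
    def __init__(self, filename):
        self.file_handle = gzip.open(filename, 'wb')

    def write_row(self, row):
        record = '{}\n'.format(json.dumps(row))
        self.file_handle.write(record.encode('utf-8'))

    def close(self):
        self.file_handle.close()

writer = NdjsonWriter('accounts.json.gz')
writer.write_row({'id': 1, 'screen_name': 'example'})
writer.close()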
Python
def parse_args():
    """Parses the command line arguments."""
    parser = argparse.ArgumentParser(
        description='Enumerate public Twitter profiles and tweets')
    parser.add_argument(
        '--max-id',
        type=int,
        help='Max Twitter ID to use for enumeration',
        default=DEFAULT_MAX_ID)
    parser.add_argument(
        '--min-id',
        type=int,
        help='Minimum ID to use for enumeration',
        default=DEFAULT_MIN_ID)
    parser.add_argument(
        '--enum-percentage', '-p',
        type=int,
        default=100,
        help='The percentage of 32bit account space to enumerate (0-100).')
    parser.add_argument(
        '--no-stream',
        dest='stream',
        action='store_false',
        help='Disable the streaming',
        default=True)
    parser.add_argument(
        '--no-enum',
        dest='enum',
        action='store_false',
        help='Disable the account id enumeration',
        default=True)
    parser.add_argument(
        '--stream-query', '-q',
        type=str,
        help='The query to use when streaming results',
        default=None)
    parser.add_argument(
        '--account-filename', '-af',
        type=str,
        help='The filename to store compressed account JSON data',
        default='accounts.json.gz')
    parser.add_argument(
        '--stdout',
        action='store_true',
        dest='stdout',
        help='Print JSON to stdout instead of a file',
        default=False)
    return parser.parse_args()
Python
def start_lookup(api, lookup_queue, account_queue):
    """Iterates through results in a lookup queue, fetching and returning
    the hydrated account objects.

    This service looks up user IDs 100 at a time per Twitter's API.

    Arguments:
        api -- The Twitter API client used for the `lookup_users` calls
        lookup_queue {queue.Queue} -- The queue to receive lookup requests on
        account_queue {queue.Queue} -- The queue to send hydrated accounts
    """
    logger.info('Account lookup service started')
    try:
        account_batch = []
        # This is a mirror of account_batch but includes the original
        # source so we can tell where an account came from.
        account_sources = {}
        while True:
            account = lookup_queue.get()
            account_id = account['id_str']
            source = account['_tbsource']

            # This probably won't happen, but it'll save us a request anyway
            if account_id in account_sources:
                continue

            account_batch.append(account_id)
            account_sources[account_id] = source

            if len(account_batch) >= 100:
                try:
                    results = api.lookup_users(
                        user_ids=account_batch[:100], include_entities=True)
                except Exception as e:
                    # If an exception occurs, we should probably cool off for
                    # a few seconds and then go ahead and prune out the
                    # results.
                    logger.error(e)
                    logger.info(
                        'Error fetching users... Sleeping for 10 seconds')
                    time.sleep(10)
                    account_batch = account_batch[min(len(account_batch), 100):]
                    continue
                for result in results:
                    user = result._json
                    # Backfill the original source
                    user['_tbsource'] = account_sources.pop(user['id_str'])
                    account_queue.put(user)
                account_batch = account_batch[min(len(account_batch), 100):]
    except KeyboardInterrupt:
        return
Python
async def hasattr_(info, r_data, r_attr):
    """Check that an object has an attribute."""
    data, attr = await info.abstracts()
    if isinstance(data, lib.AbstractUnion):
        return await _union_make(info, operations.hasattr)
    attr_v = await info.build(r_attr)
    case, *_ = await _attr_case(info, data, attr_v)
    return Constant(case == "field" or case == "method")
Python
async def infer_dict_getitem(
    self, engine, arg: lib.AbstractDict, idx: xtype.String
):
    """Infer the return type of primitive `dict_getitem`."""
    idx_v = self.require_constant(idx, argnum=2,
                                  range=set(arg.entries.keys()))
    return arg.entries[idx_v]
Python
def make_signature(self, args):
    """Make the signature.

    The signature is a pair with the first element being the signature
    generated from self.fn and the second a boolean saying whether there
    is a dout argument or not.
    """
    aliases = defaultdict(list)
    if self.sum_aliases:
        for i, arg in enumerate(args):
            for elem, getter in generate_getters(arg, ROOT):
                aid = elem.values.get(ALIASID, None)
                if aid is not None:
                    assert aid is not lib.ANYTHING
                    aliases[aid].append((i, getter))
    aliases = tuple(sorted((k, tuple(v)) for k, v in aliases.items()))
    if (
        len(args) > 0
        and isinstance(args[-1], lib.AbstractKeywordArgument)
        and args[-1].key == "dout"
    ):
        dout = "kw"
    else:
        dout = self.dout_parameter
    if dout:
        args = args[:-1]
    if any(isinstance(arg, lib.AbstractKeywordArgument) for arg in args):
        raise MyiaTypeError(
            "Only 'dout' is valid as a keyword argument in a"
            " grad-transformed function."
        )
    if isinstance(self.fn, (Graph, MetaGraph)):
        sig = self.fn.make_signature(args)
    else:
        sig = (len(args),)
    return sig, dout, aliases
Python
def generate_graph(self, sig):
    """Make the graph for the grad.

    If wrt is an integer, the wrt-th gradient will be returned directly.
    If it is a tuple of integers, then a tuple of the specified gradients
    will be returned in the same order (duplicates are allowed).

    If self.return_value is True, a tuple will always be returned and the
    first element will be the return value of the function. The other
    elements will be the gradients.
    """
    gsig, dout, aliases = sig
    if isinstance(self.fn, (Graph, MetaGraph)):
        g = self.fn.generate_graph(gsig)
        dbg = g.debug
        nargs = len(g.parameters)
        orig_parameters = g.parameters
        orig_parameter_names = g.parameter_names
    else:
        g = self.fn
        dbg = NamedDebugInfo()
        (nargs,) = gsig
        orig_parameters = [Parameter(None) for _ in range(nargs)]
        orig_parameter_names = None

    def _getindex(wrt):
        if wrt == "*":
            raise MyiaTypeError("'*' in grad must be the only parameter")
        elif isinstance(wrt, str):
            try:
                return orig_parameter_names.index(wrt)
            except ValueError:
                raise MyiaTypeError(f"{g} has no argument named '{wrt}'")
        elif 0 <= wrt < nargs:
            return wrt
        else:
            raise MyiaTypeError(
                f"Cannot get gradient with respect to argument {wrt}"
                f" for {g} because it is out of range."
            )

    if self.wrt == ["*"]:
        wrt = list(range(nargs))
    else:
        wrt = list(map(_getindex, self.wrt))
    if len(wrt) == 1:
        wrt = wrt[0]
    elif wrt == []:
        wrt = 0

    with About(dbg, "grad"):
        df = Graph()
        df.set_flags("core", "reference")

    jf = df.apply(P.J, g)
    params = []
    for orig_p in orig_parameters:
        with About(orig_p.debug, "grad"):
            params.append(df.add_parameter())
    jparams = [df.apply(P.J, p) for p in params]
    app = df.apply(jf, *jparams)
    out = df.apply(P.Jinv, df.apply(P.tuple_getitem, app, 0))
    bprop = df.apply(P.tuple_getitem, app, 1)

    if dout:
        bprop_arg = df.add_parameter()
        bprop_arg.debug.name = "dout"
        if dout == "kw":
            bprop_arg = df.apply(P.extract_kwarg, "dout", bprop_arg)
    else:
        bprop_arg = df.apply(_cast_helper, 1, out)

    if isinstance(wrt, int):
        direct_return = not self.always_return_tuple
        wrt = [wrt]
    else:
        direct_return = False

    bapp = df.apply(bprop, bprop_arg)
    all_results = [
        df.apply(P.tuple_getitem, bapp, idx + 1) for idx in range(nargs)
    ]
    adjusted = {i: all_results[i] for i in range(nargs)}

    for aid, equivs in aliases:
        contribs = []
        for i, entry in equivs:
            node = sexp_to_node(entry, df, sub={ROOT: all_results[i]})
            contribs.append(node)
        combined = reduce(
            lambda x, y: df.apply(operations.gadd, x, y), contribs
        )
        for i, entry in equivs:
            setter = setter_from_getter(entry, combined)
            node = sexp_to_node(setter, df, sub={ROOT: adjusted[i]})
            adjusted[i] = node

    elems = [out] if self.return_value else []
    elems += [adjusted[idx] for idx in wrt]
    if len(elems) == 1 and direct_return:
        df.output = elems[0]
    else:
        df.output = df.apply(P.make_tuple, *elems)
    return df
def specialize(self, args):
    """Specialize on the types of the given arguments.

    Returns a Pipeline. If the argument types were seen before, returns
    a cached version.
    """
    argnames = inspect.getfullargspec(self.fn).args
    n1 = len(argnames)
    n2 = len(args)
    if n1 != n2:
        raise MyiaTypeError(
            f"Wrong number of arguments: expected {n1}, got {n2}"
        )
    alias_map, aid_to_paths = find_aliases(args, self.alias_tracker)
    argspec = tuple(
        from_value(
            arg,
            broaden=name not in self.specialize_values,
            alias_map=alias_map,
        )
        for arg, name in zip(args, argnames)
    )
    if argspec not in self._cache:
        if self.tracer:
            self.tracer.__enter__()
        self._cache[argspec] = self.pip(
            input=self.fn,
            argspec=argspec,
            aliasspec=(self.alias_tracker, aid_to_paths),
        )
        if self.tracer:
            self.tracer.__exit__(None, None, None)
    return self._cache[argspec]
def to_device(self, v, *, broaden=True, vm_t=None, orig_t=None):
    """Move value to the function's accelerator hardware."""
    backr = self.pip.resources.keywords["backend"].keywords
    return to_device(
        v,
        backr["name"],
        backr["options"],
        broaden=broaden,
        vm_t=vm_t,
        orig_t=orig_t,
    )
def to_device(
    value,
    backend,
    backend_options=None,
    *,
    broaden=True,
    orig_t=None,
    vm_t=None,
):
    """Move value to target accelerator hardware (using selected backend)."""
    if not isinstance(backend, Backend):
        backend = load_backend(backend, backend_options)
    if orig_t is None:
        orig_t = from_value(value, broaden=broaden)
    value = to_canonical(value, orig_t)
    if vm_t is None:
        vm_t = from_value(value, broaden=broaden)
    value = backend.to_backend_value(value, vm_t)
    return BackendValue(value, orig_t, vm_t, backend)
async def check(self, engine, argrefs, uniform=False):
    """Check that the argrefs match the function signature."""
    check_nargs(self.name, self.nargs, argrefs)
    outtype = self.outtype

    async def _force_abstract(x):
        return (await x.get()) if isinstance(x, Reference) else x

    for typ, indexes in self.typemap.items():
        args = [await _force_abstract(argrefs[i]) for i in indexes]
        if uniform:
            res = engine.check(typ, *args)
            if typ == self.outtype:
                outtype = res
            continue
        for arg in args:
            if isinstance(typ, xtype.TypeMeta):
                await force_pending(engine.check(typ, arg.xtype(), typ))
            elif isinstance(typ, type) and issubclass(typ, AbstractValue):
                if not isinstance(arg, typ):
                    raise MyiaTypeError(
                        f"Wrong type {arg} != {typ} for {self.name}"
                    )
            elif callable(typ):
                await force_pending(engine.check(typ, arg))
            else:
                raise AssertionError(f"Invalid annotation: {typ}")
    return outtype
async def build(self, ref, ab=None):
    """Get a constant value from a reference."""
    from .utils import build_value

    if ab is None:
        ab = await ref.get()
    try:
        return build_value(ab)
    except ValueError:
        raise MyiaValueError(
            "Arguments to a myia_static function must be constant",
            refs=[ref],
        )
def to_opdef(fn):
    """Create an operation definition from a function."""
    name = fn.__name__
    return OperationDefinition(
        name=name, registered_name=name, mapping=fn, python_implementation=None
    )
def elemwise(name, op, infer_value=False):
    """Define an elemwise operation on one or more arrays."""
    hm = HyperMap(
        name=name,
        fn_leaf=op,
        nonleaf=(lib.AbstractArray,),
        infer_value=infer_value,
    )
    return OperationDefinition(
        name=name, registered_name=name, mapping=hm, python_implementation=None
    )
def make_leaf(self, g, fnarg, argmap):
    """Generate the expression for a leaf."""
    if fnarg is None:
        fnarg = self.fn_leaf
    return g.apply(fnarg, *argmap.keys())
def generate_graph(self, all_args):
    """Create a graph for mapping over the given args."""
    g = Graph()
    g.debug.name = self.name
    g.set_flags("core", "reference", metagraph=self)
    argmap = {}
    if self.fn_leaf is None:
        fn_t, *args = all_args
        fnarg = g.add_parameter()
    else:
        args = all_args
        fnarg = None
    for a in args:
        argmap[g.add_parameter()] = (a, self._is_leaf(a))
    g.output = self._generate_helper(g, fnarg, argmap)
    return g
def generate_data(n, batch_size, input_size, target_size, *, seed=87):
    """Generate inputs and targets.

    Generates n batches of samples of size input_size, matched with
    a single target.
    """
    R = RandomState(seed=seed)
    return [
        (param(R, batch_size, input_size), param(R, batch_size, target_size))
        for i in range(n)
    ]
def mlp_parameters(*layer_sizes, seed=90909):
    """Generate parameters for an MLP given a list of layer sizes."""
    R = RandomState(seed=seed)
    parameters = []
    for i, o in zip(layer_sizes[:-1], layer_sizes[1:]):
        W = param(R, i, o)
        b = param(R, 1, o)
        parameters.append((W, b))
    return parameters
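# A small usage sketch wiring the two helpers above. This assumes the
# `param` helper they rely on (defined elsewhere in these test
# utilities) is in scope; the sizes are illustrative only.
data = generate_data(n=10, batch_size=5, input_size=8, target_size=1)
parameters = mlp_parameters(8, 16, 1)
assert len(data) == 10
assert len(parameters) == 2  # one (W, b) pair per layer transition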
def D(__d=None, **dct):
    """Generate a symbolic dict from the given dict."""
    if __d is None:
        d = {}
    else:
        d = __d
    d.update(**dct)
    keys = list(d.keys())
    values = list(to_abstract_test(v) for v in d.values())
    return AbstractDict(dict(zip(keys, values)))
async def infer_record_getitem(
    self, engine, data: lib.AbstractClassBase, attr: xtype.String
):
    """Infer the return type of primitive `record_getitem`."""
    attr_v = self.require_constant(attr, argnum=2)
    return data.attributes[attr_v]
def matches(self, value) -> bool:
    """Return True if the variable matches the value given.

    Note that this relation is transitive, but not symmetric.
    """
    return True
def intersection(self, v):
    """Return the intersection with the given variable.

    Returns:
        * A variable that matches only the values both self and v match.
        * `False` if it can be proven that self and v are mutually
          exclusive.
        * NotImplemented, in which case one may try `v.intersection(self)`.
    """
    return NotImplemented
def matches(self, value) -> bool:
    """Check if the provided value matches the SVar."""
    if isinstance(value, SVar):
        return self.subtype.matches(value.subtype)
    elif isinstance(value, Seq):
        return all(self.subtype.matches(v) for v in value)
    else:
        return False
def matches(self, value) -> bool:
    """Return True if the variable matches the value."""
    if isinstance(value, RestrictedVar):
        return all(v in self.legal_values for v in value.legal_values)
    return value in self.legal_values
def intersection(self, v):
    """Return the intersection of two RestrictedVars.

    The resulting variable's legal values are the intersection of
    self and v's legal values.
    """
    if isinstance(v, RestrictedVar):
        lv = set(self.legal_values)
        lv2 = set(v.legal_values)
        common = lv & lv2
        if common == lv:
            return self
        elif common == lv2:
            return v
        elif common:
            return RestrictedVar(common)
        else:
            return False
    else:
        return NotImplemented
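# A quick sketch of the intersection semantics above, assuming the
# RestrictedVar class defined in this module:
a = RestrictedVar({1, 2, 3})
b = RestrictedVar({2, 3, 4})
c = a.intersection(b)  # matches only values in {2, 3}
assert c.matches(2) and not c.matches(1)
assert a.intersection(RestrictedVar({9})) is False  # provably disjoint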
def matches(self, value) -> bool:
    """Return True if the variable matches the value."""
    if isinstance(value, RestrictedVar):
        return all(self.filter(v) for v in value.legal_values)
    if isinstance(value, FilterVar):
        return self.filter == value.filter
    return self.filter(value)
def intersection(self, v):
    """Return the intersection of two FilterVars.

    The resulting variable tests that both self and v's filters
    return true.
    """
    if isinstance(v, FilterVar):
        if self.filter == v.filter:
            return self
        return FilterVar(PredicateSet(self.filter, v.filter))
    else:
        return NotImplemented
def var(filter: FilterT = None) -> Var:
    """Create a variable for unification purposes.

    Arguments:
        filter: A predicate, or a set of values the variable is
            allowed to take.
    """
    if callable(filter):
        return FilterVar(filter)
    elif filter is not None:
        return RestrictedVar(filter)
    else:
        return Var()
def svar(subtype: Var = None) -> SVar:
    """Create an SVar (can match 0 or more items).

    Items must match the subtype.
    """
    return SVar(subtype)
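# var() dispatches on its argument; a sketch assuming the Var,
# RestrictedVar, FilterVar and SVar classes defined above:
assert isinstance(var(), Var)
assert isinstance(var({1, 2}), RestrictedVar)       # set of legal values
assert isinstance(var(lambda x: x > 0), FilterVar)  # predicate
assert isinstance(svar(var()), SVar)                # matches 0+ items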
def expandlist(lst: Iterable[T]) -> List[T]:
    """Flatten the Seq instances in a sequence."""
    lst = list(lst)
    off = 0
    for i, e in enumerate(list(lst)):
        if isinstance(e, Seq):
            lst[off + i : off + i + 1] = e
            off += len(e) - 1
    return lst
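# Flattening sketch for expandlist. `Seq` below is a hypothetical
# stand-in behaving like the tuple subclass this module actually uses:
class Seq(tuple):
    pass

assert expandlist([1, Seq((2, 3)), 4]) == [1, 2, 3, 4]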
def noseq(fn: Callable[[T], T], u: T) -> T:
    """Make sure that there are no Seq in the value."""
    um = fn(u)
    if isinstance(um, Seq):
        raise TypeError("Multiple values in single-value position")
    return um
def clone(self, v: T, copy_map: Dict = None) -> T:
    """Return a copy of a templated type structure.

    Types are passed through without modification, variables are
    duplicated with a new id.

    This preserves relationships between variables like this::

        clone(Tuple(v1, v1)) -> Tuple(v2, v2)

    Arguments:
        v: expression
        copy_map: Dictionary of variable mappings
    """
    if copy_map is None:
        copy_map = {}
    return self._clone(v, copy_map)
def unify_raw(self, w, v, equiv: EquivT) -> EquivT:
    """'raw' interface for unification.

    The `equiv` argument is modified in-place.

    Arguments:
        w: An expression
        v: An expression
        equiv: A dictionary of variable equivalences.

    Returns:
        The equivalence dictionary

    Raises:
        UnificationError
            If the expressions are not compatible. The dictionary may
            contain partial matching in this case and should no longer
            be used for further unification.

    Note:
        There must not be loops in the equivalence relationships
        described by `equiv` or this function might never return.
    """
    w = self._getvar(w)
    v = self._getvar(v)
    while isinstance(w, Var) and w in equiv:
        w = equiv[w]
    while isinstance(v, Var) and v in equiv:
        v = equiv[v]
    if self.eq(w, v):
        return equiv
    if isinstance(w, UnionVar):
        return self.unify_union(w, v, equiv)
    if isinstance(v, UnionVar):
        return self.unify_union(v, w, equiv)
    if isinstance(v, Var) and isinstance(w, Var):
        u = v.intersection(w)
        if u is NotImplemented:
            u = w.intersection(v)
        if u is False:
            raise UnificationError("Incompatible variables")
        if u is not NotImplemented:
            assert isinstance(u, Var)
            if u is not v:
                equiv[v] = u
            if u is not w:
                equiv[w] = u
            return equiv
    if isinstance(w, Var):
        if w.matches(v):
            equiv[w] = v
            return equiv
    if isinstance(v, Var):
        if v.matches(w):
            equiv[v] = w
            return equiv
    if type(v) != type(w):
        raise UnificationError("Type match error")
    if isinstance(v, Seq) and isinstance(w, Seq):
        values_v = list(v)
        values_w = list(w)
    else:

        def appender(lst):
            def fn(u):
                lst.append(self._getvar(u))
                return u

            return fn

        try:
            values_v = []
            self.visit(appender(values_v), v)
            values_w = []
            self.visit(appender(values_w), w)
        except VisitError:
            raise UnificationError("Cannot visit elements")
    sv = -1
    sw = -1
    for i, vv in enumerate(values_v):
        if isinstance(vv, SVar):
            if sv != -1:
                raise UnificationError("Multiple SVars in sequence")
            sv = i
    for i, vw in enumerate(values_w):
        if isinstance(vw, SVar):
            if sw != -1:
                raise UnificationError("Multiple SVars in sequence")
            sw = i
    if sv != -1 and sw != -1:
        if len(values_v) == len(values_w) and sv == sw:
            self.unify_raw(values_w[sw], values_v[sv], equiv)
            values_v.pop(sv)
            values_w.pop(sw)
        else:
            raise UnificationError("SVars in both sides of the match")
    if sv != -1 and len(values_w) >= len(values_v) - 1:
        wb = values_w[:sv]
        diff = len(values_w) - len(values_v) + 1
        wm = Seq(values_w[sv : sv + diff])
        we = values_w[sv + diff :]
        values_w = wb + [wm] + we
    if sw != -1 and len(values_v) >= len(values_w) - 1:
        vb = values_v[:sw]
        diff = len(values_v) - len(values_w) + 1
        vm = Seq(values_v[sw : sw + diff])
        ve = values_v[sw + diff :]
        values_v = vb + [vm] + ve
    if len(values_w) != len(values_v):
        raise UnificationError("Structures of differing size")
    for wi, vi in zip(values_w, values_v):
        equiv = self.unify_raw(wi, vi, equiv)
    return equiv
def unify(self, w, v, equiv: EquivT = None) -> EquivT:
    """Unify two expressions.

    After a match is found, this will post-process the dictionary to
    set all the equivalences to their transitive values.

    Arguments:
        w: expression
        v: expression
        equiv: Dictionary of pre-existing equivalences.

    Returns:
        The equivalence dictionary if a match is found, None otherwise.

    Note:
        There must not be loops in the equivalence relationships
        described by `equiv` or this function will never return.
    """
    if equiv is None:
        equiv = {}
    try:
        equiv = self.unify_raw(w, v, equiv)
    except UnificationError:
        return None
    # Set all keys to their transitive values
    ks = set(equiv.keys())
    for k in ks:
        init_k = k
        while k in equiv:
            k = equiv[k]
        equiv[init_k] = k
    return equiv
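# The transitive-closure post-processing step in unify, shown in
# isolation on a plain dict:
equiv = {"a": "b", "b": "c", "d": "c"}
for k in set(equiv):
    init_k = k
    while k in equiv:
        k = equiv[k]
    equiv[init_k] = k
assert equiv == {"a": "c", "b": "c", "d": "c"}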
def reify(self, v, equiv: EquivT) -> Any:
    """Fill in an expression according to the equivalences given.

    Arguments:
        v: expression
        equiv: equivalence relationships (transitively mapped)

    Note:
        This expects the dictionary of equivalences to be transitively
        transformed to work properly. `unify` does this automatically
        on its return value, but if you use unify_raw directly, you
        have to take care of this.
    """
    v = self._getvar(v)
    if v in equiv:
        return equiv[v]
    try:
        return self.visit(lambda u: self.reify(u, equiv), v)
    except VisitError:
        return v
def as_frozen(x):
    """Return an immutable representation for x."""
    if isinstance(x, dict):
        return tuple(sorted((k, as_frozen(v)) for k, v in x.items()))
    else:
        assert not isinstance(x, (list, tuple))
        return x
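# as_frozen produces a hashable, order-independent key for nested dicts:
frozen = as_frozen({"b": 2, "a": {"x": 1}})
assert frozen == (("a", (("x", 1),)), ("b", 2))
assert hash(frozen) == hash(as_frozen({"a": {"x": 1}, "b": 2}))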
def make_subtype(cls, **params):
    """Low-level parameterization function.

    The named parameters correspond to the fields declared in the
    Type's annotations.
    """
    fields = cls._fields
    if not fields:
        raise TypeError(f"{cls} cannot be parameterized.")
    elif cls._params is not None:
        raise TypeError(f"{cls} is already instantiated")
    elif list(params.keys()) != fields:
        raise TypeError("Invalid type parameterization")
    else:
        key = (cls, as_frozen(params))
        if key in _type_cache:
            return _type_cache[key]
        rval = type(cls.__qualname__, (cls,), {"_params": params})
        for k, v in params.items():
            setattr(rval, k, v)
        _type_cache[key] = rval
        return rval
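# Sketch of the caching behavior: parameterizing twice with the same
# arguments returns the same subclass object. Assumes a Type subclass
# `Int` declaring a `bits` field, as elsewhere in this module:
i64_a = Int.make_subtype(bits=64)
i64_b = Int.make_subtype(bits=64)
assert i64_a is i64_b and i64_a.bits == 64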
def parameterize(cls, bits):
    """Parameterize using a number of bits."""
    if not cls._valid_bits:
        raise RuntimeError(f"Can't parameterize {cls.__name__} directly")
    if bits not in cls._valid_bits:
        raise ValueError(f"Unsupported number of bits: {bits}")
    return cls.make_subtype(bits=bits)
def to_numpy(self, x):
    """Check that x is a numpy.ndarray and return it unchanged."""
    if not isinstance(x, numpy.ndarray):
        raise MyiaInputTypeError(f"Expected numpy.ndarray but got {x}.")
    return x
def np_dtype_to_type(dtype):
    """Map a numpy type string to a myia type."""
    if dtype not in DTYPE_TO_MTYPE:
        raise TypeError(f"Unsupported dtype {dtype}")
    return DTYPE_TO_MTYPE[dtype]
def type_to_np_dtype(type):
    """Map a myia type to a numpy type string."""
    if type not in MTYPE_TO_DTYPE:
        raise TypeError(f"Can't convert to NumPy dtype {type}")
    return MTYPE_TO_DTYPE[type]
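# Round-trip sketch, assuming 'float32' is present in both mapping tables:
t = np_dtype_to_type("float32")
assert type_to_np_dtype(t) == "float32"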
def pytype_to_myiatype(pytype):
    """Convert a Python type into a Myia type.

    Arguments:
        pytype: The Python type to convert.
    """
    return _simple_types[pytype]
async def infer_reshape(
    self, engine, a: AbstractArray, _shp: u64tup_typecheck
):
    """Infer the return type of primitive `reshape`."""
    shp = build_value(_shp, default=ANYTHING)
    if shp == ANYTHING:
        shp = (ANYTHING,) * len(_shp.elements)
    a_shp = await force_pending(a.xshape())
    if (
        all(s is not ANYTHING for s in shp)
        and all(s is not ANYTHING for s in a_shp)
        and _prod(shp) != _prod(a_shp)
    ):
        raise MyiaShapeError(
            "Cannot change the total number of elements in reshape"
        )
    return type(a)(a.element, {SHAPE: shp, TYPE: a.xtype()})
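# The total-size guard above, in isolation: reshape is rejected only
# when both shapes are fully known and their element counts differ.
from math import prod
assert prod((2, 3)) == prod((3, 2))  # allowed: 6 elements either way
assert prod((2, 3)) != prod((4,))    # rejected with MyiaShapeError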
async def infer_J(self, engine, x):
    """Infer the return type of primitive `J`."""
    if isinstance(x, AbstractFunction):
        v = await x.get()
        return AbstractFunction(
            *[TransformedFunction(poss, P.J) for poss in v]
        )
    elif isinstance(x, AbstractFunctionUnique):
        vfn = type_fixer(None)(
            TransformedFunction(AbstractFunctionUnique(x.args, x.output), P.J)
        )
        assert isinstance(vfn, AbstractFunctionUnique)
        return vfn
    else:
        return AbstractJTagged(x)
def from_value(v, broaden=False, **kwargs):
    """Convert a value to an abstract value.

    Arguments:
        v: The value to convert.
        broaden: If True, concrete values will be made more abstract,
            so e.g. the value 1234 would become ANYTHING.
    """
    a = to_abstract(v, **kwargs)
    if broaden:
        a = _broaden(a)
    return a
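# Broadening discards concrete values while keeping type structure.
# (Illustrative: the exact AbstractScalar contents depend on this
# module's abstract-value representation.)
a = from_value(1234)                # VALUE: 1234, TYPE: an int type
b = from_value(1234, broaden=True)  # VALUE: ANYTHING, TYPE: the same type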
def to_abstract(self, v: AbstractValue, **kwargs):
    """Translate the value to an abstract value.

    Arguments:
        v: The value to convert.
        context: The context in which the value was found, used if the
            value is a Graph.
        node: The node for the Constant we are converting, if there is
            one, so that we can generate a tracking_id.
        loop: The InferenceLoop, or None. If not None, scalar ints or
            floats will be given a Pending type so that they can adapt
            to the types of the variables they interact with.
    """
    return AbstractType(v)
def type_to_abstract(self, t: xtype.TypeMeta):
    """Convert a type to an AbstractValue.

    If the value is already an AbstractValue, returns it directly.
    """
    return self[(t,)](t)
def pytype_to_abstract(main: tuple, args):
    """Convert a Python type to an AbstractValue."""
    if args == () or args is None:
        targs = ANYTHING
    elif args == ((),):
        targs = []
    else:
        targs = [type_to_abstract(a) for a in args]
    return AbstractTuple(targs)
def rstate_and_compute(rstate, x):
    """Use and return an rstate along with a computed value."""
    _rs, _val = random_uint32(rstate, (2, 3))
    _val = P.array_cast(_val, xtype.i64)
    return _rs, x * np.sum(_val).item()
def step_rstate_and_compute(rstate, x):
    """Compiled myia function that returns an rstate with a computed value."""
    # Here it seems mandatory to use the `dout` parameter to get the grad,
    # to help myia handle the rstate grad correctly.
    (_rs, _val), _grad = value_and_grad(rstate_and_compute, "x")(
        rstate, x, dout=(1, 1)
    )
    return _rs, _val + _grad
def only_compute(rstate, x):
    """Use rstate but return only the computed value."""
    _, _val = random_uint32(rstate, (2, 3))
    _val = P.array_cast(_val, xtype.i64)
    return x * np.sum(_val).item()
def step_only_compute(rstate, x):
    """Compiled myia function that returns only a computed value."""
    # Here dout seems not needed, as rstate is not returned.
    _val, _grad = value_and_grad(only_compute, "x")(rstate, x)
    return _val + _grad
def conv2d_weight(
    input, weight_size, grad_output, stride, padding, dilation, groups
):
    r"""Computes gradient of conv2d with respect to the weight.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size: Shape of the weight gradient tensor
        grad_output: output gradient tensor
            (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution.
            Default: 1
        padding (int or tuple, optional): Zero-padding added to both
            sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1

    Examples:
        >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
        >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv2d_weight(input, weight.shape, grad_output)
    """
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]

    grad_output = grad_output.contiguous().repeat(
        1, in_channels // groups, 1, 1
    )
    grad_output = grad_output.contiguous().view(
        grad_output.shape[0] * grad_output.shape[1],
        1,
        grad_output.shape[2],
        grad_output.shape[3],
    )

    input = input.contiguous().view(
        1, input.shape[0] * input.shape[1], input.shape[2], input.shape[3]
    )

    grad_weight = torch.conv2d(
        input,
        grad_output,
        None,
        dilation,
        padding,
        stride,
        in_channels * min_batch,
    )

    grad_weight = grad_weight.contiguous().view(
        min_batch,
        grad_weight.shape[1] // min_batch,
        grad_weight.shape[2],
        grad_weight.shape[3],
    )

    if groups > 1:
        return (
            grad_weight.sum(dim=0)
            .view(
                out_channels,
                in_channels // groups,
                grad_weight.shape[2],
                grad_weight.shape[3],
            )
            .narrow(2, 0, weight_size[2])
            .narrow(3, 0, weight_size[3])
        )
    else:
        return (
            grad_weight.sum(dim=0)
            .view(
                in_channels // groups,
                out_channels,
                grad_weight.shape[2],
                grad_weight.shape[3],
            )
            .transpose(0, 1)
            .narrow(2, 0, weight_size[2])
            .narrow(3, 0, weight_size[3])
        )
async def infer_random_uint32(
    self, engine, rstate: AbstractRandomState, shape: AbstractTuple
):
    """Infer the return type of primitive `random_uint32`."""
    output_scalar_type = AbstractScalar({VALUE: ANYTHING, TYPE: xtype.u32})
    output_shape = tuple(
        self.require_constant(e, argnum=f'"3:size[{edx}]"')
        for edx, e in enumerate(shape.elements)
    )
    value_type = AbstractArray(
        output_scalar_type, {SHAPE: output_shape, TYPE: xtype.NDArray}
    )
    return AbstractTuple((AbstractRandomState(), value_type))
def _export_Graph(self, g):
    """Return an object that executes `g` when called on arguments."""
    c = Closure(g, None)
    c.vm = self
    return c
def evaluate(
    self,
    graph: Graph,
    _args: Iterable[Any],
    *,
    closure: Mapping[ANFNode, Any] = None,
) -> Any:
    """Run a graph.

    This will evaluate the passed-in graph and return the resulting
    value.
    """
    args = self.convert(tuple(_args))
    self._acquire_graph(graph)
    if len(args) != len(graph.parameters):
        raise RuntimeError("Call with wrong number of arguments")
    top_frame = VMFrame(
        toposort(graph.return_, self._succ_vm(graph)),
        dict(zip(graph.parameters, args)),
        closure=closure,
    )
    frames = [top_frame]
    while frames:
        try:
            frame = frames[-1]
            todo = frame.todo
            while todo:
                self._handle_node(todo[-1], frame)
                todo.pop()
        except self._Call as c:
            # The last element of todo is always a return
            if len(todo) == 2:
                frames[-1] = c.frame
            else:
                frames.append(c.frame)
        except self._Return as r:
            frames.pop()
            if frames:
                frames[-1].values[frames[-1].todo[-1]] = r.value
                frames[-1].todo.pop()
            else:
                return self.export(r.value)
def _succ_vm(self, graph):
    """Return a visitor for the graph."""

    def succ(node: ANFNode) -> Iterable[ANFNode]:
        """Follow node.incoming and free variables."""
        for i in node.inputs:
            if (
                i.graph == node.graph
                or i.is_constant_graph()
                and i.value.parent == graph
            ):
                yield i
        if node.is_constant_graph() and node.value.parent == graph:
            for v in self._vars[node.value]:
                if v.graph is graph or v.graph is None:
                    yield v

    return succ
async def infer_array_max(
    self, engine, input: lib.AbstractArray, dim: lib.u64tup_typecheck
):
    """Infer the return type of primitive `array_max`."""
    shp_inp = input.xshape()
    dim = tuple(
        self.require_constant(e, argnum=f'"1:dim[{edx}]"')
        for edx, e in enumerate(dim.elements)
    )
    shp = list(shp_inp)
    for d in dim:
        shp[d] = 1
    shp = tuple(shp)
    return type(input)(input.element, {SHAPE: shp, TYPE: input.xtype()})
def conv_transpose2d(
    input,
    weight,
    bias,
    stride=1,
    padding=0,
    output_padding=0,
    groups=1,
    dilation=1,
):
    """Map of Pytorch method torch.nn.functional.conv_transpose2d."""
    ret = P.conv_transpose2d(
        input, weight, stride, padding, output_padding, groups, dilation
    )
    if bias is not None:
        ret = ret + reshape(bias, (1, bias.shape[0], 1, 1))
    return ret
def linear(input, weight, bias=None):
    r"""Applies a linear transformation to the incoming data.

    :math:`y = xA^T + b`

    Shape:
        - Input: :math:`(N, *, in\_features)` where `*` means any number
          of additional dimensions
        - Weight: :math:`(out\_features, in\_features)`
        - Bias: :math:`(out\_features)`
        - Output: :math:`(N, *, out\_features)`
    """
    if input.dim() == 2 and bias is not None:
        # fused op is marginally faster
        ret = bias + input @ weight.t()
    else:
        output = input @ weight.t()
        if bias is not None:
            output = output + bias
        ret = output
    return ret
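# Quick numeric check that both branches of linear agree with the
# unfused formula (requires torch):
import torch
x, W, b = torch.randn(4, 3), torch.randn(5, 3), torch.randn(5)
assert torch.allclose(linear(x, W, b), x @ W.t() + b, atol=1e-6)
assert torch.allclose(linear(x, W), x @ W.t(), atol=1e-6)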
def uniform(rstate, size, _min, _max, dtype=f32):
    """Return uniform samples bounded by _min and _max, plus the new rstate."""
    r0, v0 = P.random_uint32(rstate, size)
    _min = P.scalar_to_array(_min, AA)
    _max = P.scalar_to_array(_max, AA)
    _min = P.array_cast(_min, dtype)
    _max = P.array_cast(_max, dtype)
    rand_range = _max - _min
    v0 = P.array_cast(v0, dtype)
    return (v0 * (rand_range / 4294967296)) + _min, r0
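# Why the division by 4294967296: random_uint32 yields integers in
# [0, 2**32), so v0 / 2**32 lies in [0, 1) and the affine map above
# lands in [_min, _max). Plain-Python sketch:
v0 = 1234567890
_min, _max = -1.0, 1.0
sample = v0 * ((_max - _min) / 4294967296) + _min
assert _min <= sample < _max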
async def infer_conv_transpose2d(
    self,
    engine,
    input: AbstractArray,
    weight: AbstractArray,
    stride: AbstractTuple,
    padding: AbstractTuple,
    output_padding: AbstractTuple,
    groups: AbstractScalar,
    dilation: AbstractTuple,
):
    """Infer output array type and shape."""
    n, c_in, h_in, w_in = await force_pending(input.xshape())
    stride = tuple(
        self.require_constant(e, argnum=f'"2:stride[{edx}]"')
        for edx, e in enumerate(stride.elements)
    )
    padding = tuple(
        self.require_constant(e, argnum=f'"3:padding[{edx}]"')
        for edx, e in enumerate(padding.elements)
    )
    output_padding = tuple(
        self.require_constant(e, argnum=f'"4:output_padding[{edx}]"')
        for edx, e in enumerate(output_padding.elements)
    )
    groups = self.require_constant(groups, argnum="5:groups")
    dilation = tuple(
        self.require_constant(e, argnum=f'"6:dilation[{edx}]"')
        for edx, e in enumerate(dilation.elements)
    )
    _, c_out_per_group, kh, kw = await force_pending(weight.xshape())
    c_out = c_out_per_group * groups
    h_out = int(
        (h_in - 1) * stride[0]
        - 2 * padding[0]
        + dilation[0] * (kh - 1)
        + output_padding[0]
        + 1
    )
    w_out = int(
        (w_in - 1) * stride[1]
        - 2 * padding[1]
        + dilation[1] * (kw - 1)
        + output_padding[1]
        + 1
    )
    output_shape = n, c_out, h_out, w_out
    weight_type = await force_pending(weight.xtype())
    return AbstractArray(
        weight.element, {SHAPE: output_shape, TYPE: weight_type}
    )
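# The h_out/w_out formulas above match PyTorch's ConvTranspose2d
# documentation; a quick cross-check (requires torch):
import torch
m = torch.nn.ConvTranspose2d(
    1, 1, kernel_size=3, stride=2, padding=1, output_padding=1
)
h_in = 5
h_out = (h_in - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 1 + 1  # -> 10
assert m(torch.zeros(1, 1, h_in, h_in)).shape[-1] == h_out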
async def infer_make_dict(self, engine, _dct: lib.AbstractType, *values):
    """Infer the return type of primitive `make_dict`."""
    dct = _dct.element
    assert len(dct.entries) == len(values)
    for t, elem in zip(dct.entries.values(), values):
        assert typecheck(t, elem)
    return AbstractDict(
        dict((key, val) for key, val in zip(dct.entries.keys(), values))
    )
def generate_function(impl, inputs):
    """Generate a relay function.

    Use the impl callback with inputs as parameters to get a symbolic
    output, then generate a relay function from the inputs and output.

    :type impl: callable
    :type inputs: list
    """
    output = impl(*inputs)
    return relay.Function(list(inputs), output)
def generate_relay_counter_array(self, counter):
    """Generate relay symbolic uint64 counter array for Philox2x32 RNG.

    Generate a relay vector of 64-bit integers which encodes the couples
    (counter, i) for i in range(n).

    counter must be a relay expression (e.g. a relay constant or
    variable).
    """
    c = relay.cast(counter, "uint64")
    b = relay.op.transform.full(c, (self.n,), "uint64")
    d = relay.left_shift(b, RELAY_UINT64_32)
    e = relay.arange(relay.const(self.n, "uint64"), dtype="uint64")
    return relay.bitwise_or(d, e)
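# The same counter encoding in plain NumPy, for intuition (not relay):
import numpy as np

def counter_array(counter, n):
    # high 32 bits hold the counter, low 32 bits hold the index
    return (np.uint64(counter) << np.uint64(32)) | np.arange(n, dtype=np.uint64)

assert counter_array(3, 2).tolist() == [(3 << 32) | 0, (3 << 32) | 1]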
def __impl_philox_2x_round(self, ctr, key):
    """Compute a round in Philox2x32.

    :param ctr: uint64 vector
    :param key: uint32 scalar
    :return: uint64 vector with the round applied
    """
    ctr_0 = relay.right_shift(ctr, RELAY_UINT64_32)
    ctr_1 = relay.bitwise_and(ctr, RELAY_UINT64_CLEAR_HIGH)
    # mul_hi_lo
    product = relay.multiply(RELAY_PHILOX_M2x32_0, ctr_0)
    key_64 = relay.cast(key, "uint64")
    ctr_1_xor_key = relay.bitwise_xor(ctr_1, key_64)
    ctr_1_xor_key_up = relay.left_shift(ctr_1_xor_key, RELAY_UINT64_32)
    return relay.bitwise_xor(product, ctr_1_xor_key_up)
def __uint64_to_2xuint32_vector(self, ctr):
    """Convert a uint64 vector to a corresponding uint32 vector.

    A uint64 vector with size n is converted to a uint32 vector with
    size 2n. Each uint64 is split into a pair (32 high bits, 32 low
    bits). Output values keep the input order, i.e., both values from
    a uint64 remain consecutive in the output vector.
    """
    hi = relay.right_shift(ctr, RELAY_UINT64_32)
    lo = relay.bitwise_and(ctr, RELAY_UINT64_CLEAR_HIGH)
    hi_32 = relay.cast(hi, "uint32")
    lo_32 = relay.cast(lo, "uint32")
    vector_hi_32 = relay.reshape(hi_32, (self.n, 1))
    vector_lo_32 = relay.reshape(lo_32, (self.n, 1))
    tensor = relay.concatenate([vector_hi_32, vector_lo_32], 1)
    return relay.reshape(tensor, (2 * self.n))
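# Plain-NumPy illustration of the hi/lo split performed above:
import numpy as np
v = np.array([(1 << 32) | 2, (3 << 32) | 4], dtype=np.uint64)
hi = (v >> np.uint64(32)).astype(np.uint32)
lo = (v & np.uint64(0xFFFFFFFF)).astype(np.uint32)
assert np.stack([hi, lo], 1).reshape(-1).tolist() == [1, 2, 3, 4]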
def philox_2x(self, ctr, key):
    """Generate random values, with 10 as the default number of rounds.

    :param ctr: counter array: uint64 vector
    :param key: key: uint32 scalar
    :return: random values in a uint32 vector with the expected output
        size.
    """
    output_64 = self.impl_philox_2x_r(PHILOX2x32_DEFAULT_ROUNDS, ctr, key)
    output = self.__uint64_to_2xuint32_vector(output_64)
    if self.output_size % 2 == 1:
        output = relay.op.transform.strided_slice(
            output, [0], [2 * self.n - 1]
        )
    return output
def eqtest(t1: torch.Tensor, t2, rtol=1e-5, atol=1e-8, **kwargs):
    """New version of eqtest using np.testing.assert_allclose."""
    np.testing.assert_allclose(
        t1.detach().numpy(),
        t2.detach().numpy(),
        rtol=rtol,
        atol=atol,
        verbose=True,
    )
    return True
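# Example: differences within the default tolerances pass (requires torch):
import torch
assert eqtest(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.0 + 1e-9]))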
def loader_callable_from_pkg(cls, pkg):
    """Return a function that creates a new backend loader.

    :param pkg: module name (example myia.compile.backends.relay).
        Module must provide 2 functions:

        - `load_options` for `__init__`'s `defaults_fn` parameter
        - `load_backend` for `__init__`'s `load_fn` parameter

    :return: a callable (with no arguments) that will generate and
        return a BackendLoader object.
    """

    def loader():
        module = importlib.import_module(pkg)
        load_options = getattr(module, "load_options")
        load_backend = getattr(module, "load_backend")
        return cls(load_fn=load_backend, defaults_fn=load_options)

    return loader
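# Hypothetical usage, assuming this classmethod lives on a BackendLoader
# class (as the `cls` parameter suggests); the package path is only an
# example:
make_loader = BackendLoader.loader_callable_from_pkg(
    "myia.compile.backends.relay"
)
loader = make_loader()  # imports the module and wires its two functions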