Python
def create_endpoints(self, app, rooms):
    '''Automatically create flask endpoints for functions decorated with
    @route'''
    for _name, fcn in inspect.getmembers(type(self)):
        if hasattr(fcn, 'endpoint'):
            self.flask_wrap(app, fcn, fcn.endpoint, rooms,
                            **fcn.flask_kwargs)
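create_endpoints only registers methods that carry an endpoint attribute and a flask_kwargs attribute. The @route decorator mentioned in the docstring is not shown in this snippet; below is a minimal hypothetical sketch of what such a decorator could look like, assuming all it needs to do is attach those two attributes (only the name route and the two attribute names come from the code above).

def route(endpoint, **flask_kwargs):
    '''Hypothetical sketch: tag a method with the attributes that
    create_endpoints() inspects for.'''
    def decorator(fcn):
        fcn.endpoint = endpoint          # URL suffix, e.g. '/status'
        fcn.flask_kwargs = flask_kwargs  # forwarded to app.route()
        return fcn
    return decorator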
Python
def flask_wrap(self, app, fcn, endpoint, rooms, **kwargs):
    '''
    Register a flask endpoint for an API interface and convert data to
    and from json.

    app - a flask app
    fcn - the function exposed by the API interface. Should take
        (self, request_params) as arguments.
    endpoint - the URL for this function (str)
    '''
    endpoint = '/<room_id>' + endpoint

    @app.route(endpoint, **kwargs)
    @wraps(fcn)
    def wrapped(room_id):
        room = rooms[room_id]
        self.manager = room.manager
        return jsonify(fcn(self, request.json))

    @app.route(webhook_endpoint_format(endpoint), **kwargs)
    @wrap_name()
    @wraps(fcn)
    def webhook_wrapped(room_id):
        return jsonify(webhook_output_format(
            fcn(self, webhook_input_format(request.json))))
Python
def assignPlans(pl_dict):
    '''
    Assigns plans given a dictionary pl_dict.
    Run once after each set of plans is in a pl_dict.
    '''
    # NOTE: relies on module-level globals: precs, prec_cols, proj_crs,
    # dist_chamber, chamber, checkGeo
    for k, v in pl_dict.items():
        print(k)
        plan_shp = v
        dist_col = 'DISTRICT'

        # detect number of districts
        num_dists = len(plan_shp)
        dist_type = "None"
        if num_dists == dist_chamber:
            dist_type = chamber
        else:
            dist_type = 'Other Chamber'
        print('District type: ' + dist_type)
        print('Number of districts: ' + str(num_dists))

        # check the new plan
        plan_shp = plan_shp.to_crs(proj_crs)
        plan_shp['geometry'] = plan_shp.geometry.buffer(0)
        checkGeo(plan_shp)

        # check for district column
        if dist_col in plan_shp.columns:
            print('District assignment column PRESENT')
        else:
            print('WARNING: District assignment column MISSING')

        # check that district column values are unique
        if plan_shp[dist_col].nunique() == num_dists:
            print('District assignment column contains unique values')
        else:
            print('WARNING: District assignment column contains duplicate values')

        # assign plan with spatial join
        print("Starting assignment for plan " + k)
        assignment = maup.assign(precs, plan_shp)
        precs[k] = assignment

        if not os.path.exists('./Dashboard/'):
            os.makedirs('./Dashboard/')

        # group by new columns - all census + election columns
        agg_cols = prec_cols + [k]
        precs_grouped = precs[agg_cols].groupby(k).sum()
        precs_grouped.to_csv('./Dashboard/{0}_precs_grouped.csv'.format(k))
Python
def plot_resis(resis, vector):
    """
    Test function that plots a dictionary of residues and a vector that
    describes their orientation
    """
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    colordict = {
        'n': 'blue',
        'c': 'black',
        'ca': 'brown'
    }
    for atom in ['n', 'ca', 'c']:
        for resi in resis:
            ax.scatter(resi[atom][0], resi[atom][1], resi[atom][2],
                       color=colordict[atom])

    # centroid = find_resis_centroid(resis)
    # vector_moved = vector + centroid
    x = [point[0] for point in vector]
    y = [point[1] for point in vector]
    z = [point[2] for point in vector]
    # x = [vector[0][0], vector[1][0]]
    # y = [vector[0][1], vector[1][1]]
    # z = [vector[0][2], vector[1][2]]
    ax.plot(x, y, z, color='darkgray', linewidth=4)
    # x = [0, vector[0]]
    # y = [0, vector[1]]
    # z = [0, vector[2]]
    # ax.scatter(x, y, z, color='black')
    plt.show()
Python
def scan_pose_helices(self, name=None, test=False, split_chains=True,
                      path=None):
    """
    Scan a pose for helices, then find their centroid, direction, length,
    name, and solvent accessibility.

    To do: solvent accessibility, name (can maybe be done outside this
    function).
    """
    if not name:
        name = self.pose.pdb_info().name()
    print('NAME')
    print(name)
    if split_chains:
        chains = self.pose.split_by_chain()
    else:
        chains = [self.pose]
    # Calculate which residues in pose are at surface only once
    ch = 1
    helices_found = []
    for pose in chains:
        dssp = rosetta.core.scoring.dssp.Dssp(pose)
        positions = contiguous_secstruct(dssp.get_dssp_secstruct())
        surface = np.array(
            pythonize_vector(self.selector.apply(pose))
        )
        for helix in positions['H']:
            # print(helix)
            if helix[1] - helix[0] > 1:
                helix_info = {}
                helix_info['start'] = helix[0]
                helix_info['stop'] = helix[1]
                resis = []
                positions = np.arange(helix[0], helix[1] + 1)
                # med = int(median(positions)) - 1
                # start = min(helix[1] - 3, med)
                for i in range(helix[0], helix[1] + 1):
                    res = {}
                    for atom in ['n', 'ca', 'c']:
                        res[atom] = numeric.xyzV_to_np_array(
                            pose.residue(i).xyz(atom.upper()))
                    resis.append(res)
                avg_direction = find_avg_helix_direction(resis)
                # This gives us direction only; need to also get center of
                # mass and maybe length of helix to fully characterize position
                # helix_info['direction'] = helix_direction(resis[0], resis[1], resis[2])
                helix_info['direction'] = avg_direction
                helix_info['centroid'] = find_resis_centroid(resis)
                helix_info['nres'] = helix[1] - helix[0]
                helix_info['length'] = helix_length(pose, helix)
                if split_chains:
                    helix_info['name'] = name + '_' + str(ch)
                else:
                    helix_info['name'] = name
                helix_info['vector'] = final_vector(helix_info['direction'],
                                                    helix_info['length'],
                                                    helix_info['centroid'])
                helix_info['surface'] = surface[helix[0]-1:helix[1]-1]
                helix_info['percent_exposed'] = \
                    np.count_nonzero(helix_info['surface']) / \
                    len(helix_info['surface'])
                helix_info['chain'] = \
                    pose.pdb_info().pose2pdb(helix_info['start']).split(' ')[1]
                if path:
                    helix_info['path'] = path
                helices_found.append(helix_info)
                if test:
                    plot_resis(resis, helix_info['vector'])
        ch += 1

    return helices_found
Python
def apply(self):
    '''
    Find the score for each subgraph; store the score and subgraph of
    the best-scoring transformation
    '''
    best_score = 9999
    best_subgraph = None
    original_atoms = prody.parsePDB(self.pdb_path,
                                    chain=self.chain).select('backbone')
    for subgraph in self.subgraphs:
        atoms = deepcopy(original_atoms)
        df_vectors, query_vectors = self.get_vectors(subgraph)
        transform = numeric.Transformation(df_vectors, query_vectors)
        prody_transform = \
            prody.measure.transform.Transformation(transform.rotation,
                                                   transform.translation)
        prody_transform.apply(atoms)
        score = self.calculate(atoms)
        if score < best_score:
            best_score = score
            best_subgraph = subgraph

    self.score = best_score
    self.subgraph = best_subgraph
Python
def calculate(self, atoms):
    '''Calculate a clash score from the transformed atoms object and
    alpha shape'''
    # For now just return the # of atoms inside the alpha shape
    clashes = 0
    for atom in atoms:
        coords = [atom.getCoords()]
        if self.alpha.contains(coords):
            clashes += 1

    return clashes
Python
def contiguous_secstruct(ss_str):
    """
    Takes the secondary structure string produced by
    rosetta.core.scoring.dssp.Dssp(pose).get_dssp_secstruct(), which is a
    string of H, L, and E characters denoting the secondary structure at
    each residue.

    Returns a dictionary. Keys = 'H', 'L', or 'E'. Values = lists of
    (start, stop) tuples (1-indexed, inclusive), each corresponding to one
    contiguous secondary structure element.
    """
    ss_positions = {'H': [], 'L': [], 'E': []}

    start = 0
    for i in range(0, len(ss_str)):
        if ss_str[i] != ss_str[start]:
            ss_positions[ss_str[start]].append((start + 1, i))
            start = i
        if i + 1 == len(ss_str):
            ss_positions[ss_str[start]].append((start + 1, i + 1))

    return ss_positions
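A quick illustration of the return format, assuming the function above is in scope (the secondary structure string here is made up):

ss = 'LHHHHLLEEEL'
print(contiguous_secstruct(ss))
# {'H': [(2, 5)], 'L': [(1, 1), (6, 7), (11, 11)], 'E': [(8, 10)]}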
Python
def read_and_display(*cmd):
    """Read cmd's stdout, stderr while displaying them as they arrive.

    Written in the legacy generator-based coroutine style (yield from).
    """
    # start process
    process = yield from asyncio.create_subprocess_exec(*cmd,
                                                        stdout=PIPE,
                                                        stderr=PIPE)

    # read child's stdout/stderr concurrently
    stdout, stderr = [], []  # stdout, stderr buffers
    tasks = {
        asyncio.Task(process.stdout.readline()): (
            stdout, process.stdout, sys.stdout.buffer),
        asyncio.Task(process.stderr.readline()): (
            stderr, process.stderr, sys.stderr.buffer)}
    while tasks:
        done, pending = yield from asyncio.wait(
            tasks, return_when=asyncio.FIRST_COMPLETED)
        assert done
        for future in done:
            buf, stream, display = tasks.pop(future)
            line = future.result()
            if line:  # not EOF
                buf.append(line)     # save for later
                display.write(line)  # display in terminal
                # schedule to read the next line
                tasks[asyncio.Task(stream.readline())] = buf, stream, display

    # wait for the process to exit
    rc = yield from process.wait()
    return rc, b''.join(stdout), b''.join(stderr)
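The function above uses the pre-async/await, generator-based coroutine style, which modern asyncio has moved away from. A rough async/await sketch of the same tee-like pattern is shown below; it is not part of the original code and assumes Python 3.8+.

import asyncio
import sys
from asyncio.subprocess import PIPE

async def read_and_display_async(*cmd):
    """async/await sketch of the same pattern: capture and echo output."""
    process = await asyncio.create_subprocess_exec(*cmd, stdout=PIPE,
                                                   stderr=PIPE)

    async def tee(stream, display):
        buf = []
        while True:
            line = await stream.readline()
            if not line:          # EOF
                break
            buf.append(line)      # save for later
            display.write(line)   # display in terminal
        return b''.join(buf)

    # read stdout and stderr concurrently
    stdout, stderr = await asyncio.gather(
        tee(process.stdout, sys.stdout.buffer),
        tee(process.stderr, sys.stderr.buffer))
    rc = await process.wait()
    return rc, stdout, stderr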
Python
def map_residues(self):
    '''
    Makes a dictionary that stores pairwise distances between all
    residues in reslist
    '''
    resmap = {}
    for res1 in self.reslist:
        if res1 not in resmap:
            # pdbres1 = self.pose.pdb_info().pose2pdb(res1)
            # resmap[pdbres1] = {}
            resmap[res1] = {}
        for res2 in self.reslist:
            # pdbres2 = self.pose.pdb_info().pose2pdb(res2)
            # Don't calculate twice
            if res2 in resmap and res1 in resmap[res2]:
                xyz1 = self.pose.residue(res1).xyz('CA')
                xyz2 = self.pose.residue(res2).xyz('CA')
                assert(
                    euclidean_distance(xyz1, xyz2) == resmap[res2][res1]
                )
                resmap[res1][res2] = resmap[res2][res1]
                # resmap[pdbres1][pdbres2] = resmap[pdbres2][pdbres1]
            else:
                xyz1 = self.pose.residue(res1).xyz('CA')
                xyz2 = self.pose.residue(res2).xyz('CA')
                resmap[res1][res2] = euclidean_distance(xyz1, xyz2)
                # resmap[pdbres1][pdbres2] = euclidean_distance(xyz1,
                #                                               xyz2)

    if len(resmap) > 0:
        resmap = pd.DataFrame(resmap).fillna(0).unstack().reset_index()
        resmap.columns = ['res1', 'res2', 'dist']
        self.resmap = resmap
    else:
        self.resmap = None
Python
def bin_array(array, bins):
    '''
    Digitize a numpy array.
    TO DO: Circularize the binning of angles somehow.
    '''
    inds = np.digitize(array, bins)
    binned = tuple([bins[inds[n] - 1] for n in range(array.size)])
    return binned
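For example, binning a few angle values into 30-degree buckets (assumes numpy and the function above; the values are illustrative):

import numpy as np

angles = np.array([12.0, 95.0, 178.0])
bins = np.arange(0, 181, 30)       # bin edges: 0, 30, ..., 180
print(bin_array(angles, bins))     # left edge of each value's bin: 0, 90, 150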
Python
def relative_position(row1, row2, vectortype='normalized_vector'):
    '''
    Gives the internal relative orientation of two lines, given their
    rows from the pandas dataframe created in scan_helices.

    The relative orientation of two lines should be able to be described
    with just 4 parameters, since they are 2D objects in 3D space. If we
    have lines consisting of points [a, b] and [c, d], those parameters
    are:
    - The distance between their centroids
    - Angle abc
    - Angle bcd
    - Dihedral abcd
    '''
    norm_v1 = row1[vectortype]
    norm_v2 = row2[vectortype]
    centroid_dist = numeric.euclidean_distance(row1['centroid'],
                                               row2['centroid'])
    abc = numeric.angle(norm_v1[0], norm_v1[1], norm_v2[0])
    bcd = numeric.angle(norm_v1[1], norm_v2[0], norm_v2[1])
    dihedral = numeric.dihedral(norm_v1[0], norm_v1[1], norm_v2[0],
                                norm_v2[1])
    # plot_vectors([norm_v1, norm_v2], color='black')

    return centroid_dist, abc, bcd, dihedral
Python
def max_subgraph(self):
    '''
    Finds dense subgraphs, which represent compatible sets of helix
    pairs between the query helices and the database PDB. The longest
    such subgraph represents the best overlay of the PDB with the set
    of query helices.
    '''
    max_subgraph_len = 0
    # for f in gt.max_cliques(self.graph):
    for f in nx.find_cliques(self.graph):
        if len(f) > max_subgraph_len:
            max_subgraph_len = len(f)

    print('Max number of matches:')
    print(max_subgraph_len)

    return max_subgraph_len
Python
def find_edges(self):
    '''
    Populate the graph with nodes and edges. Each node consists of a
    pair of indices, one from the main database and one from the query
    database. This pairing represents the case where the helix in the
    first index is overlaid on the helix of the second index. Edges
    represent compatibility between adjacent nodes.
    '''
    print('Finding edges')
    edges = []
    self.nodes = set()
    property_map = {}
    i = 0
    for doc in self.db.iterrows():
        if doc[0] in self.query.index:
            compatible_bins = self.query.xs(doc[0])
            # compatible_bins = self.query.find({'bin': doc['bin']})
            for result in compatible_bins.iterrows():
                idx_pair1 = (doc[1]['idx1'], result[1]['idx1'])
                idx_pair2 = (doc[1]['idx2'], result[1]['idx2'])

                # Track which nodes have been sampled
                if idx_pair1 not in self.nodes:
                    self.nodes.add(idx_pair1)
                    self.graph.add_node(idx_pair1)
                    # self.nodes[idx_pair1] = i
                    # property_map[i] = idx_pair1
                    i += 1
                if idx_pair2 not in self.nodes:
                    # self.nodes[idx_pair2] = i
                    # property_map[i] = idx_pair2
                    self.nodes.add(idx_pair2)
                    self.graph.add_node(idx_pair2)
                    i += 1

                self.graph.add_edge(idx_pair1, idx_pair2)
                # print('Edge found:')
                # print(idx_pair1)
                # print(idx_pair2)

    # if self.verbose:
    #     print('All edges:')
    #     print(edges)

    # Leftover commented-out code (apparently from a graph-tool-based version):
    # edges.append((self.nodes[idx_pair1], self.nodes[idx_pair2]))
    # self.graph.add_edge_list(edges)
    # prop_dict = self.graph.new_vertex_property('object')
    # for v in self.graph.vertices():
    #     prop_dict[v] = {'query_idx': property_map[v][0],
    #                     'lookup_idx': property_map[v][1]}
Python
def score_matches(results, query_df, db_df):
    '''Go through results dataframe and score the matches'''
    # for i in range(0, 100):  # Review top 100 matches for now.
    #     testrow = results.iloc[i]
    alphapath = query_df.iloc[0]['path']
    alpha = clash.get_alphashape(alphapath)
    results['clash_score'] = None
    results['rifdock_score'] = None
    numrows = results.shape[0]
    curr = 0
    for idx, row in results.iterrows():
        curr += 1
        print('Row {} out of {}'.format(curr, numrows))
        clash_score = clash.ClashScore(row, db_df, query_df, alpha=alpha)
        clash_score.apply()
        print('CLASH SCORE IS {}'.format(clash_score.score))
        results.at[idx, 'clash_score'] = clash_score.score
        rifdock_score = 0
        if clash_score.subgraph:
            for node in clash_score.subgraph:
                query_idx = node[1]
                query_row = query_df.loc[query_idx]
                helixpath = os.path.realpath(query_row['path'])
                turnno = os.path.basename(helixpath).split('_')[0][0]
                scorepath = os.path.join(
                    os.path.dirname(query_row['path']),
                    '{}turn.scores'.format(turnno)
                )
                # In the future, would be more efficient to just open all
                # the score files once and save them to a dataframe.
                with open(scorepath, 'r') as f:
                    for line in f:
                        line = line.strip('\n')
                        if line.endswith(os.path.basename(helixpath)):
                            score_line = line
                            break
                score = float(score_line.split()[10])
                rifdock_score += score
        print('RIFDOCK SCORE IS {}'.format(rifdock_score))
        results.at[idx, 'rifdock_score'] = rifdock_score
        # print(row)
        # print(results.iloc[idx])

    return results
Python
def make_helix(pose):
    '''Make an existing pose into an ideal straight alpha helix by
    setting its backbone torsions.'''
    # Ideal bond geometry parameters (not used by this function)
    c_n_length = 1.32869
    n_ca_length = 1.458
    ca_c_length = 1.52326
    c_o_length = 1.24
    c_n_ca_angle = np.radians(121.7)
    n_ca_c_angle = np.radians(111.2)
    ca_c_n_angle = np.radians(116.2)
    n_c_o_angle = np.radians(125)
    ca_c_o_angle = np.radians(121)

    phi = -57
    psi = -47
    ca_n_c_o_torsion = 0
    omega = 180

    # Make geometries helical
    for i in range(1, pose.size() + 1):
        pose.set_phi(i, phi)
        pose.set_psi(i, psi)
        pose.set_omega(i, omega)
Python
def build_ideal_straight_alpha_helix(length):
    '''Build an ideal straight alpha helix.'''
    # Set the basic parameters
    c_n_length = 1.32869
    n_ca_length = 1.458
    ca_c_length = 1.52326
    c_o_length = 1.24
    c_n_ca_angle = np.radians(121.7)
    n_ca_c_angle = np.radians(111.2)
    ca_c_n_angle = np.radians(116.2)
    n_c_o_angle = np.radians(125)
    ca_c_o_angle = np.radians(121)
    phi = np.radians(-57)
    psi = np.radians(-47)
    ca_n_c_o_torsion = np.radians(0)
    omega = np.radians(180)

    # Build the first residue
    helix = [{'ca': np.array([0, 0, 0]),
              'n': n_ca_length * np.array([np.sin(n_ca_c_angle),
                                           np.cos(n_ca_c_angle), 0]),
              'c': np.array([0, ca_c_length, 0])}]

    # Build the rest of the residues
    for i in range(1, length):
        res = {}
        res['n'] = geometry.cartesian_coord_from_internal_coord(
            helix[-1]['n'], helix[-1]['ca'], helix[-1]['c'],
            c_n_length, ca_c_n_angle, psi)
        res['ca'] = geometry.cartesian_coord_from_internal_coord(
            helix[-1]['ca'], helix[-1]['c'], res['n'],
            n_ca_length, c_n_ca_angle, omega)
        res['c'] = geometry.cartesian_coord_from_internal_coord(
            helix[-1]['c'], res['n'], res['ca'],
            ca_c_length, n_ca_c_angle, phi)
        helix.append(res)

    # Add oxygen atoms
    for i in range(len(helix) - 1):
        helix[i]['o'] = geometry.cartesian_coord_from_internal_coord(
            helix[i + 1]['ca'], helix[i + 1]['n'], helix[i]['c'],
            c_o_length, n_c_o_angle, ca_n_c_o_torsion)

    # Add oxygen for the last residue
    helix[-1]['o'] = geometry.cartesian_coord_from_internal_coord(
        helix[-1]['n'], helix[-1]['ca'], helix[-1]['c'],
        c_o_length, ca_c_o_angle, np.radians(133))

    return helix[:length]
Python
def helix_direction(res1, res2, res3):
    '''Get the helix direction from 3 consecutive residues.'''
    # Get the peptide bond frames
    frame1 = geometry.create_frame_from_three_points(
        res1['c'], res2['n'], res2['ca'])
    frame2 = geometry.create_frame_from_three_points(
        res2['c'], res3['n'], res3['ca'])

    return geometry.rotation_matrix_to_axis_and_angle(
        np.dot(np.transpose(frame2), frame1))[0]
Python
def build_alpha_helix_from_directions(directions):
    '''Build an alpha helix from a list of directions. The number of
    residues will be 2 + the number of directions.
    '''
    helix = build_ideal_straight_alpha_helix(len(directions) + 2)

    # Align the direction defined by the first 3 residues to the first
    # direction
    d0 = helix_direction(*helix[:3])
    M0 = geometry.rotation_matrix_to_superimpose_two_vectors(d0, directions[0])
    helix = basic.transform_residue_list(helix, M0, np.zeros(3))

    # Change the directions of residues
    for i in range(1, len(directions)):
        res_id = i + 1

        # Get the rotation angle that makes the n_ca_c angle ideal
        theta = get_peptide_bond_rotation_angle(
            directions[i],
            helix[res_id - 1]['c'] - helix[res_id - 1]['ca'],
            helix[res_id]['ca'] - helix[res_id]['n'],
            helix[res_id]['c'] - helix[res_id]['ca'])

        # Get the transformation
        frame1 = geometry.create_frame_from_three_points(
            helix[res_id - 1]['ca'], helix[res_id - 1]['c'],
            helix[res_id]['n'])
        frame2 = geometry.create_frame_from_three_points(
            helix[res_id]['ca'], helix[res_id]['c'],
            helix[res_id + 1]['n'])
        m = geometry.rotation_matrix_from_axis_and_angle(directions[i], theta)
        M = np.dot(np.dot(m, np.transpose(frame1)), frame2)
        t = helix[res_id]['ca'] - np.dot(M, helix[res_id]['ca'])

        # Transform the current residue
        for atom in ['c', 'o']:
            helix[res_id][atom] = np.dot(M, helix[res_id][atom]) + t

        # Transform the rest of the strand
        for j in range(res_id + 1, len(helix)):
            helix[j] = basic.transform_residue(helix[j], M, t)

    return helix
Python
def submit_slurm(workspace, **params):
    """Submit a job to a workstation with a Slurm job scheduler."""
    from klab import process

    # Parse some job parameters for the keyword arguments.
    params = dict((k, v) for k, v in list(params.items()) if v is not None)
    test_run = params.get('test_run', False)
    nstruct = params.get('nstruct')
    max_runtime = params.get('max_runtime', '12:00:00')
    max_memory = params.get('max_memory', '3G')

    if test_run:
        max_runtime = '0:30:00'

    if nstruct is None:
        raise TypeError("submit() requires the keyword argument 'nstruct' for production runs.")

    # Create the job file
    workspace_jobno = workspace.slurm_custom_jobno + 1
    job_file = os.path.join(workspace.focus_dir,
                            'slurm_{}'.format(workspace_jobno))
    with open(job_file, 'w') as f:
        for n in range(1, nstruct + 1):
            cmd = '{0} {1} {2} {3}'.format(
                workspace.python_path,
                workspace.script_path,
                workspace.focus_dir,
                n
            )
            f.write(cmd + '\n')

    submission_script = '''#!/bin/bash
#
# Simple SLURM script for submitting multiple serial
# commands (e.g. parametric studies) using a script wrapper
# to launch the commands.
#
# To use, change this job script to accommodate
# running your serial application(s) in your WORKDIR
# directory (usually the directory of submission).
# Edit the commands file to specify the executions
# for the launcher (paramrun) to perform.
#-------------------------------------------------------
#-------------------------------------------------------
#
#         <------ Setup Parameters ------>
#
#SBATCH -J {name}
#SBATCH -N 1
#SBATCH -n 1             #use site recommended # of cores
#SBATCH -p skx-normal
#SBATCH -o {logs}.o%j
#SBATCH -e {logs}.e%j
#SBATCH -t {runtime}
##SBATCH -A <acct_name>  #uncomment and insert acct name if necessary
#------------------------------------------------------
#
# USING SLURM; plugins defines SLURM env. vars.
export LAUNCHER_RMI=SLURM
export LAUNCHER_PLUGIN_DIR=$LAUNCHER_DIR/plugins
#
# JOB_FILE is a list of executions to run
export LAUNCHER_JOB_FILE={commands}
export LAUNCHER_SCHED=dynamic
export LAUNCHER_WORKDIR={focus_dir}

# paramrun will run the executions in the LAUNCHER_JOB_FILE file.
# "JOB" is a misnomer--these are not slurm jobs.
# Each line in the commands file is an execution.
$LAUNCHER_DIR/paramrun'''.format(
        name=params.get('job_name', 'roseasy_job'),
        logs=os.path.join(workspace.focus_dir, 'logs',
                          params.get('job_name', 'roseasy_job')) + '.',
        runtime=max_runtime,
        commands=job_file,
        focus_dir=workspace.focus_dir
    )

    with open(workspace.slurm_submit_file, 'w') as f:
        f.write(submission_script)

    status = process.check_output(
        ('bash', workspace.slurm_submit_file)).decode('utf-8')
    print('Job submission status:')
    print(status)

    with open(workspace.job_info_path(workspace_jobno), 'w') as file:
        json.dump(params, file)
Python
def submit(workspace, cmd, create_job_info=True, **params):
    """Submit a job with the given parameters."""
    from klab import cluster, process

    # Make sure the rosetta symlink has been created.
    # if not os.path.exists(workspace.rosetta_dir):
    #     raise pipeline.RosettaNotFound(workspace)

    # Parse some job parameters for the keyword arguments.
    params = dict((k, v) for k, v in list(params.items()) if v is not None)
    test_run = params.get('test_run', False)
    nstruct = params.get('nstruct')
    max_runtime = params.get('max_runtime', '6:00:00')
    max_memory = params.get('max_memory', '1G')

    if test_run:
        max_runtime = '0:30:00'

    if nstruct is None:
        raise TypeError("submit() requires the keyword argument 'nstruct' for production runs.")

    # Submit the job and put it immediately into the hold state.
    qsub_command = 'qsub', '-h', '-cwd'
    qsub_command += '-o', workspace.log_dir
    qsub_command += '-e', workspace.log_dir
    qsub_command += '-t', '1-{0}'.format(nstruct),
    qsub_command += '-l', 'h_rt={0}'.format(max_runtime),
    qsub_command += '-l', 'mem_free={0}'.format(max_memory),
    qsub_command += '-b', 'y',
    qsub_command += '-N', params.get('job_name'),
    # qsub_command += workspace.python_path,
    for param in cmd:
        qsub_command += param,

    status = process.check_output(qsub_command).decode('utf-8')
    status_pattern = re.compile(r'Your job-array (\d+).[0-9:-]+ \(".*"\) has been submitted')
    status_match = status_pattern.match(status)

    if not status_match:
        print(status)
        sys.exit()

    # Figure out the job id, then make a params file specifically for it.
    job_id = status_match.group(1)

    if create_job_info:
        with open(workspace.job_info_path(job_id), 'w') as file:
            json.dump(params, file)

    # Release the hold on the job.
    qrls_command = 'qrls', job_id
    process.check_output(qrls_command)

    print(status, end=' ')
Python
def int_list_to_pdb_numbers(reslist, chain='Z'):
    '''
    Takes a list of residues with PDB numbering and formats it as a list
    of strings with the chain letter included (e.g., '19 Z')
    '''
    pdblist = []
    for res in reslist:
        if isinstance(res, str):
            resnum = res.split(' ')[0]
            pdblist.append(' '.join([str(resnum), chain]))
        else:
            pdblist.append(' '.join([str(res), chain]))

    return pdblist
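A small usage illustration with made-up residue numbers, assuming the function above is in scope:

print(int_list_to_pdb_numbers([19, 24, '30 A']))
# ['19 Z', '24 Z', '30 Z']  (string inputs keep their number but get the new chain)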
Python
def rosetta_numbers_from_pdb(reslist, pose, chain='A'):
    '''Get a list of Rosetta numbers from a list of PDB numbers'''
    rosetta_list = []
    for resi in reslist:
        if isinstance(resi, str):
            spl = resi.split(' ')
            rosetta_list.append(pose.pdb_info().pdb2pose(spl[1], spl[0]))
        else:
            rosetta_list.append(pose.pdb_info().pdb2pose(chain, resi))

    return rosetta_list
Python
def correct_resnums(initial_pose, reslist, final_pose):
    """Get Rosetta numbering for final_pose from a reslist for the
    initial_pose"""
    corrected_residues = []
    for res in reslist:
        pdbnum = initial_pose.pdb_info().pose2pdb(res)
        pdbres = int(pdbnum.split(' ')[0])
        pdbchain = pdbnum.split(' ')[1]
        rosettanum = final_pose.pdb_info().pdb2pose(pdbchain, pdbres)
        corrected_residues.append(rosettanum)

    return corrected_residues
Python
def is_default_database(self, dbpath):
    '''Checks if a database path is the default one by checking whether
    it is in standard_params or not'''
    return os.path.dirname(os.path.abspath(dbpath)) == \
        os.path.abspath(self.standard_params_dir)
Python
def slurm_custom_jobno(self):
    """
    Return the number associated with the latest slurm command file.
    Slurm command files should have the format slurm_<jobnumber>, where
    <jobnumber> increments by 1 for each job submitted in a particular
    workspace. This function simply returns the latest <jobnumber>.
    """
    latest = 0
    for f in glob.glob(self.focus_dir + '/slurm_*'):
        f = os.path.basename(f)
        num = int(f.split('_')[1])
        if num > latest:
            latest = num

    return latest
Python
def user_email(self):
    '''
    Slurm jobs can accept an email to be submitted with a job. This can
    be provided in either standard_params or project_params.
    '''
    path = self.find_path('email')
    if not os.path.exists(path):
        print('WARNING: Slurm requires an email to submit a job.')
        print('Please provide this via a file named \'email\' in project_params.')
        raise PathNotFound(path)
    with open(path, 'r') as f:
        email = f.read().strip('\n')
    return email
Python
def slurm_cmd_file(self):
    """
    Returns the latest slurm command file.
    """
    jobno = self.slurm_custom_jobno
    return os.path.join(self.focus_dir, 'slurm_{}'.format(jobno))
Python
def find_all_paths(self, basename):
    """
    Looks in a few places for any files with a given name or pattern and
    returns them as a list.
    """
    # Look for the file in standard folders
    hits = []
    for directory in self.find_path_dirs:
        paths = glob.glob(os.path.join(directory, basename))
        hits.extend([os.path.abspath(path) for path in paths])
    return hits
Python
def scaffold_clusters(self, scaffold):
    '''Returns the folder for clusters of a given scaffold. Should be
    able to take either a directory or pdb file as the scaffold
    argument.'''
    scaffold_dir = os.path.join(self.cluster_outputs,
                                self.basename(scaffold))
    return scaffold_dir
Python
def predecessor_from_dir(workspace, step):
    """
    Get the workspace that precedes the current step.
    """
    searchstr = '{:02}*'.format(step - 1)
    results = []
    for folder in glob.glob(os.path.join(workspace.root_dir, searchstr)):
        if os.path.isdir(folder):
            results.append(folder)
    if len(results) > 1:
        e = 'WARNING! More than one predecessor workspace found. '\
            'Check filenames for conflicts.'
        raise PipelineError(e)
    elif len(results) == 0:
        e = 'WARNING! No predecessor workspaces found. '\
            'Check filenames for conflicts.'
        raise PipelineError(e)
    else:
        return results[0]
Python
def workspace_from_dir(directory, recurse=True):
    """
    Construct a workspace object from a directory name. If recurse=True,
    this function will search up the directory tree and return the first
    workspace it finds. If recurse=False, an exception will be raised if
    the given directory is not a workspace. Workspace identification
    requires a file called 'workspace.pkl' to be present in each workspace
    directory, which can unfortunately be a little fragile.
    """
    directory = os.path.abspath(directory)
    pickle_path = os.path.join(directory, 'workspace.pkl')

    # Make sure the given directory contains a 'workspace' file. This file
    # is needed to instantiate the right kind of workspace.
    if not os.path.exists(pickle_path):
        if recurse:
            parent_dir = os.path.dirname(directory)
            print(parent_dir)

            # Keep looking for a workspace as long as we haven't hit the
            # root of the file system. If an exception is raised, that
            # means no workspace was found. Catch and re-raise the
            # exception so that the name of the directory reported in the
            # exception is meaningful to the user.
            try:
                return workspace_from_dir(parent_dir, parent_dir != '/')
            except WorkspaceNotFound:
                raise WorkspaceNotFound(directory)
        else:
            raise WorkspaceNotFound(directory)

    # Load the 'workspace' file and create a workspace.
    with open(pickle_path, 'rb') as file:
        workspace_class = pickle.load(file)

    return workspace_class.from_directory(directory)
Python
def centroid(vector1, vector2):
    '''
    Finds centroid of two sets of points
    '''
    cen1 = np.mean(vector1, axis=0)
    cen2 = np.mean(vector2, axis=0)
    return np.mean([cen1, cen2], axis=0)
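A quick check with two made-up point sets (one 3D point per row), assuming numpy and the function above:

import numpy as np

v1 = np.array([[0., 0., 0.], [2., 0., 0.]])   # centroid [1, 0, 0]
v2 = np.array([[0., 4., 0.], [0., 0., 4.]])   # centroid [0, 2, 2]
print(centroid(v1, v2))                       # midpoint of the two: [0.5, 1, 1]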
Python
def angle(a, b, c):
    '''
    Finds the angle between 3 points in degrees.
    '''
    ba = a - b
    bc = c - b
    cos = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    angle = np.arccos(cos)
    return np.degrees(angle)
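For example, three points forming a right angle at b (assumes numpy and the function above):

import numpy as np

a = np.array([1., 0., 0.])
b = np.array([0., 0., 0.])
c = np.array([0., 1., 0.])
print(angle(a, b, c))   # 90.0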
Python
def dihedral(a, b, c, d):
    '''
    Finds the dihedral angle between 4 points in degrees.
    '''
    b0 = -1.0 * (b - a)
    b1 = c - b
    b2 = d - c

    # normalize b1 so that it does not influence magnitude of vector
    # rejections that come next
    b1 /= np.linalg.norm(b1)

    # vector rejections
    # v = projection of b0 onto plane perpendicular to b1 = b0 minus
    #     component that aligns with b1
    # w = projection of b2 onto plane perpendicular to b1 = b2 minus
    #     component that aligns with b1
    v = b0 - np.dot(b0, b1) * b1
    w = b2 - np.dot(b2, b1) * b1

    # angle between v and w in a plane is the torsion angle
    # v and w may not be normalized but that's fine since tan is y/x
    x = np.dot(v, w)
    y = np.dot(np.cross(b1, v), w)
    return np.degrees(np.arctan2(y, x))
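A made-up example with a quarter-turn torsion about the b-c axis, assuming numpy and the function above; the sign follows the arctan2 convention used in the code:

import numpy as np

a = np.array([1., 0., 0.])
b = np.array([0., 0., 0.])
c = np.array([0., 1., 0.])
d = np.array([0., 1., 1.])
print(dihedral(a, b, c, d))   # -90.0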
Python
def line_angle(line1, line2):
    ''' Finds the angle (in radians) between two lines, each consisting of
    two points. E.g. line1 = [[1,2,3],[5,6,7]] '''
    # Accept plain lists as in the docstring example as well as arrays
    line1 = np.asarray(line1)
    line2 = np.asarray(line2)
    # Move to origin
    vector1 = line1[1] - line1[0]
    vector2 = line2[1] - line2[0]
    # Normalize
    vector1 = vector1 / np.linalg.norm(vector1)
    vector2 = vector2 / np.linalg.norm(vector2)
    return np.arccos(np.clip(np.dot(vector1, vector2), -1.0, 1.0))
def line_angle(line1, line2):
    ''' Finds the angle (in radians) between two lines, each consisting of
    two points. E.g. line1 = [[1,2,3],[5,6,7]] '''
    # Accept plain lists as in the docstring example as well as arrays
    line1 = np.asarray(line1)
    line2 = np.asarray(line2)
    # Move to origin
    vector1 = line1[1] - line1[0]
    vector2 = line2[1] - line2[0]
    # Normalize
    vector1 = vector1 / np.linalg.norm(vector1)
    vector2 = vector2 / np.linalg.norm(vector2)
    return np.arccos(np.clip(np.dot(vector1, vector2), -1.0, 1.0))
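Note that, unlike angle() and dihedral(), this helper returns radians. A minimal sketch, assuming numpy as np and line_angle in scope:

import numpy as np

line1 = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
line2 = np.array([[0.0, 0.0, 0.0], [0.0, 2.0, 0.0]])

print(np.degrees(line_angle(line1, line2)))  # -> 90.0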
Python
def update(provider, since=None, purge=False, dry_run=False): """Update metadata formats, items, records and sets. Parameters ---------- provider: object The metadata provider. Must have the following methods: formats(): dict from unicode to (unicode, unicode) The available metadata formats as a dict mapping metadata prefixes to (namespace, schema location) tuples. identifiers(): iterable of unicode OAI identifiers of all items. has_changed(identifier: unicode, since: datetime): bool Return `True` if the item with the given identifier has changed since the given time. Otherwise return `False`. get_sets(identifier: unicode): iterable of (unicode, unicode) Return sets of the item with the given identifier as an iterable of (set spec, set name) tuples. get_record(identifier: unicode, prefix: unicode): unicode or None Disseminate the metadata of the specified item in the specified format. Return an XML fragment. If the item cannot be disseminated in the specified format, return None. since: datetime.datetime or None Time of the last update in UTC, or `None`. purge: bool If `True`, purge deleted formats, items and records from the database. dry_run: bool If `True`, fetch records as usual but do not actually change the database. Raises ------ HarvestError: If the provider raises an exception and the harvest cannot be continued. """ prefixes = update_formats(provider, purge, dry_run) identifiers = update_items(provider, purge, dry_run) update_records(provider, identifiers, prefixes, since, dry_run)
def update(provider, since=None, purge=False, dry_run=False): """Update metadata formats, items, records and sets. Parameters ---------- provider: object The metadata provider. Must have the following methods: formats(): dict from unicode to (unicode, unicode) The available metadata formats as a dict mapping metadata prefixes to (namespace, schema location) tuples. identifiers(): iterable of unicode OAI identifiers of all items. has_changed(identifier: unicode, since: datetime): bool Return `True` if the item with the given identifier has changed since the given time. Otherwise return `False`. get_sets(identifier: unicode): iterable of (unicode, unicode) Return sets of the item with the given identifier as an iterable of (set spec, set name) tuples. get_record(identifier: unicode, prefix: unicode): unicode or None Disseminate the metadata of the specified item in the specified format. Return an XML fragment. If the item cannot be disseminated in the specified format, return None. since: datetime.datetime or None Time of the last update in UTC, or `None`. purge: bool If `True`, purge deleted formats, items and records from the database. dry_run: bool If `True`, fetch records as usual but do not actually change the database. Raises ------ HarvestError: If the provider raises an exception and the harvest cannot be continued. """ prefixes = update_formats(provider, purge, dry_run) identifiers = update_items(provider, purge, dry_run) update_records(provider, identifiers, prefixes, since, dry_run)
Python
def parse_response(text): """Parse XML data and validate it using the OAI-PMH and oai_dc schemas. Return the parsed XML tree.""" parser = etree.XMLParser(schema=master_schema()) return etree.fromstring(text.encode('utf-8'), parser)
def parse_response(text): """Parse XML data and validate it using the OAI-PMH and oai_dc schemas. Return the parsed XML tree.""" parser = etree.XMLParser(schema=master_schema()) return etree.fromstring(text.encode('utf-8'), parser)
Python
def check_error_code(self, error, code): """Render the error template and check rendered error code.""" template = get_template_path('error.pt') params = { 'time': datetime(2013, 12, 24, 13, 45, 0), 'format_date': format_datestamp, 'filter_illegal_chars': filter_illegal_chars, 'error': error, } request = testing.DummyRequest(params={ 'verb': 'ListSets', 'resumptionToken': 'asdf' }) setattr(request, 'path_url', 'http://pelle.org/asd') # render the template result = render(template, params, request) # parse and validate xml tree = parse_response(result) # check response date, base url, error code and message self.assertEqual(tree.find('{{{0}}}responseDate'.format(OAI_NS)).text, '2013-12-24T13:45:00Z') self.assertEqual(tree.find('{{{0}}}request'.format(OAI_NS)).text, 'http://pelle.org/asd') self.assertEqual(tree.find('{{{0}}}error'.format(OAI_NS)).get('code'), code) return tree
def check_error_code(self, error, code): """Render the error template and check rendered error code.""" template = get_template_path('error.pt') params = { 'time': datetime(2013, 12, 24, 13, 45, 0), 'format_date': format_datestamp, 'filter_illegal_chars': filter_illegal_chars, 'error': error, } request = testing.DummyRequest(params={ 'verb': 'ListSets', 'resumptionToken': 'asdf' }) setattr(request, 'path_url', 'http://pelle.org/asd') # render the template result = render(template, params, request) # parse and validate xml tree = parse_response(result) # check response date, base url, error code and message self.assertEqual(tree.find('{{{0}}}responseDate'.format(OAI_NS)).text, '2013-12-24T13:45:00Z') self.assertEqual(tree.find('{{{0}}}request'.format(OAI_NS)).text, 'http://pelle.org/asd') self.assertEqual(tree.find('{{{0}}}error'.format(OAI_NS)).get('code'), code) return tree
Python
def render_template(self, values): """Render the template with some parameters. """ self.values.update(values) return render(self.template, self.values, self.request)
def render_template(self, values): """Render the template with some parameters. """ self.values.update(values) return render(self.template, self.values, self.request)
Python
def check_response(self, response, pattern): """Parse the response into an XML tree, validate it with the OAI-PMH schema and check that response matches the pattern. """ # parse and validate xml tree = parse_response(response) # check root tag self.assertEqual(tree.tag, '{{{0}}}OAI-PMH'.format(OAI_NS)) # check schema location self.assertEqual( tree.get('{{{0}}}schemaLocation'.format(XSI_NS)).split(), [OAI_NS, 'http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd'] ) # check response date and request elements self.assertEqual(tree.find('{%s}responseDate' % OAI_NS).text, '2013-12-24T13:45:00Z') self.assertEqual(tree.find('{%s}request' % OAI_NS).text, 'http://pelle.org/asd') self.assertEqual(tree.find('{%s}request' % OAI_NS).attrib, self.request.params) self.check_pattern(tree, pattern)
def check_response(self, response, pattern): """Parse the response into an XML tree, validate it with the OAI-PMH schema and check that response matches the pattern. """ # parse and validate xml tree = parse_response(response) # check root tag self.assertEqual(tree.tag, '{{{0}}}OAI-PMH'.format(OAI_NS)) # check schema location self.assertEqual( tree.get('{{{0}}}schemaLocation'.format(XSI_NS)).split(), [OAI_NS, 'http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd'] ) # check response date and request elements self.assertEqual(tree.find('{%s}responseDate' % OAI_NS).text, '2013-12-24T13:45:00Z') self.assertEqual(tree.find('{%s}request' % OAI_NS).text, 'http://pelle.org/asd') self.assertEqual(tree.find('{%s}request' % OAI_NS).attrib, self.request.params) self.check_pattern(tree, pattern)
Python
def oai_view(wrapped): """Augment the return value of a function with common template parameters and add time property to the request parameter.""" def wrapper(context, request=None): if request is None: request = context context = None # Get the datestamp before any database queries. setattr(request, 'time', datestamp_now()) if wrapped.func_code.co_argcount == 1: result = wrapped(request) else: result = wrapped(context, request) # time of the response result['time'] = request.time # function for formatting datestamps result['format_date'] = format_datestamp request.response.content_type = 'text/xml' return result functools.update_wrapper(wrapper, wrapped) return wrapper
def oai_view(wrapped): """Augment the return value of a function with common template parameters and add time property to the request parameter.""" def wrapper(context, request=None): if request is None: request = context context = None # Get the datestamp before any database queries. setattr(request, 'time', datestamp_now()) if wrapped.func_code.co_argcount == 1: result = wrapped(request) else: result = wrapped(context, request) # time of the response result['time'] = request.time # function for formatting datestamps result['format_date'] = format_datestamp request.response.content_type = 'text/xml' return result functools.update_wrapper(wrapper, wrapped) return wrapper
Python
def _create_resumption_token(params, offset, time): """Create a resumption token for a ListRecords or ListIdentifiers request. """ return json.dumps({ 'verb': params[u'verb'], 'metadataPrefix': params[u'metadataPrefix'], 'offset': offset, 'date': format_datestamp(time), 'from': params.get(u'from', None), 'until': params.get(u'until', None), 'set': params.get(u'set', None), })
def _create_resumption_token(params, offset, time): """Create a resumption token for a ListRecords or ListIdentifiers request. """ return json.dumps({ 'verb': params[u'verb'], 'metadataPrefix': params[u'metadataPrefix'], 'offset': offset, 'date': format_datestamp(time), 'from': params.get(u'from', None), 'until': params.get(u'until', None), 'set': params.get(u'set', None), })
Python
def _get_resumption_token(request): """Check whether the request parameters contain a resumption token. Also check that the resumption token is not expired and that it has the correct verb. Other parameters in the resumption token are not checked. Parameters ---------- request: pyramid.request.Request The request. Raises ------ BadArgument: If the parameters contain other arguments in addition to the resumption token, or if the params contain multiple resumption tokens. InvalidResumptionToken: If the params contain an invalid resumption token. ExpiredResumptionToken: If the resumption token has expired. Return ------ None or dict: The parsed resumption token dict, or ``None`` if there is no request token in the parameters. """ if u'resumptionToken' not in request.params: return None # No other arguments allowed with resumptionToken. _check_params(request.params, required=[u'resumptionToken']) try: parsed = json.loads(request.params[u'resumptionToken']) except: raise exception.InvalidResumptionToken() # Check types. if type(parsed) is not dict: raise exception.InvalidResumptionToken() for k, v in parsed.iteritems(): if (v is not None) and (not isinstance(v, basestring)): raise exception.InvalidResumptionToken() # Check verb. if parsed.get(u'verb', None) != request.params[u'verb']: raise exception.InvalidResumptionToken() # Check date. _check_resumption_token_date(parsed) return parsed
def _get_resumption_token(request): """Check whether the request parameters contain a resumption token. Also check that the resumption token is not expired and that it has the correct verb. Other parameters in the resumption token are not checked. Parameters ---------- request: pyramid.request.Request The request. Raises ------ BadArgument: If the parameters contain other arguments in addition to the resumption token, or if the params contain multiple resumption tokens. InvalidResumptionToken: If the params contain an invalid resumption token. ExpiredResumptionToken: If the resumption token has expired. Return ------ None or dict: The parsed resumption token dict, or ``None`` if there is no request token in the parameters. """ if u'resumptionToken' not in request.params: return None # No other arguments allowed with resumptionToken. _check_params(request.params, required=[u'resumptionToken']) try: parsed = json.loads(request.params[u'resumptionToken']) except: raise exception.InvalidResumptionToken() # Check types. if type(parsed) is not dict: raise exception.InvalidResumptionToken() for k, v in parsed.iteritems(): if (v is not None) and (not isinstance(v, basestring)): raise exception.InvalidResumptionToken() # Check verb. if parsed.get(u'verb', None) != request.params[u'verb']: raise exception.InvalidResumptionToken() # Check date. _check_resumption_token_date(parsed) return parsed
Python
def _check_resumption_token_date(token): """Check that a resumption token's date is valid. Arguments --------- token: dict from str to str The parsed resumption token. Raises ------ InvalidResumptionToken: If the date is in invalid format. ExpiredResumptionToken: If the resumption token has expired. """ try: date, _ = parse_date(token[u'date']) except: raise exception.InvalidResumptionToken() latest = Datestamp.get() if (latest is not None) and (latest >= date): raise exception.ExpiredResumptionToken()
def _check_resumption_token_date(token): """Check that a resumption token's date is valid. Arguments --------- token: dict from str to str The parsed resumption token. Raises ------ InvalidResumptionToken: If the date is in invalid format. ExpiredResumptionToken: If the resumption token has expired. """ try: date, _ = parse_date(token[u'date']) except: raise exception.InvalidResumptionToken() latest = Datestamp.get() if (latest is not None) and (latest >= date): raise exception.ExpiredResumptionToken()
Python
def _get_metadata_prefix(params, ignore_deleted): """Check that metadata prefix in request parameters is supported. If the metadata prefix is not supported, raise ``UnsupportedMetadataFormat``. Otherwise return the prefix. """ prefix = params[u'metadataPrefix'] if not Format.exists(prefix, ignore_deleted): raise exception.UnsupportedMetadataFormat(prefix) return prefix
def _get_metadata_prefix(params, ignore_deleted): """Check that metadata prefix in request parameters is supported. If the metadata prefix is not supported, raise ``UnsupportedMetadataFormat``. Otherwise return the prefix. """ prefix = params[u'metadataPrefix'] if not Format.exists(prefix, ignore_deleted): raise exception.UnsupportedMetadataFormat(prefix) return prefix
Python
def filter_illegal_chars(text): """Remove unicode characters that are illegal in XML. Parameters ---------- text: unicode Text to filter. Return ------ unicode: Filtered text. """ return _XML_ILLEGAL_CHARACTERS.sub(u'', text)
def filter_illegal_chars(text): """Remove unicode characters that are illegal in XML. Parameters ---------- text: unicode Text to filter. Return ------ unicode: Filtered text. """ return _XML_ILLEGAL_CHARACTERS.sub(u'', text)
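The module-level _XML_ILLEGAL_CHARACTERS pattern is not shown in this excerpt; a plausible definition (an assumption, not necessarily the project's exact pattern) covers the C0 control characters that XML 1.0 forbids:

import re

# Hypothetical pattern: control characters other than tab, LF and CR
# are not allowed in XML 1.0 documents.
_XML_ILLEGAL_CHARACTERS = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1f]')

text = u'hello\x00 world\x07!'
print(_XML_ILLEGAL_CHARACTERS.sub(u'', text))             # -> hello world!
print(_XML_ILLEGAL_CHARACTERS.search(text) is not None)   # -> True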
Python
def contains_illegal_chars(text):
    """Check whether the text contains illegal characters.

    Parameters
    ----------
    text: unicode
        Text to check.

    Return
    ------
    bool:
        True, if the text contains illegal characters. Otherwise False.
    """
    return _XML_ILLEGAL_CHARACTERS.search(text) is not None
def contains_illegal_chars(text):
    """Check whether the text contains illegal characters.

    Parameters
    ----------
    text: unicode
        Text to check.

    Return
    ------
    bool:
        True, if the text contains illegal characters. Otherwise False.
    """
    return _XML_ILLEGAL_CHARACTERS.search(text) is not None
Python
def datestamp_now(): """Create a datestamp of the current time at second granularity. Return ------ datetime.datetime: Current time. """ now = datetime.datetime.utcnow() # Strip microseconds. return now.replace(microsecond=0)
def datestamp_now(): """Create a datestamp of the current time at second granularity. Return ------ datetime.datetime: Current time. """ now = datetime.datetime.utcnow() # Strip microseconds. return now.replace(microsecond=0)
Python
def format_datestamp(datestamp): """Format datestamp to an OAI-PMH compliant format. Parameters ---------- datestamp: datetime.datetime A datestamp. Return ------ str: Formatted datestamp. """ return datestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
def format_datestamp(datestamp): """Format datestamp to an OAI-PMH compliant format. Parameters ---------- datestamp: datetime.datetime A datestamp. Return ------ str: Formatted datestamp. """ return datestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
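Taken together with datestamp_now(), the formatting boils down to stripping microseconds and printing the UTC time; a self-contained sketch for a fixed time:

import datetime

now = datetime.datetime(2013, 12, 24, 13, 45, 0, 123456)
stamp = now.replace(microsecond=0)           # what datestamp_now() does
print(stamp.strftime('%Y-%m-%dT%H:%M:%SZ'))  # -> 2013-12-24T13:45:00Z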
Python
def main(global_config, **app_config): """ This function returns a Pyramid WSGI application. """ settings = {} settings.update(global_config) settings.update(app_config) clean_oai_settings(settings) setup_logging(settings['logging_config']) create_engine(settings) ensure_oai_dc_exists() config = Configurator(settings=settings) config.include('pyramid_tm') config.include('pyramid_chameleon') config.add_route('oai', '/oai', request_method=('GET', 'POST')) config.scan() return config.make_wsgi_app()
def main(global_config, **app_config): """ This function returns a Pyramid WSGI application. """ settings = {} settings.update(global_config) settings.update(app_config) clean_oai_settings(settings) setup_logging(settings['logging_config']) create_engine(settings) ensure_oai_dc_exists() config = Configurator(settings=settings) config.include('pyramid_tm') config.include('pyramid_chameleon') config.add_route('oai', '/oai', request_method=('GET', 'POST')) config.scan() return config.make_wsgi_app()
Python
def master_schema(): """Return the master XSD schema. The master schema contains schemas for OAI-PMH responses and the OAI DC metadata format. Return ------ lxml.etree.XMLSchema The schema. """ global _the_schema if _the_schema is None: schema_path = os.path.join(__path__[0], 'master.xsd') _the_schema = etree.XMLSchema(file=schema_path) return _the_schema
def master_schema(): """Return the master XSD schema. The master schema contains schemas for OAI-PMH responses and the OAI DC metadata format. Return ------ lxml.etree.XMLSchema The schema. """ global _the_schema if _the_schema is None: schema_path = os.path.join(__path__[0], 'master.xsd') _the_schema = etree.XMLSchema(file=schema_path) return _the_schema
Python
def formats(self): """ List the available metadata formats. Return ------ dict from unicode to (unicode, unicode): Mapping from metadata prefixes to (namespace, schema location) tuples. """ # Only OAI DC is available from this provider. return { 'oai_dc': ('http://www.openarchives.org/OAI/2.0/oai_dc/', 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'), }
def formats(self): """ List the available metadata formats. Return ------ dict from unicode to (unicode, unicode): Mapping from metadata prefixes to (namespace, schema location) tuples. """ # Only OAI DC is available from this provider. return { 'oai_dc': ('http://www.openarchives.org/OAI/2.0/oai_dc/', 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'), }
Python
def has_changed(self, identifier, since):
    """ Check whether the given item has been modified.

    Parameters
    ----------
    identifier: unicode
        The OAI identifier (as returned by identifiers()) of the item.
    since: datetime.datetime
        Ignore modifications before this date/time.

    Return
    ------
    bool:
        `True`, if metadata or sets of the item have changed since the given
        time. Otherwise `False`.
    """
    filename = self.get_filename(identifier)
    path = os.path.join(self.directory, filename)
    mtime = os.path.getmtime(path)
    ctime = os.path.getctime(path)
    datestamp = datetime.utcfromtimestamp(max(mtime, ctime))
    return datestamp >= since
def has_changed(self, identifier, since):
    """ Check whether the given item has been modified.

    Parameters
    ----------
    identifier: unicode
        The OAI identifier (as returned by identifiers()) of the item.
    since: datetime.datetime
        Ignore modifications before this date/time.

    Return
    ------
    bool:
        `True`, if metadata or sets of the item have changed since the given
        time. Otherwise `False`.
    """
    filename = self.get_filename(identifier)
    path = os.path.join(self.directory, filename)
    mtime = os.path.getmtime(path)
    ctime = os.path.getctime(path)
    datestamp = datetime.utcfromtimestamp(max(mtime, ctime))
    return datestamp >= since
Python
def make_identifier(self, filename): """ Form an OAI identifier for the given file. """ # Remove the ".xml" extension from filename. filename = filename[len(self.directory) + 1:-4] return self.oai_identifier_prefix + filename
def make_identifier(self, filename): """ Form an OAI identifier for the given file. """ # Remove the ".xml" extension from filename. filename = filename[len(self.directory) + 1:-4] return self.oai_identifier_prefix + filename
Python
def add_field(name, text): """Add a field to the DC XML tree.""" if text is not None and len(text) > 0 and not text.isspace(): element = root.makeelement( '{{{dc}}}{name}'.format(name=name, **nsmap) ) element.text = text root.append(element)
def add_field(name, text): """Add a field to the DC XML tree.""" if text is not None and len(text) > 0 and not text.isspace(): element = root.makeelement( '{{{dc}}}{name}'.format(name=name, **nsmap) ) element.text = text root.append(element)
Python
def clean_oai_settings(settings): """Parse and validate OAI app settings in a dictionary. Check that the settings required by the OAI app are in the settings dictionary and have valid values. Convert them to correct types. Required settings are: admin_emails deleted_records item_list_limit logging_config repository_descriptions repository_name sqlalchemy.url Parameters ---------- settings: dict from str to str The settings dictionary. Raises ------ ConfigurationError: If some setting is missing or has an invalid value. """ cleaners = { 'admin_emails': _clean_admin_emails, 'deleted_records': _clean_deleted_records, 'item_list_limit': _clean_item_list_limit, 'logging_config': _clean_unicode, 'repository_descriptions': _load_repository_descriptions, 'repository_name': _clean_unicode, 'sqlalchemy.url': _clean_unicode, } _clean_settings(settings, cleaners)
def clean_oai_settings(settings): """Parse and validate OAI app settings in a dictionary. Check that the settings required by the OAI app are in the settings dictionary and have valid values. Convert them to correct types. Required settings are: admin_emails deleted_records item_list_limit logging_config repository_descriptions repository_name sqlalchemy.url Parameters ---------- settings: dict from str to str The settings dictionary. Raises ------ ConfigurationError: If some setting is missing or has an invalid value. """ cleaners = { 'admin_emails': _clean_admin_emails, 'deleted_records': _clean_deleted_records, 'item_list_limit': _clean_item_list_limit, 'logging_config': _clean_unicode, 'repository_descriptions': _load_repository_descriptions, 'repository_name': _clean_unicode, 'sqlalchemy.url': _clean_unicode, } _clean_settings(settings, cleaners)
Python
def clean_importer_settings(settings): """Parse and validate metadata importer settings in a dictionary. Check that the settings required by the metadata importer are in the settings dictionary and have valid values. Convert them to correct types. Required settings are: deleted_records dry_run force_update logging_config sqlalchemy.url timestamp_file metadata_provider_class metadata_provider_args Parameters ---------- settings: dict from str to str The settings dictionary. Raises ------ ConfigurationError: If some setting is missing or has an invalid value. """ cleaners = { 'deleted_records': _clean_deleted_records, 'dry_run': _clean_boolean, 'force_update': _clean_boolean, 'logging_config': _clean_unicode, 'sqlalchemy.url': _clean_unicode, 'timestamp_file': _clean_unicode, 'metadata_provider_args': _clean_unicode, 'metadata_provider_class': _clean_provider_class, } return _clean_settings(settings, cleaners)
def clean_importer_settings(settings): """Parse and validate metadata importer settings in a dictionary. Check that the settings required by the metadata importer are in the settings dictionary and have valid values. Convert them to correct types. Required settings are: deleted_records dry_run force_update logging_config sqlalchemy.url timestamp_file metadata_provider_class metadata_provider_args Parameters ---------- settings: dict from str to str The settings dictionary. Raises ------ ConfigurationError: If some setting is missing or has an invalid value. """ cleaners = { 'deleted_records': _clean_deleted_records, 'dry_run': _clean_boolean, 'force_update': _clean_boolean, 'logging_config': _clean_unicode, 'sqlalchemy.url': _clean_unicode, 'timestamp_file': _clean_unicode, 'metadata_provider_args': _clean_unicode, 'metadata_provider_class': _clean_provider_class, } return _clean_settings(settings, cleaners)
Python
def _clean_settings(settings, cleaners): """Check that settings are ok. The parameter `cleaners` is a dict from setting names to functions. Each cleaner function is called with the value of the corresponding setting. The cleaners should raise an exception if the value is invalid and otherwise return a cleaned value. The old value gets replaced by the cleaned value. Parameters ---------- settings: dict from str to str The settings dictionary. cleaners: dict from str to callable Mapping from setting names to cleaner functions. Raises ------ ConfigurationError: If any setting is missing or invalid. """ for name, func in cleaners.iteritems(): if name not in settings: raise ConfigurationError('missing setting {0}'.format(name)) try: cleaned = func(settings[name]) settings[name] = cleaned except Exception as error: raise ConfigurationError( 'invalid {0} setting: {1}'.format(name, error) )
def _clean_settings(settings, cleaners): """Check that settings are ok. The parameter `cleaners` is a dict from setting names to functions. Each cleaner function is called with the value of the corresponding setting. The cleaners should raise an exception if the value is invalid and otherwise return a cleaned value. The old value gets replaced by the cleaned value. Parameters ---------- settings: dict from str to str The settings dictionary. cleaners: dict from str to callable Mapping from setting names to cleaner functions. Raises ------ ConfigurationError: If any setting is missing or invalid. """ for name, func in cleaners.iteritems(): if name not in settings: raise ConfigurationError('missing setting {0}'.format(name)) try: cleaned = func(settings[name]) settings[name] = cleaned except Exception as error: raise ConfigurationError( 'invalid {0} setting: {1}'.format(name, error) )
Python
def _clean_admin_emails(value): """Check that the value is a list of valid email addresses.""" # email regex pattern defined in the OAI-PMH XML schema pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE) emails = _clean_unicode(value).split() if not emails: raise ValueError('no emails') for email in emails: if re.match(pattern, email) is None: raise ValueError( 'invalid email address: {0}' ''.format(repr(email)) ) return emails
def _clean_admin_emails(value): """Check that the value is a list of valid email addresses.""" # email regex pattern defined in the OAI-PMH XML schema pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE) emails = _clean_unicode(value).split() if not emails: raise ValueError('no emails') for email in emails: if re.match(pattern, email) is None: raise ValueError( 'invalid email address: {0}' ''.format(repr(email)) ) return emails
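The validation hinges on the email pattern from the OAI-PMH schema; a standalone illustration of how that regex behaves (the addresses below are made up):

import re

pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE)

for candidate in ['admin@example.org', 'not-an-email']:
    print(candidate, bool(pattern.match(candidate)))
# -> admin@example.org True
# -> not-an-email False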
Python
def _clean_item_list_limit(value): """Check that value is a positive integer.""" int_value = int(value) if int_value <= 0: raise ValueError('item_list_limit must be positive') return int_value
def _clean_item_list_limit(value): """Check that value is a positive integer.""" int_value = int(value) if int_value <= 0: raise ValueError('item_list_limit must be positive') return int_value
Python
def _clean_unicode(value): """Return the value as a unicode.""" if isinstance(value, str): return value.decode('utf-8') else: return unicode(value)
def _clean_unicode(value): """Return the value as a unicode.""" if isinstance(value, str): return value.decode('utf-8') else: return unicode(value)
Python
def _clean_provider_class(value): """Split the value to module name and classname.""" modulename, classname = value.split(':') if len(modulename) == 0: raise ValueError('empty module name') if len(classname) == 0: raise ValueError('empty class name') return (modulename, classname)
def _clean_provider_class(value): """Split the value to module name and classname.""" modulename, classname = value.split(':') if len(modulename) == 0: raise ValueError('empty module name') if len(classname) == 0: raise ValueError('empty class name') return (modulename, classname)
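For illustration, a hypothetical metadata_provider_class value and how it is split (the module path below is made up):

value = 'myproject.providers:DirectoryProvider'
modulename, classname = value.split(':')
print((modulename, classname))
# -> ('myproject.providers', 'DirectoryProvider')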
Python
def _load_repository_descriptions(value): """Load XML fragments from files.""" def load_description(path): """Load a single description.""" with open(path, 'r') as file_: contents = file_.read() try: doc = etree.fromstring(contents.encode('utf-8')) except Exception as error: raise ValueError( 'ill-formed XML in repository description {0}: ' '{1}'.format(repr(path), error) ) xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance' if doc.get('{{{0}}}schemaLocation'.format(xsi_ns)) is None: raise ValueError('no schema location') return contents paths = value.split() return map(load_description, paths)
def _load_repository_descriptions(value): """Load XML fragments from files.""" def load_description(path): """Load a single description.""" with open(path, 'r') as file_: contents = file_.read() try: doc = etree.fromstring(contents.encode('utf-8')) except Exception as error: raise ValueError( 'ill-formed XML in repository description {0}: ' '{1}'.format(repr(path), error) ) xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance' if doc.get('{{{0}}}schemaLocation'.format(xsi_ns)) is None: raise ValueError('no schema location') return contents paths = value.split() return map(load_description, paths)
Python
def assert_emitted(self, message): """Verify captured log output. Assert that the given message is part of the captured log. Parameters ---------- message: str The expected log message. """ output = '\n'.join(self.messages) assert message in output, \ 'Message "{0}" not found in "{1}"'.format(message, output)
def assert_emitted(self, message): """Verify captured log output. Assert that the given message is part of the captured log. Parameters ---------- message: str The expected log message. """ output = '\n'.join(self.messages) assert message in output, \ 'Message "{0}" not found in "{1}"'.format(message, output)
Python
def minimal_params(self): """Return a multidict containing minimal request parameters that are needed to successfully call the view function. """ return MultiDict(verb=self.verb)
def minimal_params(self): """Return a multidict containing minimal request parameters that are needed to successfully call the view function. """ return MultiDict(verb=self.verb)
Python
def check_response(self, response, **kwargs): """Check that a response contains the expected values.""" self.assertTrue('time' in response) for key, value in kwargs.iteritems(): self.assertEqual(response[key], value)
def check_response(self, response, **kwargs): """Check that a response contains the expected values.""" self.assertTrue('time' in response) for key, value in kwargs.iteritems(): self.assertEqual(response[key], value)
Python
def check_token(self, response, token): """Check that a resumption token contains the expected values.""" attributes = ['verb', 'metadataPrefix', 'offset', 'from', 'until'] parsed = json.loads(response['token']) self.assertIn('date', parsed) for a in attributes: self.assertEqual(parsed[a], token[a])
def check_token(self, response, token): """Check that a resumption token contains the expected values.""" attributes = ['verb', 'metadataPrefix', 'offset', 'from', 'until'] parsed = json.loads(response['token']) self.assertIn('date', parsed) for a in attributes: self.assertEqual(parsed[a], token[a])
Python
def formats(self): """ List the available metadata formats. Return ------ dict from unicode to (unicode, unicode): Mapping from metadata prefixes to (namespace, schema location) tuples. """ # NOTE: The OAI DC format is required by the OAI-PMH specification. return { u'oai_dc': (u'http://www.openarchives.org/OAI/2.0/oai_dc/', u'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'), }
def formats(self): """ List the available metadata formats. Return ------ dict from unicode to (unicode, unicode): Mapping from metadata prefixes to (namespace, schema location) tuples. """ # NOTE: The OAI DC format is required by the OAI-PMH specification. return { u'oai_dc': (u'http://www.openarchives.org/OAI/2.0/oai_dc/', u'http://www.openarchives.org/OAI/2.0/oai_dc.xsd'), }
Python
def has_changed(self, identifier, since):
    """ Check whether the given item has been modified.

    Parameters
    ----------
    identifier: unicode
        The OAI identifier (as returned by identifiers()) of the item.
    since: datetime.datetime
        Ignore modifications before this date/time.

    Return
    ------
    bool:
        `True`, if metadata or sets of the item have changed since the given
        time. Otherwise `False`.
    """
    return False
def has_changed(self, identifier, since):
    """ Check whether the given item has been modified.

    Parameters
    ----------
    identifier: unicode
        The OAI identifier (as returned by identifiers()) of the item.
    since: datetime.datetime
        Ignore modifications before this date/time.

    Return
    ------
    bool:
        `True`, if metadata or sets of the item have changed since the given
        time. Otherwise `False`.
    """
    return False
Python
def read(self, address): """ Read a block of memory. """ tag = self.get_tag(address) set = self.get_set(address) line = None for candidate in set: if candidate.tag == tag and candidate.valid: line = candidate break return line.data if line else line
def read(self, address): """ Read a block of memory. """ tag = self.get_tag(address) set = self.get_set(address) line = None for candidate in set: if candidate.tag == tag and candidate.valid: line = candidate break return line.data if line else line
Python
def write(self, address, byte): """ Write a byte to cache. """ tag = self.get_tag(address) # Tag of cache line set = self.get_set(address) # Set of cache lines line = None # Search for cache line within set for candidate in set: if candidate.tag == tag and candidate.valid: line = candidate break # Update data of cache line if line: line.data[self.get_offset(address)] = byte line.modified = 1 return True if line else False
def write(self, address, byte): """ Write a byte to cache. """ tag = self.get_tag(address) # Tag of cache line set = self.get_set(address) # Set of cache lines line = None # Search for cache line within set for candidate in set: if candidate.tag == tag and candidate.valid: line = candidate break # Update data of cache line if line: line.data[self.get_offset(address)] = byte line.modified = 1 return True if line else False
Python
def load(self, address, data):
    """ Load a block of memory. """
    tag = self.get_tag(address)
    set = self.get_set(address)
    victim_info = None
    # Pick the least recently used line in the set as the victim.
    victim = set[0]
    victim_index = 0
    for index in range(len(set)):
        if set[index].use < victim.use:
            victim = set[index]
            victim_index = index
    victim.use = 0
    self.update_use(victim, set)
    # If the victim holds modified data, hand it back so it can be written
    # to memory before being overwritten.
    if victim.modified:
        victim_info = (victim_index, victim.data)
    victim.modified = 0
    victim.valid = 1
    victim.tag = tag
    victim.data = data
    return victim_info
def load(self, address, data):
    """ Load a block of memory. """
    tag = self.get_tag(address)
    set = self.get_set(address)
    victim_info = None
    # Pick the least recently used line in the set as the victim.
    victim = set[0]
    victim_index = 0
    for index in range(len(set)):
        if set[index].use < victim.use:
            victim = set[index]
            victim_index = index
    victim.use = 0
    self.update_use(victim, set)
    # If the victim holds modified data, hand it back so it can be written
    # to memory before being overwritten.
    if victim.modified:
        victim_info = (victim_index, victim.data)
    victim.modified = 0
    victim.valid = 1
    victim.tag = tag
    victim.data = data
    return victim_info
Python
def update_use(self, line, set): """ Update the use bits of a cache line. """ use = line.use if line.use < self.mapping: line.use = self.mapping for other in set: if other is not line and other.use > use: other.use -= 1
def update_use(self, line, set): """ Update the use bits of a cache line. """ use = line.use if line.use < self.mapping: line.use = self.mapping for other in set: if other is not line and other.use > use: other.use -= 1
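A condensed, self-contained sketch of the LRU aging idea (the _Line class, the free function and the associativity value are stand-ins for this illustration, not the real cache classes): the touched line jumps to the highest use value and every line that was fresher than it ages by one.

class _Line(object):
    """Minimal stand-in for a cache line, for this illustration only."""
    def __init__(self, use):
        self.use = use

def age_lines(line, set_, mapping=4):
    # The same basic aging rule as update_use above, written as a free
    # function so the sketch runs on its own.
    use = line.use
    if line.use < mapping:
        line.use = mapping
        for other in set_:
            if other is not line and other.use > use:
                other.use -= 1

lines = [_Line(u) for u in (1, 2, 3, 4)]
age_lines(lines[0], lines)
print([l.use for l in lines])  # -> [4, 1, 2, 3]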
Python
def assemble(filename):
    """ Takes a file and outputs binary instructions. """
    try:
        ass_file = open(filename, 'r')
    except IOError:
        print("There was an error reading ", filename)
        sys.exit()
    instruction_list = []
    for line in ass_file:
        instruction = []
        for word in line.split():
            print(word)
            if word in reg_map.keys():
                instruction.append(reg_to_bin(word))
            elif word in r_type_map.keys():
                instruction.append(r_inst_to_bin(word))
            elif word in i_type_map.keys():
                instruction.append(i_inst_to_bin(word))
            elif word in j_type_map.keys():
                instruction.append(j_inst_to_bin(word))
            else:
                # Not an opcode or register, so treat the word as an
                # immediate or a jump offset based on the instruction at
                # the start of the line.
                if line.split()[0] in i_type_map.keys():
                    instruction.append(imm_to_bin(word))
                elif line.split()[0] in j_type_map.keys():
                    instruction.append(j_off_to_bin(word))
        instruction_list.append(instruction)
    ass_file.close()
    return instruction_list
def assemble(filename):
    """ Takes a file and outputs binary instructions. """
    try:
        ass_file = open(filename, 'r')
    except IOError:
        print("There was an error reading ", filename)
        sys.exit()
    instruction_list = []
    for line in ass_file:
        instruction = []
        for word in line.split():
            print(word)
            if word in reg_map.keys():
                instruction.append(reg_to_bin(word))
            elif word in r_type_map.keys():
                instruction.append(r_inst_to_bin(word))
            elif word in i_type_map.keys():
                instruction.append(i_inst_to_bin(word))
            elif word in j_type_map.keys():
                instruction.append(j_inst_to_bin(word))
            else:
                # Not an opcode or register, so treat the word as an
                # immediate or a jump offset based on the instruction at
                # the start of the line.
                if line.split()[0] in i_type_map.keys():
                    instruction.append(imm_to_bin(word))
                elif line.split()[0] in j_type_map.keys():
                    instruction.append(j_off_to_bin(word))
        instruction_list.append(instruction)
    ass_file.close()
    return instruction_list
Python
def subscribe(cls, feed_url, callback, dispatch_uid=None):
    """Subscribes a callback to a feed to get notified of new entries.
    The subscription is loaded and ready right away.

    Args:
        feed_url: the URL of the feed
        callback: a callable function which will be called when there are new entries
            Must be a function in a module or a classmethod. Do not use a staticmethod.
        dispatch_uid: A unique identifier for a signal receiver in cases where duplicate signals may be sent.
            See Preventing duplicate signals for more information in Django documentation.
    Returns:
        A Boolean: `True` if the subscription was created (or already existed) and loaded,
        `False` if something went wrong.
    """
    log_desc = '%s - Subscribing to %s' % (cls.log_desc, feed_url)

    # Get or create the Feed
    f, created = models.Feed.objects.get_or_create(url=feed_url)

    callback = models.Subscription.prepare_callback(callback)
    dispatch_uid = models.Subscription.prepare_dispatch_uid(dispatch_uid, callback)
    try:
        # Get or create the subscription
        sub, sub_created = models.Subscription.objects.get_or_create(
            feed=f,
            callback=callback,
            dispatch_uid=dispatch_uid
        )
        if sub_created:
            logger.info('%s => <Subscription: %s> created' % (log_desc, sub))

        # Load it
        sub.load()

        return True
    except Exception as e:
        logger.error('%s => Cannot get or create a Subscription: callback=%s (dispatch_uid=%s) [KO]\n%s' % (
            log_desc,
            callback,
            dispatch_uid,
            e
        )
        )
        return False
def subscribe(cls, feed_url, callback, dispatch_uid=None):
    """Subscribes a callback to a feed to get notified of new entries.
    The subscription is loaded and ready right away.

    Args:
        feed_url: the URL of the feed
        callback: a callable function which will be called when there are new entries
            Must be a function in a module or a classmethod. Do not use a staticmethod.
        dispatch_uid: A unique identifier for a signal receiver in cases where duplicate signals may be sent.
            See Preventing duplicate signals for more information in Django documentation.
    Returns:
        A Boolean: `True` if the subscription was created (or already existed) and loaded,
        `False` if something went wrong.
    """
    log_desc = '%s - Subscribing to %s' % (cls.log_desc, feed_url)

    # Get or create the Feed
    f, created = models.Feed.objects.get_or_create(url=feed_url)

    callback = models.Subscription.prepare_callback(callback)
    dispatch_uid = models.Subscription.prepare_dispatch_uid(dispatch_uid, callback)
    try:
        # Get or create the subscription
        sub, sub_created = models.Subscription.objects.get_or_create(
            feed=f,
            callback=callback,
            dispatch_uid=dispatch_uid
        )
        if sub_created:
            logger.info('%s => <Subscription: %s> created' % (log_desc, sub))

        # Load it
        sub.load()

        return True
    except Exception as e:
        logger.error('%s => Cannot get or create a Subscription: callback=%s (dispatch_uid=%s) [KO]\n%s' % (
            log_desc,
            callback,
            dispatch_uid,
            e
        )
        )
        return False
Python
def unsubscribe(cls, feed_url, callback, dispatch_uid=None):
    """Unsubscribes a callback from a Feed so that it is no longer notified about new entries.

    Args:
        feed_url: the URL of the feed
        callback: a callable function which will be called when there are new entries
        dispatch_uid: A unique identifier for a signal receiver in cases where duplicate signals may be sent.
            See Preventing duplicate signals for more information in Django documentation.
    Returns:
        A Boolean: `True` if the subscription was found and deleted, otherwise `False`.
    """
    log_desc = '%s - Unsubscribing to %s' % (cls.log_desc, feed_url)

    callback = models.Subscription.prepare_callback(callback)
    dispatch_uid = models.Subscription.prepare_dispatch_uid(dispatch_uid, callback)

    try:
        # Get the subscription
        sub = models.Subscription.objects.get(
            feed__url=feed_url,
            callback=callback,
            dispatch_uid=dispatch_uid
        )

        # Delete it
        sub.delete()

        logger.info('%s => <Subscription: %s> deleted' % (log_desc, sub))

        return True
    except ObjectDoesNotExist:
        pass
    except Exception as e:
        logger.error('%s => Subscription cannot be deleted: callback=%s (dispatch_uid=%s) [KO]\n%s' % (
            log_desc,
            callback,
            dispatch_uid,
            e
        )
        )

    return False
def unsubscribe(cls, feed_url, callback, dispatch_uid=None):
    """Unsubscribes a callback from a Feed so that it is no longer notified about new entries.

    Args:
        feed_url: the URL of the feed
        callback: a callable function which will be called when there are new entries
        dispatch_uid: A unique identifier for a signal receiver in cases where duplicate signals may be sent.
            See Preventing duplicate signals for more information in Django documentation.
    Returns:
        A Boolean: `True` if the subscription was found and deleted, otherwise `False`.
    """
    log_desc = '%s - Unsubscribing to %s' % (cls.log_desc, feed_url)

    callback = models.Subscription.prepare_callback(callback)
    dispatch_uid = models.Subscription.prepare_dispatch_uid(dispatch_uid, callback)

    try:
        # Get the subscription
        sub = models.Subscription.objects.get(
            feed__url=feed_url,
            callback=callback,
            dispatch_uid=dispatch_uid
        )

        # Delete it
        sub.delete()

        logger.info('%s => <Subscription: %s> deleted' % (log_desc, sub))

        return True
    except ObjectDoesNotExist:
        pass
    except Exception as e:
        logger.error('%s => Subscription cannot be deleted: callback=%s (dispatch_uid=%s) [KO]\n%s' % (
            log_desc,
            callback,
            dispatch_uid,
            e
        )
        )

    return False
Python
def fetch_collection(cls, feeds, prefix_log): """Fetches a collection of Feed. Args: feeds: the collection of Feed to fetch prefix_log: a prefix to use in the log to know who called it Returns: The time elapsed in seconds. """ start = timezone.now() log_desc = '%s - Fetching %s Feeds' % (prefix_log, feeds.count()) logger.info('%s => start' % (log_desc,)) for feed in feeds: try: feed.fetch() except Exception as err: logger.error('%s - Fetching => [KO]\n%s' % (feed.log_desc, err)) delta = timezone.now() - start logger.info('%s in %ss => end' % (log_desc, delta.total_seconds())) return delta
def fetch_collection(cls, feeds, prefix_log): """Fetches a collection of Feed. Args: feeds: the collection of Feed to fetch prefix_log: a prefix to use in the log to know who called it Returns: The time elapsed in seconds. """ start = timezone.now() log_desc = '%s - Fetching %s Feeds' % (prefix_log, feeds.count()) logger.info('%s => start' % (log_desc,)) for feed in feeds: try: feed.fetch() except Exception as err: logger.error('%s - Fetching => [KO]\n%s' % (feed.log_desc, err)) delta = timezone.now() - start logger.info('%s in %ss => end' % (log_desc, delta.total_seconds())) return delta
Python
def fetch(self): """Fetches a Feed and creates the new entries. A Fetch status report is also created.""" data = etag = status_code = entries = None status = FetchStatus(feed=self) status.timestamp_start = timezone.now() status.save() # Important to save it here because we need an ID try: # Get content data, etag, status_code = http.get_content( url=self.url, etag=self.etag, use_http_compression=USE_HTTP_COMPRESSION, return_etag=True, return_status_code=True ) except Exception as e: logger.append_msg('Error while getting the content.\n%s' % (e,)) status.http_status_code = status_code if status_code != 200 and status_code != 304: logger.append_msg('HTTP Status code = %s != 200 or 304.' % (status_code,)) elif status_code == 200: # There is data to parse status.size_bytes = len(data) try: # Parse the xml and get the entries entries = self._get_entries(data) except Exception as e: logger.append_msg('Feed cannot be parsed.\n%s' % (e, )) if not entries: logger.append_msg('No entries found.') else: # There are entries to parse status.nb_entries = len(entries) new_entries = [] # Get all the existing uid hash to compare # Not very efficient but OK for now # Later, assumes that taking the X (TBD) last entries is sufficient existing_entries_uid_hash = [v for v in self.entry_set.values_list('uid_hash', flat=True)] # Use of list comprehension because values_list returns a ValuesListQuerySet which does not have an append attribute. # Foreach entry, check whether it must be saved for i, entry in enumerate(entries): uid = self.make_uid(entry) if not uid: logger.append_msg('Entry #%s: UID cannot be made.' % (i, )) elif uid not in existing_entries_uid_hash: try: e_xml = etree.tostring(entry, encoding=unicode) new_entry = self.entry_set.create(fetch_status=status, xml=e_xml, uid_hash=uid) # Do not use bulk_create because the size of the requests can be too big and leads to an error! new_entries.append(new_entry) existing_entries_uid_hash.append(uid) except Exception as err: logger.append_msg('Entry #%s cannot be parsed.\n%s' % (i, err)) status.nb_new_entries = len(new_entries) if new_entries: try: Subscription.notify(self, new_entries) except Exception as err: logger.append_msg('New entries cannot be notified to the subscribers.\n%s' % (err,)) if etag: self.etag = etag self.save() status.timestamp_end = timezone.now() # Log log_desc = '%s - Fetching' % (self.log_desc,) error_msg = logger.flush_messages() if error_msg: # Store the file if it has been downloaded if data: error_msg += '\n' + logger.store(data, self.url, status.timestamp_start) status.error_msg = error_msg logger.error(log_desc + '\n' + error_msg) else: if status_code == 304: logger.info('%s => 304 Feed not modified.' % (log_desc,)) else: delta = status.timestamp_end - status.timestamp_start logger.info('%s => %s bytes fetched in %ss. %s new entries out of %s.' % ( log_desc, status.size_bytes, delta.total_seconds(), status.nb_new_entries, status.nb_entries )) status.save() # At the end to save all changes return error_msg == ''
def fetch(self): """Fetches a Feed and creates the new entries. A Fetch status report is also created.""" data = etag = status_code = entries = None status = FetchStatus(feed=self) status.timestamp_start = timezone.now() status.save() # Important to save it here because we need an ID try: # Get content data, etag, status_code = http.get_content( url=self.url, etag=self.etag, use_http_compression=USE_HTTP_COMPRESSION, return_etag=True, return_status_code=True ) except Exception as e: logger.append_msg('Error while getting the content.\n%s' % (e,)) status.http_status_code = status_code if status_code != 200 and status_code != 304: logger.append_msg('HTTP Status code = %s != 200 or 304.' % (status_code,)) elif status_code == 200: # There is data to parse status.size_bytes = len(data) try: # Parse the xml and get the entries entries = self._get_entries(data) except Exception as e: logger.append_msg('Feed cannot be parsed.\n%s' % (e, )) if not entries: logger.append_msg('No entries found.') else: # There are entries to parse status.nb_entries = len(entries) new_entries = [] # Get all the existing uid hash to compare # Not very efficient but OK for now # Later, assumes that taking the X (TBD) last entries is sufficient existing_entries_uid_hash = [v for v in self.entry_set.values_list('uid_hash', flat=True)] # Use of list comprehension because values_list returns a ValuesListQuerySet which does not have an append attribute. # Foreach entry, check whether it must be saved for i, entry in enumerate(entries): uid = self.make_uid(entry) if not uid: logger.append_msg('Entry #%s: UID cannot be made.' % (i, )) elif uid not in existing_entries_uid_hash: try: e_xml = etree.tostring(entry, encoding=unicode) new_entry = self.entry_set.create(fetch_status=status, xml=e_xml, uid_hash=uid) # Do not use bulk_create because the size of the requests can be too big and leads to an error! new_entries.append(new_entry) existing_entries_uid_hash.append(uid) except Exception as err: logger.append_msg('Entry #%s cannot be parsed.\n%s' % (i, err)) status.nb_new_entries = len(new_entries) if new_entries: try: Subscription.notify(self, new_entries) except Exception as err: logger.append_msg('New entries cannot be notified to the subscribers.\n%s' % (err,)) if etag: self.etag = etag self.save() status.timestamp_end = timezone.now() # Log log_desc = '%s - Fetching' % (self.log_desc,) error_msg = logger.flush_messages() if error_msg: # Store the file if it has been downloaded if data: error_msg += '\n' + logger.store(data, self.url, status.timestamp_start) status.error_msg = error_msg logger.error(log_desc + '\n' + error_msg) else: if status_code == 304: logger.info('%s => 304 Feed not modified.' % (log_desc,)) else: delta = status.timestamp_end - status.timestamp_start logger.info('%s => %s bytes fetched in %ss. %s new entries out of %s.' % ( log_desc, status.size_bytes, delta.total_seconds(), status.nb_new_entries, status.nb_entries )) status.save() # At the end to save all changes return error_msg == ''
Python
def _get_entry_id(cls, entry): """Get the ID of an entry.""" for k, v in FEED_FORMAT.items(): try: id = entry.xpath(v['id'])[0].text if id: return id except: pass return None
def _get_entry_id(cls, entry): """Get the ID of an entry.""" for k, v in FEED_FORMAT.items(): try: id = entry.xpath(v['id'])[0].text if id: return id except: pass return None
Python
def make_uid(cls, entry): """Make a suitable uid for the storage.""" uid = cls._get_entry_id(entry) if uid: return cls.calc_hash(uid) return None
def make_uid(cls, entry): """Make a suitable uid for the storage.""" uid = cls._get_entry_id(entry) if uid: return cls.calc_hash(uid) return None
Python
def save(self, *args, **kwargs): """Overrides the save method. Converts the callback and ensures a dispatch_uid is used.""" self.callback = self.prepare_callback(self.callback) self.dispatch_uid = self.prepare_dispatch_uid(self.dispatch_uid, self.callback) super(Subscription, self).save(*args, **kwargs)
def save(self, *args, **kwargs): """Overrides the save method. Converts the callback and ensures a dispatch_uid is used.""" self.callback = self.prepare_callback(self.callback) self.dispatch_uid = self.prepare_dispatch_uid(self.dispatch_uid, self.callback) super(Subscription, self).save(*args, **kwargs)
Python
def prepare_callback(cls, callback): """Prepares a callback to be stored in the DB. i.e. converts it to a string. Returns: A string. """ if callable(callback): callback = serialize_function(callback) return callback
def prepare_callback(cls, callback): """Prepares a callback to be stored in the DB. i.e. converts it to a string. Returns: A string. """ if callable(callback): callback = serialize_function(callback) return callback
Python
def receiver_exist(receiver, signal, dispatch_uid): """Code adapted from Django code to test whether a receiver already exists.""" if dispatch_uid: lookup_key = (dispatch_uid, _make_id(None)) # _make_id(sender) not use of sender else: lookup_key = (_make_id(receiver), _make_id(None)) signal.lock.acquire() try: for r_key, _ in signal.receivers: if r_key == lookup_key: return True finally: signal.lock.release() return False
def receiver_exist(receiver, signal, dispatch_uid): """Code adapted from Django code to test whether a receiver already exists.""" if dispatch_uid: lookup_key = (dispatch_uid, _make_id(None)) # _make_id(sender) not use of sender else: lookup_key = (_make_id(receiver), _make_id(None)) signal.lock.acquire() try: for r_key, _ in signal.receivers: if r_key == lookup_key: return True finally: signal.lock.release() return False
Python
def new_entries_connect(feed, callback, dispatch_uid):
    """Connects a callback to a feed only if it is not connected already.

    Returns:
        A boolean saying whether it has been newly connected.
    """
    LOCK.acquire()
    try:
        if not feed.pk in FEED_NEW_ENTRIES_SIGNALS:
            FEED_NEW_ENTRIES_SIGNALS[feed.pk] = make_new_entries_signal()

        if not receiver_exist(callback, FEED_NEW_ENTRIES_SIGNALS[feed.pk], dispatch_uid):
            FEED_NEW_ENTRIES_SIGNALS[feed.pk].connect(callback, dispatch_uid=dispatch_uid)
            return True
    finally:
        LOCK.release()

    return False
def new_entries_connect(feed, callback, dispatch_uid):
    """Connects a callback to a feed only if it is not connected already.

    Returns:
        A boolean saying whether it has been newly connected.
    """
    LOCK.acquire()
    try:
        if not feed.pk in FEED_NEW_ENTRIES_SIGNALS:
            FEED_NEW_ENTRIES_SIGNALS[feed.pk] = make_new_entries_signal()

        if not receiver_exist(callback, FEED_NEW_ENTRIES_SIGNALS[feed.pk], dispatch_uid):
            FEED_NEW_ENTRIES_SIGNALS[feed.pk].connect(callback, dispatch_uid=dispatch_uid)
            return True
    finally:
        LOCK.release()

    return False
Python
def new_entries_disconnect(feed, callback, dispatch_uid):
    """Disconnects a callback from a feed only if it is already connected.

    Returns:
        A boolean saying whether it was connected and has been disconnected.
    """
    LOCK.acquire()
    try:
        if feed.pk in FEED_NEW_ENTRIES_SIGNALS and receiver_exist(callback, FEED_NEW_ENTRIES_SIGNALS[feed.pk], dispatch_uid):
            FEED_NEW_ENTRIES_SIGNALS[feed.pk].disconnect(callback, dispatch_uid=dispatch_uid)
            return True
    finally:
        LOCK.release()

    return False
def new_entries_disconnect(feed, callback, dispatch_uid):
    """Disconnects a callback from a feed only if it is already connected.

    Returns:
        A boolean saying whether it was connected and has been disconnected.
    """
    LOCK.acquire()
    try:
        if feed.pk in FEED_NEW_ENTRIES_SIGNALS and receiver_exist(callback, FEED_NEW_ENTRIES_SIGNALS[feed.pk], dispatch_uid):
            FEED_NEW_ENTRIES_SIGNALS[feed.pk].disconnect(callback, dispatch_uid=dispatch_uid)
            return True
    finally:
        LOCK.release()

    return False
Python
def new_entries_send(feed, new_entries): """Sends notifications to the receivers of the new entries signals. Uses send_robust to ensure all receivers are notified of the signal. Returns: A list of tuple pairs [(receiver, response), ... ], representing the list of called receiver functions and their response values. See the Django documentation about signals for further information. """ if feed.pk in FEED_NEW_ENTRIES_SIGNALS: return FEED_NEW_ENTRIES_SIGNALS[feed.pk].send_robust(sender=SENDER, feed_url=feed.url, new_entries=new_entries)
def new_entries_send(feed, new_entries): """Sends notifications to the receivers of the new entries signals. Uses send_robust to ensure all receivers are notified of the signal. Returns: A list of tuple pairs [(receiver, response), ... ], representing the list of called receiver functions and their response values. See the Django documentation about signals for further information. """ if feed.pk in FEED_NEW_ENTRIES_SIGNALS: return FEED_NEW_ENTRIES_SIGNALS[feed.pk].send_robust(sender=SENDER, feed_url=feed.url, new_entries=new_entries)
Python
def _return(*args): """Selects which value to return.""" to_return = () for arg in args: cond, value = arg if cond: to_return += (value,) if len(to_return) == 1: return to_return[0] return to_return
def _return(*args): """Selects which value to return.""" to_return = () for arg in args: cond, value = arg if cond: to_return += (value,) if len(to_return) == 1: return to_return[0] return to_return
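A short illustration of the (condition, value) pairs this helper expects, assuming _return above is in scope; a single surviving value is unwrapped, while several are returned as a tuple:

print(_return((True, 'data'), (False, 'etag'), (True, 200)))
# -> ('data', 200)
print(_return((True, 'data'), (False, 'etag'), (False, 200)))
# -> data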
Python
def combinations(nums):
    """Get all orderings (permutations) of the numbers in nums."""
    combos = []
    if len(nums) == 2:
        return [[nums[0], nums[1]], [nums[1], nums[0]]]
    else:
        for num in nums:
            nums2 = nums[:]
            nums2.remove(num)
            combos.extend([x + [num] for x in combinations(nums2)])
    return combos
def combinations(nums):
    """Get all orderings (permutations) of the numbers in nums."""
    combos = []
    if len(nums) == 2:
        return [[nums[0], nums[1]], [nums[1], nums[0]]]
    else:
        for num in nums:
            nums2 = nums[:]
            nums2.remove(num)
            combos.extend([x + [num] for x in combinations(nums2)])
    return combos
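Despite the name, the recursion enumerates orderings; assuming the function above is in scope, the result for [1, 2, 3] matches itertools.permutations:

import itertools

result = {tuple(c) for c in combinations([1, 2, 3])}
print(result == {tuple(p) for p in itertools.permutations([1, 2, 3])})
# -> True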
Python
def add_solution(self, remaining=0): """Add a solution to the solutions. If numbers are missing add it to the partial solutions. Return `True` if a new solution was added. """ new_solution = False m = hashlib.md5() m.update(str(self.solution.values())) if m.hexdigest() not in self.solutions: if remaining > 0: self.partial[m.hexdigest()] = remaining self.solutions[m.hexdigest()] = self.solution.copy() new_solution = True return new_solution
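The deduplication above hinges on hashing a string form of the solution dict; a minimal standalone sketch of that idea follows (the coordinates and digits are made up, and sorted items plus an explicit encode() are used here so the sketch runs on Python 3, whereas the method above hashes str(self.solution.values()) directly):

import hashlib

solution = {(0, 1): 3, (0, 2): 7}
key = hashlib.md5(str(sorted(solution.items())).encode()).hexdigest()
seen = {}
if key not in seen:
    seen[key] = dict(solution)   # store a copy keyed by its digest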
Python
def _get_found(self):
    """Return the digits already found."""
    found_digits = []
    for coord in self.coords:
        if coord in self.solver.solution:
            found_digits.append(self.solver.solution[coord])
    return found_digits
Python
def undo(self, guess_coord):
    """Undo a change to the solution if a previously placed digit prevents
    a solution from being found.
    """
    coord = None
    while coord != guess_coord:
        coord = self.coord_changes.pop()
        value = self.solver.solution[coord]
        del self.solver.solution[coord]
Python
def fill_cells(self, test=False):
    """Fill in numbers into the run and return the number of digits found."""
    self.digit_coords = {x: set() for x in range(1, 10)}
    found = self._get_found()
    if len(found) != len(set(found)):
        return -1
    digits1, digits2 = self.get_digits()
    filled_count = 0
    for coord in self.coords:
        if coord not in self.solver.solution:
            digits3, digits4 = self.intersect[coord].get_digits()
            common = digits3 & digits1
            if len(common) == 1:
                found = common.pop()
                logging.debug("Found: %s %s" % (coord, found))
                self.add_found(coord, found, test)
                if found in digits2:
                    digits2.remove(found)
                filled_count += 1
            elif len(common) == 0:
                return -1
            for digit in common:
                self.digit_coords[digit].add(coord)
            if test and filled_count != 0 and self.intersect[coord].fill_cells(test) == -1:
                return -1
    filled_count += self._fill_unique(digits2)
    return filled_count
Python
def _create_buttons(self):
    """
    For each _xxx_button attribute, if it has been set to a pin number,
    then overwrite the attribute with a Button object that connects to
    that pin number
    """
    for button_name in BUTTON_NAMES:
        button_attr_name = "_{}_button".format(button_name)
        pin_number = getattr(self, button_attr_name)
        if pin_number is not None:
            button = Button(_BUTTON_TO_BCM_LOOKUP[pin_number])
            button.when_pressed = self._led_feedback_wrapper(
                getattr(self, "_{}".format(button_name))
            )
            setattr(self, button_attr_name, button)

    # To mute, the "volume down" button is held
    if self._volume_down_button:
        self._volume_down_button.when_held = self._mute
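The method above follows the standard gpiozero Button API (when_pressed and when_held callbacks); a minimal standalone sketch of the same pattern, with an arbitrary BCM pin and made-up handlers:

from signal import pause
from gpiozero import Button

def volume_down():
    print("volume down pressed")

def mute():
    print("volume down held -> mute")

button = Button(17)                # BCM pin number
button.when_pressed = volume_down
button.when_held = mute            # fires after the default hold_time (1 s)
pause()                            # keep the script alive to receive events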
Python
def aligned_buf(buf, alignment):
    """Return an aligned buffer

    Given a buffer, return a memory view within that buffer, which starts
    at an aligned address in RAM.
    The returned memory view is possibly smaller.

    !! You must keep a reference to the original buffer to prevent the
    garbage collector from collecting the aligned view!

    Arguments:
        buf -- An object that implements buffer protocol
        alignment -- Integer value
    """
    p = lv.C_Pointer()
    p.ptr_val = buf
    mod = p.uint_val % alignment
    offset = alignment - mod if mod != 0 else 0
    if len(buf) <= offset:
        return None
    p.uint_val += offset
    return p.ptr_val.__dereference__(len(buf) - offset)
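A hedged usage sketch, assuming the lv_micropython bindings used above are available: allocating alignment - 1 extra bytes guarantees at least the requested number of aligned bytes, and the original bytearray must stay referenced.

ALIGN = 32
raw = bytearray(1024 + ALIGN - 1)   # keep this reference for the buffer's lifetime
buf = aligned_buf(raw, ALIGN)       # view of at least 1024 bytes, 32-byte aligned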
Python
def save_jobs_to_pickle(context):
    """ Pickles all active jobs into JOBS_FILE. """
    if isinstance(context, CallbackContext):
        jobs = context.job.context.jobs()
    else:
        jobs = context.jobs()
    with open(JOBS_FILE, "wb") as f:
        for job in jobs:
            if job.name != 'save_job':
                data = tuple(getattr(job, attr) for attr in JOB_DATA)
                pickle.dump(data, f)
Python
def load_jobs_from_pickle(queue: JobQueue):
    """ Unpickles jobs from JOBS_FILE and creates new jobs with given arguments. """
    with open(JOBS_FILE, 'rb') as f:
        while True:
            try:
                data = pickle.load(f)
                args = [x for x in data]
                if args[3].endswith("-once"):
                    queue.run_once(data[0], data[1], context=data[2], name=data[3])
                else:
                    queue.run_daily(data[0], data[1], context=data[2], name=data[3])
            except EOFError:
                break
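A hedged startup sketch showing how these two helpers could be wired into a python-telegram-bot v13 JobQueue; the Updater instance, the one-minute interval, and passing the queue as the job's context are assumptions consistent with the 'save_job' name check above:

from datetime import timedelta

job_queue = updater.job_queue            # assumes an existing Updater named `updater`
try:
    load_jobs_from_pickle(job_queue)
except FileNotFoundError:
    pass                                 # nothing saved yet
# The queue is passed as the job's context so save_jobs_to_pickle can reach it
# via context.job.context when it runs as a callback.
job_queue.run_repeating(save_jobs_to_pickle, timedelta(minutes=1),
                        context=job_queue, name='save_job')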
Python
def start(update: Update, context: CallbackContext):
    """ Sends a message when the /start command is used. """
    context.user_data.update({'chat_id': update.message.chat_id})
    context.user_data.update({'timezone': pytz.UTC})
    update.message.reply_text("Hi! I'm RemindPy, a bot to help you keep track of things.")
    update.message.reply_text("First, use /set_timezone to, well, set your timezone!")
    update.message.reply_text("After that, just use /set_time to set the time of your daily reminder " +
                              "and /add to add new reminders. \nYou can check out other commands with /help")
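Registering the handler, sketched for a python-telegram-bot v13-style setup (the token placeholder is illustrative):

from telegram.ext import Updater, CommandHandler

updater = Updater("BOT_TOKEN")
updater.dispatcher.add_handler(CommandHandler("start", start))
updater.start_polling()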
Python
def remove_job_if_exists(name: str, context: CallbackContext):
    """ Removes a job with a certain name if it exists. """
    current_jobs = context.job_queue.get_jobs_by_name(name)
    if not current_jobs:
        return False
    for job in current_jobs:
        job.schedule_removal()
    return True
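Inside a command handler, the helper is typically called to replace any existing job before scheduling a new one; a hedged sketch where daily_reminder and the chat-id naming scheme are assumptions:

from datetime import time

job_name = str(update.message.chat_id)
replaced = remove_job_if_exists(job_name, context)
context.job_queue.run_daily(daily_reminder, time(hour=9),
                            context=update.message.chat_id, name=job_name)
update.message.reply_text("Reminder updated." if replaced else "Reminder set.")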
Python
def check_validity_of_time_string(time_str: str):
    """ Checks if the string is a valid HH:MM string. """
    try:
        datetime.strptime(time_str, "%H:%M")
        return True
    except ValueError:
        return False
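For illustration, a few inputs and the results the HH:MM check gives:

check_validity_of_time_string("07:30")   # True
check_validity_of_time_string("25:00")   # False - hour out of range
check_validity_of_time_string("7.30")    # False - wrong separator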