Dataset columns: language (string, 6 classes), original_string (string, 25 to 887k characters), text (string, 25 to 887k characters).
Python
def sortColors(self, nums: List[int]) -> None:
    """
    Do not return anything, modify nums in-place instead.
    """
    # Time complexity: O(N)
    d = {0: 0, 1: 0, 2: 0}
    for num in nums:
        d[num] += 1
    for i in range(d[0]):
        nums[i] = 0
    for i in range(d[0], d[0] + d[1]):
        nums[i] = 1
    for i in range((d[0] + d[1]), d[0] + d[1] + d[2]):
        nums[i] = 2
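A minimal usage sketch, assuming the method above lives on a LeetCode-style Solution class (not shown in the snippet) and that List comes from the typing module:

from typing import List

nums = [2, 0, 2, 1, 1, 0]
Solution().sortColors(nums)  # Solution is the assumed enclosing class
print(nums)                  # [0, 0, 1, 1, 2, 2]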
Python
def reverseString(self, s: List[str]) -> None:
    """
    Do not return anything, modify s in-place instead.
    """
    s[::] = s[::-1]
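As a side note, a sketch of the same in-place reversal using the built-in list method (equivalent behavior, assuming the same Solution-style context):

def reverseString(self, s: List[str]) -> None:
    """Reverse s in place using the built-in list method."""
    s.reverse()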
Python
def parse(s):
    """
    Parse a human-readable timedelta string and return a
    :class:`datetime.timedelta` object.
    """
    td = {"days": 0.0, "hours": 0.0, "minutes": 0.0, "seconds": 0.0}
    for m in _rex.finditer(s):
        for k in td.keys():
            v = m.group(k)
            if v is not None:
                td[k] = float(v)
    return datetime.timedelta(days=td["days"], seconds=td["seconds"],
                              minutes=td["minutes"], hours=td["hours"])
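The snippet relies on a module-level regex _rex with named groups days, hours, minutes, and seconds, plus an imported datetime module; neither is shown above. A hedged sketch of what such a pattern might look like (the actual pattern in the source module may differ):

import datetime
import re

# Hypothetical pattern: each alternative captures one unit, e.g. "1d 2h 30m 10s".
_rex = re.compile(
    r"(?P<days>\d+(?:\.\d+)?)\s*d(?:ays?)?"
    r"|(?P<hours>\d+(?:\.\d+)?)\s*h(?:ours?)?"
    r"|(?P<minutes>\d+(?:\.\d+)?)\s*m(?:in(?:utes?)?)?"
    r"|(?P<seconds>\d+(?:\.\d+)?)\s*s(?:ec(?:onds?)?)?"
)

print(parse("1d 2h 30m"))  # 1 day, 2:30:00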
Python
def k_nearest_neighbors(training_feature_vector, testInstance, k):
    """
    Returns the k nearest neighbors around the color value of the test image.
    If k is set to '3', the three training samples closest to the value of the
    test image will be returned in a list.

    Keyword arguments:
    training_feature_vector -- feature vectors of the training data
    testInstance -- feature vector of the test image (a single row of the test dataset)
    k -- number of nearest neighbors to return

    Return variables:
    neighbors (list, DEFAULT [])
    """
    distances = []
    length = len(testInstance)
    for x in range(len(training_feature_vector)):
        dist = calculate_euclideandistance(testInstance, training_feature_vector[x], length)
        distances.append((training_feature_vector[x], dist))
    distances.sort(key=operator.itemgetter(1))
    neighbors = []
    for x in range(k):
        neighbors.append(distances[x][0])
    return neighbors
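The helper calculate_euclideandistance and the operator import are assumed to exist elsewhere in the module. A minimal sketch of what the distance helper might look like (hypothetical, not part of the original snippet):

import math
import operator

def calculate_euclideandistance(variable1, variable2, length):
    # Sum of squared differences over the first `length` feature dimensions.
    distance = 0.0
    for x in range(length):
        distance += pow(float(variable1[x]) - float(variable2[x]), 2)
    return math.sqrt(distance)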
Python
def response_of_neighbors(neighbors):
    """
    Returns the label with the most votes among the neighbors around the color
    presented in the test image. This definition is called by
    'knn_classifiermain', but uses the argument 'neighbors' from
    'k_nearest_neighbors' (in order to count the nearest neighbors, of course...).

    Keyword arguments:
    neighbors -- the nearest neighbors around the test feature cluster centroid

    Return variables:
    sortedVotes[0][0] (label of the most common neighbor, DEFAULT 0)
    """
    all_possible_neighbors = {}
    for x in range(len(neighbors)):
        response = neighbors[x][-1]
        if response in all_possible_neighbors:
            all_possible_neighbors[response] += 1
        else:
            all_possible_neighbors[response] = 1
    sortedVotes = sorted(all_possible_neighbors.items(),
                         key=operator.itemgetter(1), reverse=True)
    return sortedVotes[0][0]
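A minimal usage sketch, assuming each neighbor is a feature row whose last element is its color label (and that operator is imported):

neighbors = [
    [255.0, 10.0, 5.0, 'red'],
    [250.0, 20.0, 15.0, 'red'],
    [10.0, 10.0, 250.0, 'blue'],
]
print(response_of_neighbors(neighbors))  # 'red' (majority vote)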
Python
def load_dataset(filename, filename2, training_feature_vector=[], test_feature_vector=[]):
    """
    Reads the CSV-formatted training.data and test.data files in order to load
    the training feature vectors and the test feature vector.

    Keyword arguments:
    filename -- training .csv file
    filename2 -- test_feature .csv file
    training_feature_vector=[] -- list that is filled with training_feature_vector data
    test_feature_vector=[] -- list that is filled with test_feature_vector data

    Return variables:
    none
    """
    # Note: the vectors are filled in place; callers should pass in their own
    # lists, since mutable default arguments are shared across calls.
    with open(filename) as csvfile:
        lines = csv.reader(csvfile)
        dataset = list(lines)
        for x in range(len(dataset)):
            for y in range(3):
                dataset[x][y] = float(dataset[x][y])
            training_feature_vector.append(dataset[x])

    with open(filename2) as csvfile:
        lines = csv.reader(csvfile)
        dataset = list(lines)
        for x in range(len(dataset)):
            for y in range(3):
                dataset[x][y] = float(dataset[x][y])
            test_feature_vector.append(dataset[x])
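A minimal usage sketch with hypothetical file names; each row of the CSV-formatted files is expected to hold three numeric color features followed by a label:

import csv

training_features, test_features = [], []
load_dataset('training.data', 'test.data', training_features, test_features)
print(len(training_features), len(test_features))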
Python
def color_histogram_of_training_image(img_name):
    """
    Creates a .data training file that contains color-cluster data.
    This definition is called by 'training'.

    Keyword arguments:
    img_name -- image file from the training data collection

    Return variables:
    none
    """
    # detect image color by using image file name to label training data
    # NOTE: if none of the known color names appears in img_name, data_source
    # stays undefined and the write at the end raises a NameError.
    if 'red' in img_name:
        data_source = 'red'
    elif 'yellow' in img_name:
        data_source = 'yellow'
    elif 'green' in img_name:
        data_source = 'green'
    elif 'orange' in img_name:
        data_source = 'orange'
    elif 'white' in img_name:
        data_source = 'white'
    elif 'black' in img_name:
        data_source = 'black'
    elif 'blue' in img_name:
        data_source = 'blue'
    elif 'violet' in img_name:
        data_source = 'violet'

    # load the image
    image = cv2.imread(img_name)

    chans = cv2.split(image)
    colors = ('b', 'g', 'r')
    features = []
    feature_data = ''
    counter = 0
    for (chan, color) in zip(chans, colors):
        counter = counter + 1

        hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
        features.extend(hist)

        # find the peak pixel values for R, G, and B
        elem = np.argmax(hist)

        if counter == 1:
            blue = str(elem)
        elif counter == 2:
            green = str(elem)
        elif counter == 3:
            red = str(elem)
            feature_data = red + ',' + green + ',' + blue

    with open('../data/color_recognition/training.data', 'a') as myfile:
        myfile.write(feature_data + ',' + data_source + '\n')
Python
def color_controller(source_image, debugparam, debugcolor, print_test_and_training):
    """
    Main (parent) definition that returns a color based upon child definitions.

    Keyword arguments:
    source_image -- test image source (array)
    debugparam -- bool operator that enables text output when debugging is enabled

    Return variables:
    prediction (string, DEFAULT "Color wasn't found due to an error")
    """
    # checking whether the training data is ready
    PATH = '../data/color_recognition/training.data'

    if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
        if debugparam:
            print('training data is ready, classifier is loading...')
    else:
        if debugparam:
            print('training data is being created...')
        # create an empty training file, then (re)build the training data
        open('../data/color_recognition/training.data', 'w').close()
        training()
        if debugparam:
            print('training data is ready, classifier is loading...')

    # get the prediction
    color_histogram_of_test_image(debugparam, source_image, debugcolor)
    prediction = knn_classifiermain('../data/color_recognition/training.data',
                                    '../data/color_recognition/test.data')

    if debugparam:
        print('Also plotting the color of test image in ' + debugcolor + ' color spectrum ')
        plotcolor(source_image, debugcolor)

    if print_test_and_training:
        print('Also generating a plot of training and test data in a 3D (HSV!) plot.')
        plot_test_and_training_data()

    return prediction
Python
def average(lst):
    """
    Definition to determine the average of a list.

    Keyword arguments:
    lst -- list with integers to determine the average of

    Return variables:
    sum(lst) / len(lst) (float, DEFAULT 0)
    """
    return sum(lst) / len(lst)
Python
def addtolist(integer, lst):
    """
    Definition to add a variable to a list.

    Keyword arguments:
    integer -- integer which needs to be appended to the list lst
    lst -- target list to append the integer to

    Return variables:
    none
    """
    lst.append(integer)
Python
def calculate_radialdistance_theta(x_between_centerandobject, y_between_centerandobject):
    """
    Calculate the Euclidean distance and theta, given the horizontal and
    vertical distance between the center of the camera frame and the object.

    Keyword arguments:
    x_between_centerandobject -- horizontal distance between object and center camera frame
    y_between_centerandobject -- vertical distance between object and center camera frame

    Return variables:
    theta, radial_distance (int, int; DEFAULT 0, 0)
    """
    radial_distance = sqrt((x_between_centerandobject * x_between_centerandobject) +
                           (y_between_centerandobject * y_between_centerandobject))

    # If structures to correctly determine the angle where the object is positioned.
    if y_between_centerandobject < 0:
        if x_between_centerandobject < 0:
            cos = (abs(x_between_centerandobject) / abs(radial_distance))
            theta_firstquadrant = math.degrees(math.acos(cos))
            theta = theta_firstquadrant + 180
            return theta, radial_distance
        if x_between_centerandobject > 0:
            cos = -1 * (abs(x_between_centerandobject) / abs(radial_distance))
            theta_secondquadrant = math.degrees(math.acos(cos))
            theta = theta_secondquadrant + 180
            return theta, radial_distance
    else:
        theta = math.degrees(math.acos(x_between_centerandobject / radial_distance))
        return theta, radial_distance
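A quick worked example for an object 3 units right of and 4 units above the frame center, assuming the module imports `from math import sqrt` and `import math`:

theta, radial_distance = calculate_radialdistance_theta(3.0, 4.0)
print(radial_distance)  # 5.0
print(theta)            # about 53.13 degrees, i.e. math.degrees(math.acos(3 / 5))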
Python
def create_transformation(path_to_importfolder, path_to_exportfolder, start_number, repeat_variable):
    """
    Main definition to create the transformations and store them.

    Keyword arguments:
    path_to_importfolder -- path to the import folder
    path_to_exportfolder -- path to the export folder
    start_number -- start number for writing images to the export folder
    repeat_variable -- amount of times an augmentation needs to be created from the same image

    Return variables:
    none
    """
    # Create a collection of transformations. The choice of transformations is arbitrary.
    my_transforms = transforms.Compose([
        transforms.Resize((500, 500)),
        transforms.RandomCrop((500, 500)),
        transforms.ColorJitter(brightness=0.5, hue=0.5, saturation=0.5),
        transforms.RandomRotation(degrees=45),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.25),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])])

    # Create a dataset from an image folder. This image folder should contain all the sample images you want to be augmented.
    val_set = datasets.ImageFolder(root=path_to_importfolder, transform=my_transforms)

    # Set an img_num. Default 0, but can be different if you want to add more images upon existing ones.
    img_num = start_number

    # For loop which repeats repeat_variable times per image in the dataset, as explained above.
    # Ex.: 101 sample pictures with repeat_variable=20 will result in 2020 pictures.
    for _ in range(repeat_variable):
        for img, label in val_set:
            save_image(img, path_to_exportfolder + str(img_num) + '.jpg')
            img_num += 1
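A minimal usage sketch with hypothetical folder paths, assuming the torchvision imports used by the function (datasets.ImageFolder expects the import folder to contain at least one class subdirectory):

from torchvision import datasets, transforms
from torchvision.utils import save_image

# Writes augmented copies as 0.jpg, 1.jpg, ... into ./augmented/
create_transformation('./samples/', './augmented/', start_number=0, repeat_variable=20)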
Python
def find_job_ids(self, filter=None, doc_filter=None):
    """Find job ids from a state point or document filter.

    Parameters
    ----------
    filter : dict
        A mapping of key-value pairs that all indexed job state points are
        compared against (Default value = None).
    doc_filter : dict
        A mapping of key-value pairs that all indexed job documents are
        compared against (Default value = None).

    Returns
    -------
    list
        List of job ids matching the provided filter(s).

    Raises
    ------
    TypeError
        If the filters are not JSON serializable.
    ValueError
        If the filters are invalid.
    RuntimeError
        If the filters are not supported by the index.

    """
    if filter:
        filter = dict(self._resolve_statepoint_filter(filter))
        if doc_filter:
            filter.update(doc_filter)
    elif doc_filter:
        warnings.warn(DOC_FILTER_WARNING, DeprecationWarning)
        filter = doc_filter
    return self._collection._find(filter)
Python
def _invalidate_config_cache(project):
    """Invalidate cached properties derived from a project config."""
    project._id = None
    project._rd = None
    project._wd = None
Python
def _repr_html_(self):
    """Project details in HTML format for use in IPython environment.

    Returns
    -------
    str
        HTML containing project details.

    """
    return (
        "<p>"
        + f"<strong>Project:</strong> {self.id}<br>"
        + f"<strong>Root:</strong> {self.root_directory()}<br>"
        + f"<strong>Workspace:</strong> {self.workspace()}<br>"
        + f"<strong>Size:</strong> {len(self)}"
        + "</p>"
        + self.find_jobs()._repr_html_jobs()
    )
Python
def _check_schema_compatibility(self):
    """Check whether this project's data schema is compatible with this version.

    Raises
    ------
    :class:`~signac.errors.IncompatibleSchemaVersion`
        If the schema version is incompatible.

    """
    schema_version = version.parse(SCHEMA_VERSION)
    config_schema_version = version.parse(self.config["schema_version"])
    if config_schema_version > schema_version:
        # Project config schema version is newer and therefore not supported.
        raise IncompatibleSchemaVersion(
            "The signac schema version used by this project is '{}', but signac {} "
            "only supports up to schema version '{}'. Try updating signac.".format(
                config_schema_version, __version__, schema_version
            )
        )
    elif config_schema_version < schema_version:
        raise IncompatibleSchemaVersion(
            "The signac schema version used by this project is '{}', but signac {} "
            "requires schema version '{}'. Please use '$ signac migrate' to "
            "irreversibly migrate this project's schema to the supported "
            "version.".format(config_schema_version, __version__, schema_version)
        )
    else:  # identical and therefore compatible
        logger.debug(
            f"The project's schema version {config_schema_version} is supported."
        )
Python
def min_len_unique_id(self):
    """Determine the minimum length required for a job id to be unique.

    This method's runtime scales with the number of jobs in the workspace.

    Returns
    -------
    int
        Minimum string length of a unique job identifier.

    """
    job_ids = list(self._find_job_ids())
    tmp = set()
    for i in range(32):
        tmp.clear()
        for _id in job_ids:
            if _id[:i] in tmp:
                break
            else:
                tmp.add(_id[:i])
        else:
            break
    return i
Python
def fn(self, filename):
    """Prepend a filename with the project's root directory path.

    Parameters
    ----------
    filename : str
        The name of the file.

    Returns
    -------
    str
        The joined path of project root directory and filename.

    """
    return os.path.join(self.root_directory(), filename)
Python
def _reset_document(self, new_doc):
    """Reset document to new document passed.

    Parameters
    ----------
    new_doc : dict
        The new project document.

    """
    with self._lock:
        self.document.reset(new_doc)
Python
def document(self):
    """Get document associated with this project.

    Returns
    -------
    :class:`~signac.synced_collections.backends.collection_json.BufferedJSONAttrDict`
        The project document.

    """
    with self._lock:
        if self._document is None:
            fn_doc = os.path.join(self.root_directory(), self.FN_DOCUMENT)
            self._document = BufferedJSONAttrDict(
                filename=fn_doc, write_concern=True
            )
    return self._document
Python
def document(self, new_doc):
    """Setter method for document associated with this project.

    Parameters
    ----------
    new_doc : dict
        The new project document.

    """
    self._reset_document(new_doc)
Python
def doc(self):
    """Get document associated with this project.

    Alias for :meth:`~signac.Project.document`.

    Returns
    -------
    :class:`~signac.synced_collections.backends.collection_json.BufferedJSONAttrDict`
        The project document.

    """
    return self.document
Python
def doc(self, new_doc):
    """Setter method for document associated with this project.

    Parameters
    ----------
    new_doc : dict
        The new project document.

    """
    self.document = new_doc
Python
def data(self, new_data):
    """Setter method for data associated with this project.

    Parameters
    ----------
    new_data : :class:`~signac.H5Store`
        An HDF5-backed datastore.

    """
    self.stores[self.KEY_DATA] = new_data
Python
def open_job(self, statepoint=None, id=None):
    """Get a job handle associated with a state point.

    This method returns the job instance associated with the given state
    point or job id. Opening a job by a valid state point never fails.
    Opening a job by id requires a lookup of the state point from the job
    id, which may fail if the job was not previously initialized.

    Parameters
    ----------
    statepoint : dict
        The job's unique set of state point parameters (Default value = None).
    id : str
        The job id (Default value = None).

    Returns
    -------
    :class:`~signac.contrib.job.Job`
        The job instance.

    Raises
    ------
    KeyError
        If the attempt to open the job by id fails.
    LookupError
        If the attempt to open the job by an abbreviated id returns more
        than one match.

    """
    if (statepoint is None) == (id is None):
        raise ValueError("Either statepoint or id must be provided, but not both.")
    if id is None:
        # Second best case (Job will update self._sp_cache on init)
        return self.Job(project=self, statepoint=statepoint)
    try:
        # Optimal case (id is in the state point cache)
        return self.Job(project=self, statepoint=self._sp_cache[id], _id=id)
    except KeyError:
        # Worst case: no state point was provided and the state point cache
        # missed. The Job will register itself in self._sp_cache when the
        # state point is accessed.
        if len(id) < 32:
            # Resolve partial job ids (first few characters) into a full job id
            job_ids = self._find_job_ids()
            matches = [_id for _id in job_ids if _id.startswith(id)]
            if len(matches) == 1:
                id = matches[0]
            elif len(matches) > 1:
                raise LookupError(id)
            else:
                # By elimination, len(matches) == 0
                raise KeyError(id)
        elif not self._contains_job_id(id):
            # id does not exist in the project data space
            raise KeyError(id)
        return self.Job(project=self, _id=id)
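A minimal usage sketch of the two lookup paths, assuming an already initialized signac project in the current directory:

import signac

project = signac.get_project()
job = project.open_job({"temperature": 300})  # open by state point
job.init()
same_job = project.open_job(id=job.id)        # reopen the same job by its id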
Python
def num_jobs(self):
    """Return the number of initialized jobs.

    Returns
    -------
    int
        Count of initialized jobs.

    """
    # We simply count the number of valid directories and avoid building a
    # list for improved performance.
    i = 0
    for i, _ in enumerate(self._job_dirs(), 1):
        pass
    return i
Python
def _contains_job_id(self, job_id):
    """Determine whether a job id is in the project's data space.

    Parameters
    ----------
    job_id : str
        The job id to test for initialization.

    Returns
    -------
    bool
        True if the job id is initialized for this project.

    """
    # We can rely on the project workspace to be well-formed, so just use
    # str.join with os.sep instead of os.path.join for speed.
    return os.path.exists(os.sep.join((self.workspace(), job_id)))
Python
def build_job_statepoint_index(self, exclude_const=False, index=None):
    """Build a state point index to identify jobs with specific parameters.

    This method generates pairs of state point keys and mappings of values
    to a set of all corresponding job ids. The pairs are ordered by the
    number of different values. Since state point keys may be nested, they
    are represented as a tuple.
    For example:

    .. code-block:: python

        >>> for i in range(4):
        ...     project.open_job({'a': i, 'b': {'c': i % 2}}).init()
        ...
        >>> for key, value in project.build_job_statepoint_index():
        ...     print(key)
        ...     pprint.pprint(value)
        ...
        ('b', 'c')
        defaultdict(<class 'set'>,
                    {0: {'3a530c13bfaf57517b4e81ecab6aec7f',
                         '4e9a45a922eae6bb5d144b36d82526e4'},
                     1: {'d49c6609da84251ab096654971115d0c',
                         '5c2658722218d48a5eb1e0ef7c26240b'}})
        ('a',)
        defaultdict(<class 'set'>,
                    {0: {'4e9a45a922eae6bb5d144b36d82526e4'},
                     1: {'d49c6609da84251ab096654971115d0c'},
                     2: {'3a530c13bfaf57517b4e81ecab6aec7f'},
                     3: {'5c2658722218d48a5eb1e0ef7c26240b'}})

    Values that are constant over the complete data space can be optionally
    ignored with the `exclude_const` argument set to True.

    Parameters
    ----------
    exclude_const : bool
        Exclude entries that are shared by all jobs that are part of the
        index (Default value = False).
    index :
        A document index.

    Yields
    ------
    tuple
        Pairs of state point keys and mappings of values to a set of all
        corresponding job ids (Default value = None).

    """
    from .schema import _build_job_statepoint_index

    if index is None:
        index = [{"_id": job.id, "sp": job.sp()} for job in self]
    for x, y in _build_job_statepoint_index(
        exclude_const=exclude_const, index=index
    ):
        yield tuple(x.split(".")), y
Python
def find_job_ids(self, filter=None, doc_filter=None, index=None):
    """Find the job_ids of all jobs matching the filters.

    The optional filter arguments must be a Mapping of key-value pairs and
    JSON serializable.

    .. note::
        Providing a pre-calculated index may vastly increase the
        performance of this function.

    Parameters
    ----------
    filter : Mapping
        A mapping of key-value pairs that all indexed job state points are
        compared against (Default value = None).
    doc_filter : Mapping
        A mapping of key-value pairs that all indexed job documents are
        compared against (Default value = None).
    index :
        A document index. If not provided, an index will be computed
        (Default value = None).

    Returns
    -------
    The ids of all indexed jobs matching both filters.

    Raises
    ------
    TypeError
        If the filters are not JSON serializable.
    ValueError
        If the filters are invalid.
    RuntimeError
        If the filters are not supported by the index.

    """
    return self._find_job_ids(filter, doc_filter, index)
Python
def _find_job_ids(self, filter=None, doc_filter=None, index=None):
    """Find the job_ids of all jobs matching the filters.

    The optional filter arguments must be a JSON serializable mapping of
    key-value pairs.

    .. note::
        Providing a pre-calculated index may vastly increase the
        performance of this function.

    Parameters
    ----------
    filter : Mapping
        A mapping of key-value pairs that all indexed job state points are
        compared against (Default value = None).
    doc_filter : Mapping
        A mapping of key-value pairs that all indexed job documents are
        compared against (Default value = None).
    index :
        A document index. If not provided, an index will be computed
        (Default value = None).

    Returns
    -------
    Collection or list
        The ids of all indexed jobs matching both filters. If no arguments
        are provided to this method, the ids are returned as a list. If any
        of the arguments are provided, a :class:`Collection` containing all
        the ids is returned.

    Raises
    ------
    TypeError
        If the filters are not JSON serializable.
    ValueError
        If the filters are invalid.
    RuntimeError
        If the filters are not supported by the index.

    Notes
    -----
    If all arguments are ``None``, this method skips indexing the data
    space and instead simply iterates over all job directories. This code
    path can be much faster for certain use cases since it defers all work
    that would be required to construct an index, so in performance-critical
    applications where no filtering of the data space is required, passing
    no arguments to this method (as opposed to empty dict filters) is
    recommended.

    """
    if not filter and not doc_filter and index is None:
        return list(self._job_dirs())
    if index is None:
        filter = dict(parse_filter(_add_prefix("sp.", filter)))
        if doc_filter:
            warnings.warn(DOC_FILTER_WARNING, DeprecationWarning)
            filter.update(parse_filter(_add_prefix("doc.", doc_filter)))
            index = self.index(include_job_document=True)
        elif "doc" in _root_keys(filter):
            index = self.index(include_job_document=True)
        else:
            index = self._sp_index()
    else:
        warnings.warn(INDEX_DEPRECATION_WARNING, DeprecationWarning)
    return Collection(index, _trust=True)._find(filter)
Python
def find_jobs(self, filter=None, doc_filter=None):
    """Find all jobs in the project's workspace.

    The optional filter arguments must be a Mapping of key-value pairs and
    JSON serializable. The `filter` argument is used to search against job
    state points, whereas the `doc_filter` argument compares against job
    document keys.

    See :ref:`signac find <signac-cli-find>` for the command line equivalent.

    Parameters
    ----------
    filter : Mapping
        A mapping of key-value pairs that all indexed job state points are
        compared against (Default value = None).
    doc_filter : Mapping
        A mapping of key-value pairs that all indexed job documents are
        compared against (Default value = None).

    Returns
    -------
    :class:`~signac.contrib.project.JobsCursor`
        JobsCursor of jobs matching the provided filter(s).

    Raises
    ------
    TypeError
        If the filters are not JSON serializable.
    ValueError
        If the filters are invalid.
    RuntimeError
        If the filters are not supported by the index.

    """
    filter = dict(parse_filter(_add_prefix("sp.", filter)))
    if doc_filter:
        warnings.warn(DOC_FILTER_WARNING, DeprecationWarning)
        filter.update(parse_filter(_add_prefix("doc.", doc_filter)))
    return JobsCursor(self, filter)
Python
def write_statepoints(self, statepoints=None, fn=None, indent=2):
    """Dump state points to a file.

    If the file already contains state points, all new state points will
    be appended, while the old ones are preserved.

    See Also
    --------
    dump_statepoints : Dump the state points and associated job ids.

    Parameters
    ----------
    statepoints : iterable
        A list of state points, defaults to all state points which are
        defined in the workspace.
    fn : str
        The filename of the file containing the state points, defaults to
        :attr:`~signac.Project.FN_STATEPOINTS`.
    indent : int
        Specify the indentation of the JSON file (Default value = 2).

    """
    if fn is None:
        fn = self.fn(self.FN_STATEPOINTS)
    try:
        tmp = self.read_statepoints(fn=fn)
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise
        tmp = {}
    if statepoints is None:
        job_ids = self._job_dirs()
        _cache = {_id: self._get_statepoint(_id) for _id in job_ids}
    else:
        _cache = {calc_id(sp): sp for sp in statepoints}

    tmp.update(_cache)
    logger.debug(f"Writing state points file with {len(tmp)} entries.")
    with open(fn, "w") as file:
        file.write(json.dumps(tmp, indent=indent))
Python
def _register(self, _id, statepoint):
    """Register the job state point in the project state point cache.

    Parameters
    ----------
    _id : str
        A job identifier.
    statepoint : dict
        A validated job state point.

    """
    self._sp_cache[_id] = statepoint
Python
def _get_statepoint_from_workspace(self, job_id):
    """Attempt to read the state point from the workspace.

    Parameters
    ----------
    job_id : str
        Identifier of the job.

    """
    # We can rely on the project workspace to be well-formed, so just use
    # str.join with os.sep instead of os.path.join for speed.
    fn_manifest = os.sep.join((self.workspace(), job_id, self.Job.FN_MANIFEST))
    try:
        with open(fn_manifest, "rb") as manifest:
            return json.loads(manifest.read().decode())
    except (OSError, ValueError) as error:
        if os.path.isdir(os.sep.join((self.workspace(), job_id))):
            logger.error(
                "Error while trying to access state "
                "point manifest file of job '{}': '{}'.".format(job_id, error)
            )
            raise JobsCorruptedError([job_id])
        raise KeyError(job_id)
Python
def reset_statepoint(self, job, new_statepoint):
    """Overwrite the state point of this job while preserving job data.

    This method will change the job id if the state point has been altered.

    .. danger::

        Use this function with caution! Resetting a job's state point may
        sometimes be necessary, but can possibly lead to incoherent data
        spaces.

    Parameters
    ----------
    job : :class:`~signac.contrib.job.Job`
        The job that should be reset to a new state point.
    new_statepoint : mapping
        The job's new state point.

    Raises
    ------
    :class:`~signac.errors.DestinationExistsError`
        If a job associated with the new state point is already initialized.
    OSError
        If the move failed due to an unknown system related error.

    """
    job.reset_statepoint(new_statepoint=new_statepoint)
Python
def update_statepoint(self, job, update, overwrite=False):
    """Change the state point of this job while preserving job data.

    By default, this method will not change existing parameters of the
    state point of the job.

    This method will change the job id if the state point has been altered.

    .. warning::

        While appending to a job's state point is generally safe, modifying
        existing parameters may lead to data inconsistency. Use the
        ``overwrite`` argument with caution!

    Parameters
    ----------
    job : :class:`~signac.contrib.job.Job`
        The job whose state point shall be updated.
    update : mapping
        A mapping used for the state point update.
    overwrite : bool, optional
        If False, an error will be raised if the update modifies the values
        of existing keys in the state point. If True, any existing keys will
        be overwritten in the same way as :meth:`dict.update`. Use with
        caution! (Default value = False).

    Raises
    ------
    KeyError
        If the update contains keys which are already part of the job's
        state point and ``overwrite`` is False.
    :class:`~signac.errors.DestinationExistsError`
        If a job associated with the new state point is already initialized.
    OSError
        If the move failed due to an unknown system related error.

    """
    job.update_statepoint(update=update, overwrite=overwrite)
Python
def import_from(self, origin=None, schema=None, sync=None, copytree=None):
    """Import the data space located at origin into this project.

    This function will walk through the data space located at origin and
    will try to identify data space paths that can be imported as a job
    workspace into this project.

    The ``schema`` argument expects a function that takes a path argument
    and returns a state point dictionary. A default function is used when
    no argument is provided. The default schema function will simply look
    for state point manifest files--usually named
    ``signac_statepoint.json``--and then import all data located within
    that path into the job workspace corresponding to the state point
    specified in the manifest file.

    Alternatively the schema argument may be a string, that is converted
    into a schema function, for example: Providing ``foo/{foo:int}`` as
    schema argument means that all directories under ``foo/`` will be
    imported and their names will be interpreted as the value for ``foo``
    within the state point.

    .. tip::

        Use ``copytree=os.replace`` or ``copytree=shutil.move`` to move
        dataspaces on import instead of copying them.

        Warning: Imports can fail due to conflicts. Moving data instead of
        copying may therefore lead to inconsistent states and users are
        advised to apply caution.

    See Also
    --------
    :meth:`~signac.Project.export_to` : Export the project data space.

    :ref:`signac import <signac-cli-import>` :
        See signac import for the command line equivalent.

    Parameters
    ----------
    origin :
        The path to the data space origin, which is to be imported. This
        may be a path to a directory, a zip file, or a tarball archive
        (Default value = None).
    schema :
        An optional schema function, which is either a string or a function
        that accepts a path as its first and only argument and returns the
        corresponding state point as dict. (Default value = None).
    sync :
        If ``True``, the project will be synchronized with the imported
        data space. If a dict of keyword arguments is provided, the
        arguments will be used for :meth:`~signac.Project.sync`
        (Default value = None).
    copytree :
        Specify which exact function to use for the actual copytree
        operation. Defaults to :func:`shutil.copytree`.

    Returns
    -------
    dict
        A dict that maps the source directory paths to the target
        directory paths.

    """
    from .import_export import import_into_project

    if sync:
        with self.temporary_project() as tmp_project:
            ret = tmp_project.import_from(origin=origin, schema=schema)
            if sync is True:
                self.sync(other=tmp_project)
            else:
                self.sync(other=tmp_project, **sync)
        return ret

    return dict(
        import_into_project(
            origin=origin, project=self, schema=schema, copytree=copytree
        )
    )
Python
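A minimal usage sketch for the ``import_from`` method above. The project name, the ``dat/`` origin directory, and its ``foo/<n>`` sub-directories are hypothetical placeholders; only the ``origin`` and ``schema`` arguments shown in the signature above are assumed.

import signac

# Hypothetical origin layout: dat/foo/0, dat/foo/1, ...
project = signac.init_project("import-demo", root="import_demo")

# Each directory name under foo/ becomes the integer state point value 'foo'.
mapping = project.import_from(origin="dat", schema="foo/{foo:int}")
for src, dst in mapping.items():
    print(src, "->", dst)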
def check(self): """Check the project's workspace for corruption. Raises ------ :class:`signac.errors.JobsCorruptedError` When one or more jobs are identified as corrupted. """ corrupted = [] logger.info("Checking workspace for corruption...") for job_id in self._find_job_ids(): try: statepoint = self._get_statepoint(job_id) if calc_id(statepoint) != job_id: corrupted.append(job_id) else: self.open_job(statepoint).init() except JobsCorruptedError as error: corrupted.extend(error.job_ids) if corrupted: logger.error( "At least one job appears to be corrupted. Call Project.repair() " "to try to fix errors." ) raise JobsCorruptedError(corrupted)
Python
def repair(self, fn_statepoints=None, index=None, job_ids=None):
        """Attempt to repair the workspace after it got corrupted.

        This method will attempt to repair lost or corrupted job state point
        manifest files using a state points file or a document index or both.

        Parameters
        ----------
        fn_statepoints : str
            The filename of the file containing the state points, defaults
            to :attr:`~signac.Project.FN_STATEPOINTS`.
        index :
            A document index (Default value = None).
        job_ids :
            An iterable of job ids that should get repaired. Defaults to all
            jobs.

        Raises
        ------
        :class:`signac.errors.JobsCorruptedError`
            When one or more corrupted jobs could not be repaired.

        """
        if job_ids is None:
            job_ids = self._find_job_ids()

        # Load internal cache from all available external sources.
        self._read_cache()
        try:
            # Updates the state point cache from the provided file
            #
            # In signac 2.0, Project.read_statepoints will be removed.
            # Remove this code path (only use "self._read_cache()" above) and
            # update the method signature and docs to remove "fn_statepoints."
            self._sp_cache.update(self.read_statepoints(fn=fn_statepoints))
        except OSError as error:
            if error.errno != errno.ENOENT or fn_statepoints is not None:
                raise
        if index is not None:
            for doc in index:
                self._sp_cache[doc["signac_id"]] = doc["sp"]
            warnings.warn(INDEX_DEPRECATION_WARNING, DeprecationWarning)

        corrupted = []
        for job_id in job_ids:
            try:
                # First, check if we can look up the state point.
                statepoint = self._get_statepoint(job_id)
                # Check if state point and id correspond.
                correct_id = calc_id(statepoint)
                if correct_id != job_id:
                    logger.warning(
                        "The job id of job '{}' is incorrect; "
                        "it should be '{}'.".format(job_id, correct_id)
                    )
                    invalid_wd = os.path.join(self.workspace(), job_id)
                    correct_wd = os.path.join(self.workspace(), correct_id)
                    try:
                        os.replace(invalid_wd, correct_wd)
                    except OSError as error:
                        logger.critical(
                            "Unable to fix location of job with "
                            "id '{}': '{}'.".format(job_id, error)
                        )
                        corrupted.append(job_id)
                        continue
                    else:
                        logger.info("Moved job to correct workspace.")

                job = self.open_job(statepoint)
            except KeyError:
                logger.critical(
                    f"Unable to look up state point for job with id '{job_id}'."
                )
                corrupted.append(job_id)
            else:
                try:
                    # Try to reinit the job (triggers state point manifest file check).
                    job.init()
                except Exception as error:
                    logger.error(
                        "Error during initialization of job with "
                        "id '{}': '{}'.".format(job_id, error)
                    )
                    try:
                        # Attempt to fix the job manifest file.
                        job.init(force=True)
                    except Exception as error2:
                        logger.critical(
                            f"Unable to force init job with id '{job_id}': '{error2}'."
                        )
                        corrupted.append(job_id)
        if corrupted:
            raise JobsCorruptedError(corrupted)
Python
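A hedged sketch of how the ``check`` and ``repair`` methods above might be combined; ``project`` is an assumed existing signac Project instance, and the ``job_ids`` attribute of the exception is taken from the code above.

from signac.errors import JobsCorruptedError

try:
    project.check()
except JobsCorruptedError as error:
    # error.job_ids lists the ids flagged as corrupted by check().
    print("Corrupted job ids:", error.job_ids)
    # Attempt an automatic repair restricted to the flagged jobs.
    project.repair(job_ids=error.job_ids)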
def _sp_index(self):
        """Update and return the state point index cache.

        Returns
        -------
        dict_values
            The cached index entries, one dict per job with the keys ``sp``
            (the state point) and ``_id`` (the job id).

        """
        job_ids = set(self._job_dirs())
        to_add = job_ids.difference(self._index_cache)
        to_remove = set(self._index_cache).difference(job_ids)
        for _id in to_remove:
            del self._index_cache[_id]
        for _id in to_add:
            self._index_cache[_id] = dict(sp=self._get_statepoint(_id), _id=_id)
        return self._index_cache.values()
Python
def _build_index(self, include_job_document=False):
        """Generate a basic state point index.

        Parameters
        ----------
        include_job_document : bool
            Whether to include the job document in the index
            (Default value = False).

        Yields
        ------
        dict
            Index document with the keys ``_id`` (job id), ``sp`` (state
            point), and optionally ``doc`` (job document).

        """
        wd = self.workspace() if self.Job is Job else None
        for _id in self._find_job_ids():
            doc = dict(_id=_id, sp=self._get_statepoint(_id))
            if include_job_document:
                if wd is None:
                    doc["doc"] = self.open_job(id=_id).document
                else:  # use optimized path
                    try:
                        with open(
                            os.path.join(wd, _id, self.Job.FN_DOCUMENT), "rb"
                        ) as file:
                            doc["doc"] = json.loads(file.read().decode())
                    except OSError as error:
                        if error.errno != errno.ENOENT:
                            raise
            yield doc
Python
def _update_in_memory_cache(self): """Update the in-memory state point cache to reflect the workspace.""" logger.debug("Updating in-memory cache...") start = time.time() job_ids = set(self._job_dirs()) cached_ids = set(self._sp_cache) to_add = job_ids.difference(cached_ids) to_remove = cached_ids.difference(job_ids) if to_add or to_remove: for _id in to_remove: del self._sp_cache[_id] def _add(_id): self._sp_cache[_id] = self._get_statepoint_from_workspace(_id) to_add_chunks = split_and_print_progress( iterable=list(to_add), num_chunks=max(1, min(100, int(len(to_add) / 1000))), write=logger.info, desc="Read metadata: ", ) with ThreadPool() as pool: for chunk in to_add_chunks: pool.map(_add, chunk) delta = time.time() - start logger.debug(f"Updated in-memory cache in {delta:.3f} seconds.") return to_add, to_remove else: logger.debug("In-memory cache is up to date.")
Python
def _remove_persistent_cache_file(self): """Remove the persistent cache file (if it exists).""" try: os.remove(self.fn(self.FN_CACHE)) except OSError as error: if error.errno != errno.ENOENT: raise error
Python
def update_cache(self): """Update the persistent state point cache. This function updates a persistent state point cache, which is stored in the project root directory. Most data space operations, including iteration and filtering or selection are expected to be significantly faster after calling this function, especially for large data spaces. """ logger.info("Update cache...") start = time.time() cache = self._read_cache() cached_ids = set(self._sp_cache) self._update_in_memory_cache() if cache is None or set(cache) != cached_ids: fn_cache = self.fn(self.FN_CACHE) fn_cache_tmp = fn_cache + "~" try: with gzip.open(fn_cache_tmp, "wb") as cachefile: cachefile.write(json.dumps(self._sp_cache).encode()) except OSError: # clean-up try: os.remove(fn_cache_tmp) except OSError: pass raise else: os.replace(fn_cache_tmp, fn_cache) delta = time.time() - start logger.info(f"Updated cache in {delta:.3f} seconds.") return len(self._sp_cache) else: logger.info("Cache is up to date.")
Python
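A small, hedged sketch of refreshing the persistent cache above before iterating a large data space; ``project`` is an assumed existing project instance.

# update_cache() returns the number of cached state points when the cache
# file was rewritten and None when it was already up to date.
num_cached = project.update_cache()
if num_cached is not None:
    print(f"Cached {num_cached} state points.")

# Subsequent iteration and selection benefit from the warmed cache.
for job in project:
    pass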
def _read_cache(self): """Read the persistent state point cache (if available).""" logger.debug("Reading cache...") start = time.time() try: with gzip.open(self.fn(self.FN_CACHE), "rb") as cachefile: cache = json.loads(cachefile.read().decode()) self._sp_cache.update(cache) except OSError as error: if error.errno != errno.ENOENT: raise logger.debug("No cache file found.") else: delta = time.time() - start logger.debug(f"Read cache in {delta:.3f} seconds.") return cache
Python
def create_access_module(self, filename=None, main=True, master=None): """Create the access module for indexing. This method generates the access module required to make this project's index part of a main index. Parameters ---------- filename : str The name of the access module file. Defaults to the standard name and should usually not be changed. main : bool If True, add directives for the compilation of a master index when executing the module (Default value = True). master : bool Deprecated parameter. Replaced by main. Returns ------- str Access module name. """ if master is not None: warnings.warn( "The parameter master has been renamed to main.", DeprecationWarning ) main = master if filename is None: filename = os.path.join(self.root_directory(), MainCrawler.FN_ACCESS_MODULE) with open(filename, "x") as file: if main: file.write(ACCESS_MODULE_MAIN) else: file.write(ACCESS_MODULE_MINIMAL) if main: mode = os.stat(filename).st_mode | stat.S_IEXEC os.chmod(filename, mode) logger.info(f"Created access module file '{filename}'.") return filename
Python
def temporary_project(self, name=None, dir=None):
        """Context manager for the initialization of a temporary project.

        The temporary project is by default created within the root project's
        workspace to ensure that both projects share the same file system.
        Here is an example of how this method can be used for the import and
        synchronization of external data spaces.

        .. code-block:: python

            with project.temporary_project() as tmp_project:
                tmp_project.import_from('/data')
                project.sync(tmp_project)

        Parameters
        ----------
        name : str
            An optional name for the temporary project.
            Defaults to a unique random string.
        dir : str
            Optionally specify where the temporary project root directory is
            to be created. Defaults to the project's workspace directory.

        Returns
        -------
        :class:`~signac.Project`
            An instance of :class:`~signac.Project`.

        """
        if name is None:
            name = os.path.join(self.id, str(uuid.uuid4()))
        if dir is None:
            dir = self.workspace()
        _mkdir_p(self.workspace())  # ensure workspace exists
        with TemporaryProject(name=name, cls=type(self), dir=dir) as tmp_project:
            yield tmp_project
Python
def init_project(cls, name, root=None, workspace=None, make_dir=True): """Initialize a project with the given name. It is safe to call this function multiple times with the same arguments. However, a `RuntimeError` is raised if an existing project configuration would conflict with the provided initialization parameters. See :ref:`signac init <signac-cli-init>` for the command line equivalent. Parameters ---------- name : str The name of the project to initialize. root : str The root directory for the project. Defaults to the current working directory. workspace : str The workspace directory for the project. Defaults to a subdirectory ``workspace`` in the project root. make_dir : bool Create the project root directory if it does not exist yet (Default value = True). Returns ------- :class:`~signac.Project` Initialized project, an instance of :class:`~signac.Project`. Raises ------ RuntimeError If the project root path already contains a conflicting project configuration. """ if root is None: root = os.getcwd() try: project = cls.get_project(root=root, search=False) except LookupError: fn_config = os.path.join(root, "signac.rc") if make_dir: _mkdir_p(os.path.dirname(fn_config)) config = get_config(fn_config) config["project"] = name if workspace is not None: config["workspace_dir"] = workspace config["schema_version"] = SCHEMA_VERSION config.write() project = cls.get_project(root=root) assert project.id == str(name) return project else: try: assert project.id == str(name) if workspace is not None: assert os.path.realpath(workspace) == os.path.realpath( project.workspace() ) return project except AssertionError: raise RuntimeError( "Failed to initialize project '{}'. Path '{}' already " "contains a conflicting project configuration.".format( name, os.path.abspath(root) ) )
Python
def TemporaryProject(name=None, cls=None, **kwargs):
    r"""Context manager for the generation of a temporary project.

    This is a factory function that creates a Project within a temporary
    directory and must be used as a context manager, for example:

    .. code-block:: python

        with TemporaryProject() as tmp_project:
            tmp_project.import_from('/data')

    Parameters
    ----------
    name : str
        An optional name for the temporary project.
        Defaults to a unique random string.
    cls :
        The class of the temporary project.
        Defaults to :class:`~signac.Project`.
    \*\*kwargs :
        Optional keyword arguments that are forwarded to the
        TemporaryDirectory class constructor, which is used to create a
        temporary root directory.

    Yields
    ------
    :class:`~signac.Project`
        An instance of :class:`~signac.Project`.

    """
    if name is None:
        name = str(uuid.uuid4())
    if cls is None:
        cls = Project
    with TemporaryDirectory(**kwargs) as tmp_dir:
        yield cls.init_project(name=name, root=tmp_dir)
Python
def _strip_prefix(key): """Strip the prefix, if it is present. Implicit and explicit sp prefixes are equivalent and can be treated identically for this purpose. """ return key.split(".", 1)[-1]
Python
def export_to(self, target, path=None, copytree=None): """Export all jobs to a target location, such as a directory or a (zipped) archive file. See Also -------- :meth:`~signac.Project.export_to` : For full details on how to use this function. Parameters ---------- target : str A path to a directory or archive file to export to. path : str or callable The path (function) used to structure the exported data space (Default value = None). copytree : callable The function used for copying of directory tree structures. Defaults to :func:`shutil.copytree`. Can only be used when the target is a directory (Default value = None). Returns ------- dict A dictionary that maps the source directory paths to the target directory paths. """ from .import_export import export_jobs return dict( export_jobs(jobs=list(self), target=target, path=path, copytree=copytree) )
Python
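A hedged sketch of exporting a filtered job selection with the ``export_to`` method above; the state point key ``foo``, the archive name, the path template, and the ``project.find_jobs`` selection call are illustrative assumptions.

# Export all jobs with foo == 1 into a tarball, structured as foo_<value>/.
paths = project.find_jobs({"foo": 1}).export_to(
    target="export.tar.gz",
    path="foo_{foo}",
)
print(f"Exported {len(paths)} job directories.")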
def to_dataframe( self, sp_prefix="sp.", doc_prefix="doc.", usecols=None, flatten=False ): """Convert the selection of jobs to a pandas :class:`~pandas.DataFrame`. This function exports the job metadata to a :py:class:`pandas.DataFrame`. All state point and document keys are prefixed by default to be able to distinguish them. Parameters ---------- sp_prefix : str, optional Prefix state point keys with the given string. Defaults to "sp.". doc_prefix : str, optional Prefix document keys with the given string. Defaults to "doc.". usecols : list-like or callable, optional Used to select a subset of columns. If list-like, must contain strings corresponding to the column names that should be included. For example, ``['sp.a', 'doc.notes']``. If callable, the column will be included if the function called on the column name returns True. For example, ``lambda x: 'sp.' in x``. Defaults to ``None``, which uses all columns from the state point and document. Note that this filter is applied *after* the doc and sp prefixes are added to the column names. flatten : bool, optional Whether nested state points or document keys should be flattened. If True, ``{'a': {'b': 'c'}}`` becomes a column named ``a.b`` with value ``c``. If False, it becomes a column named ``a`` with value ``{'b': 'c'}``. Defaults to ``False``. Returns ------- :class:`~pandas.DataFrame` A pandas DataFrame with all job metadata. """ import pandas if usecols is None: def usecols(column): return True elif not callable(usecols): included_columns = set(usecols) def usecols(column): return column in included_columns def _flatten(d): return dict(_nested_dicts_to_dotted_keys(d)) if flatten else d def _export_sp_and_doc(job): """Prefix and filter state point and document keys. Parameters ---------- job : :class:`~signac.contrib.job.Job` The job instance. Yields ------ tuple tuple with prefixed state point or document key and values. """ for key, value in _flatten(job.statepoint).items(): prefixed_key = sp_prefix + key if usecols(prefixed_key): yield prefixed_key, value for key, value in _flatten(job.doc).items(): prefixed_key = doc_prefix + key if usecols(prefixed_key): yield prefixed_key, value return pandas.DataFrame.from_dict( data={job.id: dict(_export_sp_and_doc(job)) for job in self}, orient="index", ).infer_objects()
Python
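A hedged usage sketch for ``to_dataframe`` above; it assumes pandas is installed and that ``project.find_jobs()`` returns the jobs cursor providing this method.

# Keep only state point columns and flatten any nested keys into dotted names.
df = project.find_jobs().to_dataframe(
    usecols=lambda column: column.startswith("sp."),
    flatten=True,
)
print(df.head())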
def _export_sp_and_doc(job): """Prefix and filter state point and document keys. Parameters ---------- job : :class:`~signac.contrib.job.Job` The job instance. Yields ------ tuple tuple with prefixed state point or document key and values. """ for key, value in _flatten(job.statepoint).items(): prefixed_key = sp_prefix + key if usecols(prefixed_key): yield prefixed_key, value for key, value in _flatten(job.doc).items(): prefixed_key = doc_prefix + key if usecols(prefixed_key): yield prefixed_key, value
Python
def _repr_html_jobs(self): """Jobs representation as HTML. Returns ------- str HTML representation of jobs. """ html = "" len_self = len(self) try: if len_self > 100: raise RuntimeError # too large if self._use_pandas_for_html_repr: import pandas else: raise RuntimeError except ImportError: warnings.warn("Install pandas for a pretty representation of jobs.") html += f"<br/><strong>{len_self}</strong> job(s) found" except RuntimeError: html += f"<br/><strong>{len_self}</strong> job(s) found" else: with pandas.option_context("display.max_rows", 20): html += self.to_dataframe()._repr_html_() return html
Python
def _repr_html_(self): """Return an HTML representation of JobsCursor. Returns ------- str HTML representation of jobs. """ return repr(self) + self._repr_html_jobs()
Python
def init_project(name, root=None, workspace=None, make_dir=True): """Initialize a project with the given name. It is safe to call this function multiple times with the same arguments. However, a `RuntimeError` is raised if an existing project configuration would conflict with the provided initialization parameters. Parameters ---------- name : str The name of the project to initialize. root : str The root directory for the project. Defaults to the current working directory. workspace : str The workspace directory for the project. Defaults to a subdirectory ``workspace`` in the project root. make_dir : bool Create the project root directory, if it does not exist yet (Default value = True). Returns ------- :class:`~signac.Project` The initialized project instance. Raises ------ RuntimeError If the project root path already contains a conflicting project configuration. """ return Project.init_project( name=name, root=root, workspace=workspace, make_dir=make_dir )
Python
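A brief sketch illustrating the idempotency described in the docstring above; the project name and root directory are placeholders, and ``root_directory()`` is the project method used elsewhere in this section.

import signac

project = signac.init_project("my-study", root="my_study")
# Calling again with identical arguments is safe and returns the same project.
same = signac.init_project("my-study", root="my_study")
assert project.root_directory() == same.root_directory()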
def _h5set(store, grp, key, value, path=None): """Set a key in an h5py container. This method recursively converts Mappings to h5py groups and transparently handles None values. """ import h5py import numpy # h5py depends on numpy, so this is safe. path = path + "/" + key if path else key # Guard against assigning a group to itself, e.g., `h5s[key] = h5s[key]`, # where h5s[key] is a mapping. This is necessary, because the original # mapping would be deleted prior to assignment. if key in grp: if isinstance(value, H5Group): if grp[key] == value._group: return # Groups are identical, do nothing. elif isinstance(value, h5py._hl.dataset.Dataset): if grp == value.parent: return # Dataset is identical, do nothing. # Delete any existing data del grp[key] # Mapping-types if isinstance(value, Mapping): subgrp = grp.create_group(key) for k, v in value.items(): _h5set(store, subgrp, k, v, path) # Regular built-in types: elif value is None: grp.create_dataset(key, data=None, shape=None, dtype="f") elif isinstance(value, (int, float, str, bool, array.array)): grp[key] = value elif isinstance(value, bytes): grp[key] = numpy.bytes_(value) # NumPy types elif type(value).__module__ == numpy.__name__: grp[key] = value # h5py native types elif isinstance(value, h5py._hl.dataset.Dataset): grp[key] = value # Creates hard-link! # Other types else: _load_pandas() # might be a pandas type if _is_pandas_type(value): _requires_tables() store.close() with _pandas.HDFStore(store._filename, mode="a") as store_: store_[path] = value store.open() else: grp[key] = value warnings.warn( "Storage for object of type '{}' appears to have succeeded, but this " "type is not officially supported!".format(type(value)) )
Python
def _h5get(store, grp, key, path=None): """Retrieve the underlying data for a key from its h5py container.""" import h5py path = path + "/" + key if path else key result = grp[key] if _group_is_pandas_type(result): _load_pandas() _requires_tables() grp.file.flush() # The store must be closed for pandas to open it safely, but first we # copy the filename since it is not accessible after closing the file. # The pandas data is returned by copy, so the HDFStore can be closed. # Then we re-open the store. filename = grp.file.filename store.close() with _pandas.HDFStore(filename, mode="r") as store_: data = store_[path] store.open() return data try: shape = result.shape if shape is None: return None elif shape: return result elif ( h5py.version.version_tuple.major >= 3 and h5py.check_dtype(vlen=result.dtype) is str ): # h5py >=3.0.0 returns strings as bytes. This returns str for # consistency with past behavior in signac. return result.asstr()[()] else: return result[()] except AttributeError: if isinstance(result, MutableMapping): return H5Group(store, path) else: return result
Python
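A hedged sketch of the dict-like H5Store interface that ``_h5set`` and ``_h5get`` above implement; it assumes ``H5Store`` is importable from ``signac``, that h5py and NumPy are installed, and that ``data.h5`` is a placeholder filename.

import numpy as np
from signac import H5Store

with H5Store("data.h5") as h5:
    h5["temperature"] = 300.0                          # scalar value
    h5["config"] = {"steps": 1000, "label": "run-a"}   # mapping -> HDF5 group
    h5["trajectory"] = np.zeros((10, 3))               # array -> HDF5 dataset
    print(h5["config"]["steps"])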
def open(self, mode=None): """Open the underlying HDF5 file. :param mode: The file open mode to use. Defaults to 'a' (append). :returns: This H5Store instance. """ if mode is None: mode = self._kwargs.get("mode", "a") return self._open(mode=mode)
Python
def close(self): """Close the underlying HDF5 file.""" locked = True try: self._file.close() self._file = None except AttributeError: locked = False finally: if locked: try: self._thread_lock.release() except RuntimeError as error: if "cannot release un-acquired lock" not in str(error): raise
Python
def file(self):
        """Access the underlying instance of h5py.File.

        This property exposes the underlying ``h5py.File`` object enabling
        use of functions such as ``create_dataset()`` or ``require_dataset()``.

        .. note::

            The store must be open to access this property!

        :returns:
            The ``h5py`` file-object that this store is operating on.
        :rtype:
            ``h5py.File``
        :raises H5StoreClosedError:
            When the store is closed at the time of accessing this property.
        """
        if self._file is None:
            raise H5StoreClosedError(self._filename)
        else:
            return self._file
Python
def flush(self): """Flush the underlying HDF5 file.""" if self._file is None: raise H5StoreClosedError(self._filename) else: self._file.flush()
Python
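An alternative, hedged sketch using the explicit ``open``/``flush``/``close`` lifecycle shown above instead of a ``with`` block; the filename is a placeholder, and ``open()`` returning the store instance is taken from its docstring.

from signac import H5Store

h5 = H5Store("data.h5").open(mode="a")  # open() returns the store instance
try:
    h5["count"] = 42
    h5.flush()  # push pending writes to disk without closing the file
finally:
    h5.close()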
def _validate_key(key):
        """Raise an exception if the key is invalid. Returns key."""
        if "." in key:
            raise InvalidKeyError("Keys for the H5Store may not contain dots ('.').")
        return key
Python
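A small, hedged sketch of the key validation above; it assumes ``InvalidKeyError`` is importable from ``signac.errors`` and reuses the placeholder ``data.h5`` store.

from signac import H5Store
from signac.errors import InvalidKeyError

with H5Store("data.h5") as h5:
    try:
        h5["a.b"] = 1          # dots in keys are rejected
    except InvalidKeyError:
        h5["a"] = {"b": 1}     # nest a mapping instead of using dotted keys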
def clear(self): """Remove all data from this store. .. danger:: All data will be removed, this action cannot be reversed! """ with _ensure_open(self): self._file.clear()
Python
def _validate_key(key):
        """Raise an exception if the key is invalid. Returns key."""
        if "." in key:
            raise InvalidKeyError(
                "Keys for the H5StoreManager may not contain dots ('.')."
            )
        return key
Python
def variant_str(s): """ Converts a string to a GLib.Variant """ if not isinstance(s, str): raise TypeError('Only strings are supported for scalars') return GLib.Variant('s', s)
Python
def variant_bool(b):
        """ Converts a boolean to a GLib.Variant """
        if not isinstance(b, bool):
            raise TypeError('Only booleans are supported')
        return GLib.Variant('b', b)
Python
def variant_list(l): """ Converts a list to a GLib.Variant """ l_variant = list() for item in l: if item is None: item = '' l_variant.append(ModulemdUtil.python_to_variant(item)) return GLib.Variant('av', l_variant)
Python
def variant_dict(d):
        """ Converts a dictionary to a GLib.Variant of type 'a{sv}' """
        if not isinstance(d, dict):
            raise TypeError('Only dictionaries are supported for mappings')

        d_variant = ModulemdUtil.dict_values(d)
        return GLib.Variant('a{sv}', d_variant)
Python
def dict_values(d): """ Converts each dictionary value to a GLib.Variant """ if not isinstance(d, dict): raise TypeError('Only dictionaries are supported for mappings') d_variant = dict() for k, v in d.items(): if v is None: v = '' d_variant[k] = ModulemdUtil.python_to_variant(v) return d_variant
Python
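A hedged sketch composing the helpers above; the ``ModulemdUtil`` class holding these static methods and its ``python_to_variant`` dispatcher are assumed from the surrounding module, PyGObject (``gi``) must be installed, and the payload is illustrative.

payload = {
    "name": "demo",
    "enabled": True,
    "tags": ["a", "b"],
    "notes": None,  # converted to '' before wrapping, per dict_values above
}

variant = ModulemdUtil.variant_dict(payload)  # GLib.Variant of type 'a{sv}'
print(variant)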
def sync_db(domain, username, restore_as=None): """Call Formplayer API to force a sync for a user.""" user = CouchUser.get_by_username(username) assert user.is_member_of(domain, allow_mirroring=True) user_id = user.user_id use_livequery = FORMPLAYER_USE_LIVEQUERY.enabled(domain) data = { 'action': 'sync-db', 'username': username, 'domain': domain, 'restoreAs': restore_as, 'useLiveQuery': use_livequery, } response_json = _post_data(data, user_id) if not response_json.get("status") == "accepted": raise FormplayerResponseException(response_json)
Python
def sso_saml_metadata(request, idp_slug): """ Returns XML with SAML 2.0 Metadata as the Service Provider (SP). Often referred to as the SP Identifier or SP Entity ID in the Identity Provider's Documentation. """ saml_settings = OneLogin_Saml2_Settings(get_saml2_config(request.idp)) metadata = saml_settings.get_sp_metadata() errors = saml_settings.validate_metadata(metadata) if len(errors) == 0: resp = HttpResponse(content=metadata, content_type='text/xml') else: resp = HttpResponseServerError(content=', '.join(errors)) return resp
Python
def sso_saml_acs(request, idp_slug): """ ACS stands for "Assertion Consumer Service". The Identity Provider will send its response to this view after authenticating a user. This is often referred to as the "Entity ID" in the IdP's Service Provider configuration. In this view we verify the received SAML 2.0 response and then log in the user to CommCare HQ. """ # todo these are placeholders for the json dump below error_reason = None request_session_data = None saml_relay = None request_id = request.session.get('AuthNRequestID') processed_response = request.saml2_auth.process_response(request_id=request_id) errors = request.saml2_auth.get_errors() not_auth_warn = not request.saml2_auth.is_authenticated() if not errors: if 'AuthNRequestID' in request.session: del request.session['AuthNRequestID'] request.session['samlUserdata'] = request.saml2_auth.get_attributes() request.session['samlNameId'] = request.saml2_auth.get_nameid() request.session['samlNameIdFormat'] = request.saml2_auth.get_nameid_format() request.session['samlNameIdNameQualifier'] = request.saml2_auth.get_nameid_nq() request.session['samlNameIdSPNameQualifier'] = request.saml2_auth.get_nameid_spnq() request.session['samlSessionIndex'] = request.saml2_auth.get_session_index() # todo for debugging purposes to dump into the response below request_session_data = { "samlUserdata": request.session['samlUserdata'], "samlNameId": request.session['samlNameId'], "samlNameIdFormat": request.session['samlNameIdFormat'], "samlNameIdNameQualifier": request.session['samlNameIdNameQualifier'], "samlNameIdSPNameQualifier": request.session['samlNameIdSPNameQualifier'], "samlSessionIndex": request.session['samlSessionIndex'], } # todo redirect here? saml_relay = OneLogin_Saml2_Utils.get_self_url(request.saml2_request_data) # todo this is the point where we would initiate a django auth session else: error_reason = request.saml2_auth.get_last_error_reason() return HttpResponse(json.dumps({ "errors": errors, "error_reason": error_reason, "not_auth_warn": not_auth_warn, "request_id": request_id, "processed_response": processed_response, "saml_relay": saml_relay, "request_session_data": request_session_data, }), 'text/json')
Python
def sso_saml_sls(request, idp_slug): """ SLS stands for Single Logout Service. This view is responsible for handling a logout response from the Identity Provider. """ # todo these are placeholders for the json dump below error_reason = None success_slo = False attributes = False saml_user_data_present = False request_id = request.session.get('LogoutRequestID') url = request.saml2_auth.process_slo( request_id=request_id, delete_session_cb=lambda: request.session.flush() ) errors = request.saml2_auth.get_errors() if len(errors) == 0: if url is not None: return HttpResponseRedirect(url) else: success_slo = True elif request.saml2_auth.get_settings().is_debug_active(): error_reason = request.saml2_auth.get_last_error_reason() # todo what's below is a debugging placeholder if 'samlUserdata' in request.session: saml_user_data_present = True if len(request.session['samlUserdata']) > 0: attributes = request.session['samlUserdata'].items() return HttpResponse(json.dumps({ "errors": errors, "error_reason": error_reason, "success_slo": success_slo, "attributes": attributes, "saml_user_data_present": saml_user_data_present, }), 'text/json')
Python
def sso_saml_login(request, idp_slug): """ This view initiates a SAML 2.0 login request with the Identity Provider. """ return HttpResponseRedirect(request.saml2_auth.login())
Python
def sso_saml_logout(request, idp_slug): """ This view initiates a SAML 2.0 logout request with the Identity Provider. """ return HttpResponseRedirect(request.saml2_auth.logout( name_id=request.session.get('samlNameId'), session_index=request.session.get('samlSessionIndex'), nq=request.session.get('samlNameIdNameQualifier'), name_id_format=request.session.get('samlNameIdFormat'), spnq=request.session.get('samlNameIdSPNameQualifier') ))
Python
def handle_shadow_child_modules(app, shadow_parent):
    """Creates or deletes shadow child modules if the parent module requires

    Used primarily when changing the "source module id" of a shadow module
    """
    changes = False
    if shadow_parent.shadow_module_version == 1:
        # For old-style shadow modules, we don't create any child-shadows
        return False

    if not shadow_parent.source_module_id:
        return False

    # if the source module is a child, but not a shadow, then the shadow should have the same parent
    source = app.get_module_by_unique_id(shadow_parent.source_module_id)
    if source.root_module_id != shadow_parent.root_module_id:
        shadow_parent.root_module_id = source.root_module_id
        changes = True

    source_module_children = [
        m for m in app.get_modules()
        if m.root_module_id == shadow_parent.source_module_id
        and m.module_type != 'shadow'
    ]

    shadow_parent_children = [
        m for m in app.get_modules()
        if m.root_module_id == shadow_parent.unique_id
    ]

    # Delete unneeded modules. Compare by unique id, since
    # source_module_children holds module objects, not id strings.
    source_child_ids = {m.unique_id for m in source_module_children}
    for child in shadow_parent_children:
        if child.source_module_id not in source_child_ids:
            changes = True
            app.delete_module(child.unique_id)

    # Add new modules for source children that do not already have a shadow child
    existing_shadow_source_ids = {
        child.source_module_id for child in shadow_parent_children
        if child.source_module_id in source_child_ids
    }
    for source_child in source_module_children:
        if source_child.unique_id in existing_shadow_source_ids:
            continue
        changes = True
        new_shadow = ShadowModule.new_module(source_child.default_name(app=app), app.default_language)
        new_shadow.source_module_id = source_child.unique_id

        # ModuleBase properties
        new_shadow.module_filter = source_child.module_filter
        new_shadow.put_in_root = source_child.put_in_root
        new_shadow.root_module_id = shadow_parent.unique_id
        new_shadow.fixture_select = deepcopy(source_child.fixture_select)
        new_shadow.report_context_tile = source_child.report_context_tile
        new_shadow.auto_select_case = source_child.auto_select_case
        new_shadow.is_training_module = source_child.is_training_module

        # ShadowModule properties
        for prop, to_deepcopy in SHADOW_MODULE_PROPERTIES_TO_COPY:
            new_value = getattr(source_child, prop)
            setattr(new_shadow, prop, deepcopy(new_value) if to_deepcopy else new_value)

        # move excluded form ids
        source_child_form_ids = set(
            f.unique_id
            for f in app.get_module_by_unique_id(new_shadow.source_module_id).get_forms()
        )

        new_shadow.excluded_form_ids = list(set(shadow_parent.excluded_form_ids) & source_child_form_ids)
        shadow_parent.excluded_form_ids = list(set(shadow_parent.excluded_form_ids) - source_child_form_ids)

        app.add_module(new_shadow)

    if changes:
        app.move_child_modules_after_parents()
        app.save()

    return changes
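# The "ShadowModule properties" loop above unpacks SHADOW_MODULE_PROPERTIES_TO_COPY as
# (prop, to_deepcopy) pairs, so the constant is presumably a sequence of attribute names
# paired with a flag saying whether the value needs a deepcopy. The entries below are
# illustrative placeholders only; the real list lives in the app-builder codebase.
SHADOW_MODULE_PROPERTIES_TO_COPY = [
    ('case_type', False),     # simple scalar attributes can be assigned directly
    ('case_details', True),   # mutable document objects should be deep-copied
]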
Python
def _handle_response(self, response):
    """Raises ``ApiException`` if needed, or returns response JSON obj

    Status codes
     * 401 = Invalid API key or rate limit quota violation
     * 400 = Invalid URL parameter
    """
    if response.status_code == 200:
        return self.__success(response)
    elif response.status_code == 401:
        self.__fault(response)
    elif response.status_code == 400:
        self.__error(response)
    else:
        self.__unknown_error(response)
Python
def __error(response):
    """HTTP status code 400, or something with 'errors' object"""
    rj = response.json()
    error = namedtuple('error', ['code', 'detail', 'href'])
    errors = [
        error(err['code'], err['detail'], err['_links']['about']['href'])
        for err in rj['errors']
    ]
    log.error('URL: {}\nErrors: {}'.format(response.url, errors))
    raise ApiException(response.status_code, errors, response.url)
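# __error() above expects a 400 response body with an 'errors' array in which each entry
# carries 'code', 'detail', and a '_links.about.href' pointer. The document below is a
# hand-written illustration of that shape (all values are placeholders, not a captured API
# response), annotated with the namedtuple field each value feeds.
sample_400_body = {
    "errors": [
        {
            "code": "EXAMPLE_CODE",              # -> error.code
            "detail": "Example detail message",  # -> error.detail
            "_links": {"about": {"href": "https://example.com/errors#EXAMPLE_CODE"}},  # -> error.href
        }
    ]
}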
Python
def __fault(response):
    """HTTP status code 401, or something with 'faults' object"""
    rj = response.json()
    fault_str = rj['fault']['faultstring']
    detail = rj['fault']['detail']
    log.error('URL: {}, Faultstr: {}'.format(response.url, fault_str))
    raise ApiException(response.status_code, fault_str, detail, response.url)
Python
def __unknown_error(self, response):
    """Unexpected HTTP status code (not 200, 400, or 401)"""
    rj = response.json()
    if 'fault' in rj:
        self.__fault(response)
    elif 'errors' in rj:
        self.__error(response)
    else:
        raise ApiException(response.status_code, response.text)
Python
def _parse_link(self, link):
    """Parses link into base URL and dict of parameters"""
    parsed_link = namedtuple('link', ['url', 'params'])
    link_url, link_params = link.split('?')
    params = self._link_params(link_params)
    return parsed_link(link_url, params)
Python
def _link_params(self, param_str):
    """Parse URL parameters from href split on '?' character"""
    search_params = {}
    params = parse.parse_qs(param_str)
    for k, v in params.items():
        search_params[k] = v[0]
    search_params.update(self.api_key)
    return search_params
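# Rough standalone sketch of what _parse_link()/_link_params() do to a paged '_links' href.
# parse_qs() returns each value as a list, so the helpers flatten to the first element and
# then merge in the client's API-key dict; the href and key name below are illustrative
# placeholders, not a real API response.
from urllib import parse

href = "/discovery/v2/events.json?page=1&size=20&keyword=rock"
link_url, link_params = href.split('?')
flat_params = {k: v[0] for k, v in parse.parse_qs(link_params).items()}
flat_params.update({'apikey': 'YOUR_API_KEY'})   # placeholder for self.api_key
print(link_url)      # /discovery/v2/events.json
print(flat_params)   # {'page': '1', 'size': '20', 'keyword': 'rock', 'apikey': 'YOUR_API_KEY'}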
Python
def _assign_links(obj, json_obj, base_url=None):
    """Assigns ``links`` attribute to an object from JSON"""
    # Normal link structure is {link_name: {'href': url}},
    # but some responses also have lists of other models.
    # API occasionally returns bad URLs (with {&sort} and similar)
    json_links = json_obj.get('_links')
    if not json_links:
        obj.links = {}
    else:
        obj_links = {}
        for k, v in json_links.items():
            if 'href' in v:
                href = re.sub("({.+})", "", v['href'])
                if base_url:
                    href = "{}{}".format(base_url, href)
                obj_links[k] = href
            else:
                obj_links[k] = v
        obj.links = obj_links
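# Quick illustration of the re.sub() template-stripping above: URL-template residue such as
# "{&sort,page}" is removed from an href before it is stored. The href string is made up for
# the example.
import re

href = "/discovery/v2/events?id=abc123{&sort,page}"
print(re.sub("({.+})", "", href))   # /discovery/v2/events?id=abc123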
Python
def from_json(json_obj):
    """Instantiate and return a Page(list)"""
    pg = Page()
    pg.json = json_obj

    _assign_links(pg, json_obj, ticketpy.ApiClient.root_url)
    pg.number = json_obj['page']['number']
    pg.size = json_obj['page']['size']
    pg.total_pages = json_obj['page']['totalPages']
    pg.total_elements = json_obj['page']['totalElements']

    embedded = json_obj.get('_embedded')
    if not embedded:
        return pg

    object_models = {
        'events': Event,
        'venues': Venue,
        'attractions': Attraction,
        'classifications': Classification
    }
    for k, v in embedded.items():
        if k in object_models:
            obj_type = object_models[k]
            pg += [obj_type.from_json(obj) for obj in v]
    return pg
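# Minimal, hand-written illustration of the paged JSON shape Page.from_json() expects: a
# 'page' block with paging counters and an optional '_embedded' block keyed by model name.
# The numbers and href are placeholders, not a captured response.
sample_page_json = {
    "page": {"number": 0, "size": 20, "totalPages": 3, "totalElements": 47},
    "_embedded": {
        "events": [
            # each entry would be a full event document handed to Event.from_json()
        ]
    },
    "_links": {"self": {"href": "/discovery/v2/events.json?page=0&size=20"}},
}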
Python
def _get(self, keyword=None, entity_id=None, sort=None, include_test=None,
         page=None, size=None, locale=None, **kwargs):
    """Basic API search request, with only the parameters common to all
    search functions. Specific searches pass theirs through **kwargs.

    :param keyword: Keyword to search on
    :param entity_id: ID of the object type (such as an event ID...)
    :param sort: Sort method
    :param include_test: ['yes', 'no', 'only'] to include test objects in
        results. Default: *no*
    :param page: Page to return (default: 0)
    :param size: Page size (default: 20)
    :param locale: Locale (default: *en*)
    :param kwargs: Additional search parameters
    :return:
    """
    # Combine universal parameters and supplied kwargs into single dict,
    # then map our parameter names to the ones expected by the API and
    # make the final request
    search_args = dict(kwargs)
    search_args.update({
        'keyword': keyword,
        'id': entity_id,
        'sort': sort,
        'include_test': include_test,
        'page': page,
        'size': size,
        'locale': locale
    })
    params = self._search_params(**search_args)
    return self.__get(**params)
Python
def by_id(self, entity_id):
    """Get a specific object by its ID"""
    get_tmpl = "{}/{}/{}"
    get_url = get_tmpl.format(self.api_client.url, self.method, entity_id)
    r = requests.get(get_url, params=self.api_client.api_key)
    r_json = self.api_client._handle_response(r)
    return self.model.from_json(r_json)
Python
def find(self, sort='date,asc', latlong=None, radius=None, unit=None,
         start_date_time=None, end_date_time=None,
         onsale_start_date_time=None, onsale_end_date_time=None,
         country_code=None, state_code=None, venue_id=None,
         attraction_id=None, segment_id=None, segment_name=None,
         classification_name=None, classification_id=None,
         market_id=None, promoter_id=None, dma_id=None,
         include_tba=None, include_tbd=None, client_visibility=None,
         keyword=None, event_id=None, source=None, include_test=None,
         page=None, size=None, locale=None, **kwargs):
    """Search for events matching given criteria.

    :param sort: Sorting order of search result (default: *'date,asc'*)
    :param latlong: Latitude/longitude filter
    :param radius: Radius of area to search
    :param unit: Unit of radius, 'miles' or 'km' (default: miles)
    :param start_date_time: Filter by start date/time.
        Timestamp format: *YYYY-MM-DDTHH:MM:SSZ*
    :param end_date_time: Filter by end date/time.
        Timestamp format: *YYYY-MM-DDTHH:MM:SSZ*
    :param onsale_start_date_time:
    :param onsale_end_date_time:
    :param country_code:
    :param state_code: State code (ex: 'GA' not 'Georgia')
    :param venue_id: Find events for provided venue ID
    :param attraction_id:
    :param segment_id:
    :param segment_name:
    :param classification_name: Filter events by a list of classification
        name(s) (genre/subgenre/type/subtype/segment)
    :param classification_id:
    :param market_id:
    :param promoter_id:
    :param dma_id:
    :param include_tba: 'yes' to include events with a to-be-announced
        date (['yes', 'no', 'only'])
    :param include_tbd: 'yes' to include events with a date to be
        defined (['yes', 'no', 'only'])
    :param client_visibility:
    :param keyword:
    :param event_id: Event ID to search
    :param source: Filter entities by source name:
        ['ticketmaster', 'universe', 'frontgate', 'tmr']
    :param include_test: 'yes' to include test entities in the response,
        'no' to exclude, 'only' to return ONLY test entities
        (['yes', 'no', 'only'])
    :param page: Page number to get (default: 0)
    :param size: Size of page (default: 20)
    :param locale: Locale (default: 'en')
    :return:
    """
    return self._get(keyword, event_id, sort, include_test, page,
                     size, locale, latlong=latlong, radius=radius,
                     unit=unit, start_date_time=start_date_time,
                     end_date_time=end_date_time,
                     onsale_start_date_time=onsale_start_date_time,
                     onsale_end_date_time=onsale_end_date_time,
                     country_code=country_code, state_code=state_code,
                     venue_id=venue_id, attraction_id=attraction_id,
                     segment_id=segment_id, segment_name=segment_name,
                     classification_name=classification_name,
                     classification_id=classification_id,
                     market_id=market_id, promoter_id=promoter_id,
                     dma_id=dma_id, include_tba=include_tba,
                     include_tbd=include_tbd, source=source,
                     client_visibility=client_visibility, **kwargs)
Python
def by_location(self, latitude, longitude, radius='10', unit='miles',
                sort='relevance,desc', **kwargs):
    """Search events within a radius of a latitude/longitude coordinate.

    :param latitude: Latitude of radius center
    :param longitude: Longitude of radius center
    :param radius: Radius to search outside given latitude/longitude
    :param unit: Unit of radius ('miles' or 'km')
    :param sort: Sort method. (Default: *relevance, desc*). If changed,
        you may get wonky results (*date, asc* returns far-away events)
    :return: List of events within that area
    """
    latitude = str(latitude)
    longitude = str(longitude)
    radius = str(radius)
    latlong = "{lat},{long}".format(lat=latitude, long=longitude)
    return self.find(
        latlong=latlong,
        radius=radius,
        unit=unit,
        sort=sort,
        **kwargs
    )
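# Hedged usage sketch for the event queries above, assuming the surrounding ticketpy
# ApiClient exposes them as an attribute (e.g. client.events), which the by_id()/find()
# methods imply but this snippet does not confirm. The API key is a placeholder, and how the
# returned paged responses are iterated depends on the client's PagedResponse wrapper.
import ticketpy

client = ticketpy.ApiClient('YOUR_API_KEY')
nearby = client.events.by_location(33.7490, -84.3880, radius='5', unit='miles')
ga_rock = client.events.find(classification_name='rock', state_code='GA', size=5)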
Python
def find(self, keyword=None, venue_id=None, sort=None, state_code=None,
         country_code=None, source=None, include_test=None,
         page=None, size=None, locale=None, **kwargs):
    """Search for venues matching provided parameters

    :param keyword: Keyword to search on (such as part of the venue name)
    :param venue_id: Venue ID
    :param sort: Sort method for response (API default: 'name,asc')
    :param state_code: Filter by state code (ex: 'GA' not 'Georgia')
    :param country_code: Filter by country code
    :param source: Filter entities by source
        (['ticketmaster', 'universe', 'frontgate', 'tmr'])
    :param include_test: ['yes', 'no', 'only'], whether to include
        entities flagged as test in the response (default: 'no')
    :param page: Page number (default: 0)
    :param size: Page size of the response (default: 20)
    :param locale: Locale (default: 'en')
    :return: Venues found matching criteria
    :rtype: ``ticketpy.PagedResponse``
    """
    return self._get(keyword, venue_id, sort, include_test, page, size,
                     locale, state_code=state_code,
                     country_code=country_code, source=source, **kwargs)
Python
def by_name(self, venue_name, state_code=None, **kwargs):
    """Search for a venue by name.

    :param venue_name: Venue name to search
    :param state_code: Two-letter state code to narrow results (ex: 'GA')
        (default: None)
    :return: List of venues found matching search criteria
    """
    return self.find(keyword=venue_name, state_code=state_code, **kwargs)
Python
def haversine(latlon1, latlon2):
    """
    Calculate the great circle distance between two points on the earth
    (specified in decimal degrees).

    Sourced from Stack Overflow:
    https://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points
    """
    # convert decimal degrees to radians
    lat1 = float(latlon1['latitude'])
    lon1 = float(latlon1['longitude'])
    lat2 = float(latlon2['latitude'])
    lon2 = float(latlon2['longitude'])

    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])

    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
    c = 2 * asin(sqrt(a))
    r = 3956  # Radius of earth in miles. Use 6371 for kilometers.
    return c * r
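# Small usage sketch for haversine() above; the coordinates are arbitrary test values
# (roughly Atlanta and Athens, GA) and the import mirrors the math functions the body needs.
from math import radians, cos, sin, asin, sqrt

atlanta = {'latitude': '33.7490', 'longitude': '-84.3880'}
athens = {'latitude': '33.9519', 'longitude': '-83.3576'}
print(round(haversine(atlanta, athens), 1), "miles")  # roughly 60 miles with r = 3956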
Python
def process_staff_section(self, section, email_in_common=["[email protected]"]):
    '''
    Unfortunately, none of the accessible directory sources for USGS personnel seem to have
    the link to USGS staff profile pages. The only location that I can find these is through
    the USGS web page at https://www.usgs.gov/connect/staff-profiles, and the only way to get
    at those data is through a web scraper. This function handles the process of extracting a
    usable data structure from the section on the pages that contain individual person
    listings.

    :param section: a BeautifulSoup4 data object containing the div for a given staff person
        listing from which we need to extract useful information
    :type section: bs4.element.Tag
    :return: dictionary containing the name, email, and profile (URL) for a person (email and
        profile will be returned as None if not found in the record)
    '''
    profile_page_link = section.find("a", href=self.profile_link_pattern)
    email_link = section.find("a", href=self.mailto_link_pattern)
    tel_link = section.find("a", href=self.tel_link_pattern)
    org_link = section.find("a", href=self.org_link_pattern)

    person_record = {
        "_date_cached": str(datetime.utcnow().isoformat()),
        "name": None,
        "title": None,
        "organization_name": None,
        "organization_link": None,
        "email": None,
        "profile": None,
        "telephone": None
    }

    if profile_page_link is not None:
        person_record["name"] = profile_page_link.text.replace("\t", "").strip()
        person_record["profile"] = f'{self.usgs_web_root}{profile_page_link["href"]}'
    else:
        person_record["profile"] = None
        name_container = section.find("h4", class_="field-content")
        if name_container is not None:
            person_record["name"] = name_container.text.replace("\t", "").strip()

    if org_link is not None:
        person_record["organization_name"] = org_link.text.replace("\t", "").strip()
        person_record["organization_link"] = org_link["href"].strip()

    if email_link is not None:
        person_record["email"] = email_link.text.replace("\t", "").strip().lower()

    if tel_link is not None:
        person_record["telephone"] = tel_link.text.replace("\t", "").strip()

    bolded_item = section.find("b")
    if bolded_item is not None:
        person_record["title"] = bolded_item.text.replace("\t", "").strip()

    return person_record
Python
def scrape_profile(self, page_url):
    '''
    Unfortunately, there is no current programmatic way of getting at USGS staff profile
    pages, where at least some staff have put significant effort into rounding out their
    available online information. For some, these pages represent the best personally managed
    location to get at published works and other details. For now, the most interesting bits
    from these pages include a self-asserted list of "expertise" keywords drawn from the USGS
    Thesaurus along with a body section containing variable content. This function collects
    the expertise keywords for further analysis, pulls out links from the body (which can be
    compared with other sources), and shoves the body text as a whole into the data for
    further processing.

    :param page_url: URL to the profile page that can be used as a unique key
    :return: dictionary containing the url, list of expertise keywords (if available), list
        of links (text and href) values in dictionaries, and the full body html as a string
    '''
    r = requests.get(page_url)
    if r.status_code != 200:
        return {"url": page_url, "error": f"Status-code: {r.status_code}"}

    soup = BeautifulSoup(r.content, 'html.parser')

    profile_page_data = {
        "profile_id": hashlib.md5(page_url.encode('utf-8')).hexdigest(),
        "profile": page_url,
        "_date_cached": datetime.utcnow().isoformat(),
        "content_size": sys.getsizeof(r.content),
        "display_name": None,
        "title": None,
        "description": None,
        "profile_image_url": None,
        "organization_name": None,
        "organization_link": None,
        "email": None,
        "orcid": None,
        "body_content_links": list(),
        "expertise": list()
    }

    title_section = soup.find("h2", class_="staff-profile-title")
    if title_section is not None:
        title_text = title_section.text.strip()
        if title_text:
            profile_page_data["title"] = title_text

    profile_lead = soup.find('div', class_="lead")
    if profile_lead is not None:
        description_text = profile_lead.text.strip()
        if description_text:
            profile_page_data["description"] = description_text

    expertise_section = soup.find("section", class_="staff-expertise")
    if expertise_section is not None:
        profile_page_data["expertise"] = [
            t.text.strip() for t in expertise_section.findAll("a", href=self.expertise_link_pattern)
        ]

    profile_body_content = soup.find("div", class_="usgs-body")
    if profile_body_content is not None:
        # Keep the body HTML as a string; calling decompose() here would destroy the tag
        # (and return None) before the links below could be extracted.
        profile_page_data["scraped_body_html"] = str(profile_body_content)

        link_list = profile_body_content.findAll("a")
        if link_list is not None:
            for link in link_list:
                try:
                    profile_page_data["body_content_links"].append({
                        "link_text": link.text,
                        "link_href": link["href"]
                    })
                except Exception as e:
                    print(e)
                    continue

    display_name_container = soup.find("div", class_="full-width col-sm-12")
    if display_name_container is not None:
        display_name_container_inner = display_name_container.find("h1", class_="page-header")
        if display_name_container_inner is not None:
            profile_page_data["display_name"] = display_name_container_inner.text

    email_container = soup.find("div", class_="email")
    if email_container is not None:
        email_link = email_container.find("a", href=self.mailto_link_pattern)
        if email_link is not None:
            email_string = email_link.text.lower().strip()
            if validators.email(email_string):
                profile_page_data["email"] = email_string
            else:
                profile_page_data["email"] = None

    organization_container = soup.find("h3", class_="staff-profile-subtitle h4")
    if organization_container is not None:
        organization_link_container = organization_container.find("a")
        if organization_link_container is not None:
            profile_page_data["organization_link"] = organization_link_container["href"].strip()
            profile_page_data["organization_name"] = organization_link_container.text.strip()

    profile_image = soup.find("img", class_='staff-profile-image')
    if profile_image is not None:
        profile_page_data["profile_image_url"] = profile_image["src"]

    orcid_link = soup.find("a", href=self.orcid_link_pattern)
    if orcid_link is not None:
        check_id = utilities.actionable_id(orcid_link.text)
        if check_id is not None and "orcid" in check_id:
            profile_page_data["orcid"] = check_id["orcid"]

    other_pubs_container = soup.find(
        "div",
        class_="entity entity-field-collection-item field-collection-item-field-non-usgs-publication clearfix"
    )
    if other_pubs_container is not None:
        profile_page_data["body_content_links"].extend([
            {
                "link_text": l.text,
                "link_href": l["href"]
            } for l in other_pubs_container.findAll("a")
        ])

    return profile_page_data
Python
def entity_from_doi(doi_doc):
    '''
    Processes a single DOI record retrieved via content negotiation into a flat summarized
    structure for processing into a graph or simple index.
    '''
    if "error" in doi_doc:
        return

    summary_doc = {
        "doi": doi_doc["DOI"],
        "name": doi_doc["title"],
        "url": doi_doc["URL"],
        "publisher": None,
        "date_qualifier": doi_doc["_date"]
    }

    if "publisher" in doi_doc:
        summary_doc["publisher"] = doi_doc["publisher"]

    if ("issued" in doi_doc
            and isinstance(doi_doc["issued"]["date-parts"], list)
            and len(doi_doc["issued"]["date-parts"]) == 1):
        issued_year = doi_doc["issued"]["date-parts"][0][0]
        if issued_year is None:
            summary_doc["year_published"] = None
        else:
            summary_doc["year_published"] = str(issued_year)

    if doi_doc["type"] == "dataset":
        summary_doc["entity_type"] = "Dataset"
    else:
        summary_doc["entity_type"] = "CreativeWork"

    if "abstract" in doi_doc:
        summary_doc["description"] = doi_doc["abstract"]

    if "container-title" in doi_doc:
        if summary_doc["publisher"] == "US Geological Survey":
            summary_doc["journal"] = f"USGS {doi_doc['container-title']}"
        else:
            summary_doc["journal"] = doi_doc['container-title']

    if "event" in doi_doc:
        summary_doc["event"] = doi_doc["event"]

    return summary_doc
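# Hedged sketch of how a doi_doc like the one consumed above could be fetched: DOI content
# negotiation for CSL JSON returns fields such as DOI, title, URL, publisher, type, issued,
# and container-title. The "_date" key read by entity_from_doi() is not part of CSL JSON and
# is presumably stamped on by the surrounding pipeline, so it is added manually here; the DOI
# in the commented call is a placeholder.
import requests
from datetime import datetime

def fetch_doi_doc(doi):
    r = requests.get(
        f"https://doi.org/{doi}",
        headers={"Accept": "application/vnd.citationstyles.csl+json"},
    )
    if r.status_code != 200:
        return {"doi": doi, "error": f"Status-code: {r.status_code}"}
    doc = r.json()
    doc["_date"] = datetime.utcnow().isoformat()  # assumed stand-in for the pipeline's date qualifier
    return doc

# summary = entity_from_doi(fetch_doi_doc("10.1234/example-doi"))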
Python
def lookup_orcid(orcid, return_errors=False):
    '''
    This function handles the process of fetching a given ORCID using content negotiation to
    return the JSON-LD structure from ORCID data. It checks for a number of error conditions
    and will either pass on those cases or return the errors for further consideration in a
    processing pipeline.
    '''
    identifiers = utilities.actionable_id(orcid)

    if identifiers is None:
        if return_errors:
            return {"orcid": orcid, "error": "Not a valid ORCID identifier"}
        else:
            return

    try:
        r = requests.get(identifiers["url"], headers={"accept": "application/ld+json"})
        if r.status_code != 200:
            if return_errors:
                return {"orcid": orcid, "error": f"HTTP Status Code: {str(r.status_code)}"}
            else:
                return
        else:
            raw_doc = r.json()
    except Exception as e:
        if return_errors:
            return {"orcid": orcid, "error": e}
        else:
            return

    if "givenName" not in raw_doc or "familyName" not in raw_doc:
        if return_errors:
            return {
                "orcid": orcid,
                "error": "Either givenName or familyName are missing from the ORCID record, "
                         "and therefore it is unusable at this time."
            }
        else:
            return

    raw_doc["_date_cached"] = str(datetime.utcnow().isoformat())
    raw_doc["orcid"] = raw_doc["@id"].split("/")[-1]

    return raw_doc
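# Minimal usage sketch for lookup_orcid() above. It relies on the package's
# utilities.actionable_id() helper to normalize the identifier into a resolvable URL, so the
# exact URL form is an internal detail; the ORCID below is an example/placeholder value.
record = lookup_orcid("0000-0002-1825-0097", return_errors=True)
if record is not None and "error" not in record:
    print(record["orcid"], record.get("givenName"), record.get("familyName"))
else:
    print(record)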
Python
def wd_reference_uncertainty_factor(wd_reference_list):
    '''
    Takes a given Wikidata reference set, finds any duplicate labels, and introduces an
    uncertainty factor (number of label occurrences) and a "see also" reference of the other
    Wikidata entities in the given set for follow up.
    '''
    wd_reference_labels = [i["label"] for i in wd_reference_list]

    duplicate_labels = dict(
        (x, duplicates(wd_reference_labels, x))
        for x in set(wd_reference_labels)
        if wd_reference_labels.count(x) > 1
    )

    for wd_ref in [i for i in wd_reference_list if i["label"] in duplicate_labels.keys()]:
        wd_ref.update(
            {
                "uncertainty_factor": len(duplicate_labels[wd_ref["label"]]),
                "uncertainty_see_also": [
                    i["identifier"] for index, i in enumerate(wd_reference_list)
                    if index in duplicate_labels[wd_ref["label"]]
                    and i["identifier"] != wd_ref["identifier"]
                ]
            }
        )

    return wd_reference_list
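# wd_reference_uncertainty_factor() above leans on a duplicates() helper that is not shown in
# this snippet. Judging from how its result is used (len() for the factor, index membership
# for the "see also" filter), it presumably returns the positions of every occurrence of a
# value in a list; the sketch below is an assumed stand-in, not the package's actual
# implementation.
def duplicates(sequence, value):
    """Return the indexes at which `value` occurs in `sequence`."""
    return [index for index, item in enumerate(sequence) if item == value]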
Python
def validate_block(self, data: str):
    """
    Validates the data used against a template. This method returns nothing, and will only
    raise exceptions if there are errors.

    :param data: The data used against the template. Expects a stringified JSON object
    :return: None
    """
    template_doc = frappe.get_doc('Editorjs Template', self.name)
    _data = None

    try:
        # Parse the data field in the block
        _data: dict = frappe.parse_json(frappe.parse_json(data))
    except JSONDecodeError:
        frappe.throw(_("Invalid Data while decoding"))

    # Check if the data is not None
    if _data is None or not isinstance(_data, dict):
        frappe.throw(_("Invalid Data"))
    else:
        _data_keys = _data.keys()

        # Get all the non nullable keys in the template
        non_nullable_template_keys = list(
            map(lambda x: x.key, list(filter(lambda x: not x.nullable, template_doc.data)))
        )

        # Check if all keys in data exist as required by the template
        keys_exist = all([x in _data_keys for x in non_nullable_template_keys])

        if not keys_exist:
            frappe.throw(
                _("Some keys are missing. Keys required are: {template_keys}\n Provided keys are {data_keys}".format(
                    template_keys=', '.join(non_nullable_template_keys),
                    data_keys=_data.keys())))

        # Get only fields that are part of the template
        all_template_keys = list(map(lambda x: x.key, template_doc.data))

        # Validate the types of each value in the dict
        for k, v in _data.items():
            if k in all_template_keys:
                self._check_type(k, v)
Python
def _check_type(self, k: str, v: str):
    """
    Throws an error if the types don't match. If there are no errors, nothing is returned.

    :param k: The key within the data
    :param v: The value of the key
    :return: None
    """
    types = list(filter(lambda x: x.key == k, self.data))
    _type = types[0].get('type')
    _nullable = True if types[0].get('nullable') else False
    is_correct_type = (_nullable and v is None) or isinstance(v, self.type_dict.get(_type))

    if not is_correct_type:
        frappe.throw(_(
            "Wrong type for key: {k}. Should be {correct_type} instead of {given_type}".format(
                k=k, correct_type=_type, given_type=type(v))))
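# _check_type() above validates values against self.type_dict, a mapping from the template's
# declared type names to Python types, which is not shown in this snippet. The mapping below
# is an assumed illustration of its shape (keys and coverage may differ in the actual
# DocType); note that isinstance() accepts a tuple of types.
type_dict = {
    'string': str,
    'number': (int, float),
    'boolean': bool,
    'object': dict,
    'array': list,
}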
Python
def t_convert(self, t, time_format="%m/%d %H:%M"):
    """Converting UNIX time to human readable time

    Args:
        t (int): UNIX timestamp
        time_format (str, optional): Date format. Defaults to "%m/%d %H:%M".

    Returns:
        str: Human readable time
    """
    return datetime.utcfromtimestamp(t).strftime(time_format)
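# Quick check of the formatting t_convert() performs, using fixed timestamps so the output is
# deterministic; datetime.utcfromtimestamp() interprets the value as UTC.
from datetime import datetime

print(datetime.utcfromtimestamp(0).strftime("%m/%d %H:%M"))           # 01/01 00:00 (1970-01-01 UTC)
print(datetime.utcfromtimestamp(1609459200).strftime("%m/%d %H:%M"))  # 01/01 00:00 (2021-01-01 UTC)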