def _check_response_for_request_errors(self):
    """
    Override this in each service module to check for errors that are
    specific to that module. For example, invalid tracking numbers in
    a Tracking request.
    """
    if self.response.HighestSeverity == "ERROR":
        for notification in self.response.Notifications:
            if notification.Severity == "ERROR":
                raise FedexError(notification.Code, notification.Message)
Override this in each service module to check for errors that are specific to that module. For example, invalid tracking numbers in a Tracking request.
entailment
def _check_response_for_request_warnings(self):
    """
    Override this in a service module to check for errors that are
    specific to that module. For example, changing state/province based
    on postal code in a Rate Service request.
    """
    if self.response.HighestSeverity in ("NOTE", "WARNING"):
        for notification in self.response.Notifications:
            if notification.Severity in ("NOTE", "WARNING"):
                self.logger.warning(
                    FedexFailure(notification.Code, notification.Message))
Override this in a service module to check for errors that are specific to that module. For example, changing state/province based on postal code in a Rate Service request.
entailment
def send_request(self, send_function=None):
    """
    Sends the assembled request on the child object.

    @type send_function: function reference
    @keyword send_function: A function reference (passed without the
        parenthesis) to a function that will send the request. This
        allows for overriding the default function in cases such as
        validation requests.
    """
    # Send the request and get the response back.
    try:
        # If the user has overridden the send function, use theirs
        # instead of the default.
        if send_function:
            # Follow the overridden function.
            self.response = send_function()
        else:
            # Default scenario, business as usual.
            self.response = self._assemble_and_send_request()
    except suds.WebFault as fault:
        # When this happens, throw an informative message reminding the
        # user to check all required variables, making sure they are
        # populated and valid.
        raise SchemaValidationError(fault.fault)

    # Check the response for general Fedex errors/failures that aren't
    # specific to any given WSDL/request.
    self.__check_response_for_fedex_error()
    # Check the response for errors specific to the particular request.
    # This method can be overridden by a method on the child class object.
    self._check_response_for_request_errors()
    # Check the response for warnings specific to the particular request.
    # This method can be overridden by a method on the child class object.
    self._check_response_for_request_warnings()

    # Debug output. (See Request and Response output)
    self.logger.debug("== FEDEX QUERY RESULT ==")
    self.logger.debug(self.response)
Sends the assembled request on the child object. @type send_function: function reference @keyword send_function: A function reference (passed without the parenthesis) to a function that will send the request. This allows for overriding the default function in cases such as validation requests.
entailment
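A minimal usage sketch for send_request(). The module paths, FedexConfig arguments, and the PackageIdentifier field follow the python-fedex package layout but are assumptions, not shown in this excerpt:

# Hedged sketch; import paths and FedexConfig arguments are assumptions
# based on the python-fedex package layout.
from fedex.config import FedexConfig
from fedex.services.track_service import FedexTrackRequest

CONFIG = FedexConfig(key='...', password='...',
                     account_number='...', meter_number='...')
track = FedexTrackRequest(CONFIG)
track.SelectionDetails.PackageIdentifier.Value = '123456789012'
track.send_request()  # raises SchemaValidationError or FedexError on failure
print(track.response.HighestSeverity)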
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # We get an exception like this when specifying an IntegratorId:
    # suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    self.logger.debug(self.WebAuthenticationDetail)
    self.logger.debug(self.ClientDetail)
    self.logger.debug(self.TransactionDetail)
    self.logger.debug(self.VersionId)
    # Fire off the query.
    return self.client.service.addressValidation(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        InEffectAsOfTimestamp=datetime.datetime.now(),
        AddressesToValidate=self.AddressesToValidate)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def _prepare_wsdl_objects(self):
    """
    This sets the package identifier information. This may be a tracking
    number or a few different things as per the Fedex spec.
    """
    self.SelectionDetails = self.client.factory.create(
        'TrackSelectionDetail')
    # Default to Fedex.
    self.SelectionDetails.CarrierCode = 'FDXE'
    track_package_id = self.client.factory.create('TrackPackageIdentifier')
    # Default to tracking number.
    track_package_id.Type = 'TRACKING_NUMBER_OR_DOORTAG'
    self.SelectionDetails.PackageIdentifier = track_package_id
This sets the package identifier information. This may be a tracking number or a few different things as per the Fedex spec.
entailment
def _check_response_for_request_errors(self):
    """
    Checks the response to see if there were any errors specific to
    this WSDL.
    """
    if self.response.HighestSeverity == "ERROR":  # pragma: no cover
        for notification in self.response.Notifications:
            if notification.Severity == "ERROR":
                if "Invalid tracking number" in notification.Message:
                    raise FedexInvalidTrackingNumber(
                        notification.Code, notification.Message)
                else:
                    raise FedexError(notification.Code,
                                     notification.Message)
Checks the response to see if there were any errors specific to this WSDL.
entailment
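Continuing the earlier sketch, a caller might separate the tracking-specific failure from the generic one. The exception names come from the check above; where they are imported from is not shown in this excerpt:

# Hedged sketch; `track` continues the send_request() example above.
try:
    track.send_request()
except FedexInvalidTrackingNumber as err:
    print('Invalid tracking number:', err)
except FedexError as err:
    print('FedEx returned an error:', err)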
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    client = self.client
    # Fire off the query.
    return client.service.track(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        SelectionDetails=self.SelectionDetails,
        ProcessingOptions=self.ProcessingOptions)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def _prepare_wsdl_objects(self):
    """
    This is the data that will be used to create your shipment. Create
    the data structure and get it ready for the WSDL request.
    """
    # Default behavior is to not request transit information.
    self.ReturnTransitAndCommit = False

    # This is the primary data structure for processShipment requests.
    self.RequestedShipment = self.client.factory.create('RequestedShipment')
    self.RequestedShipment.ShipTimestamp = datetime.datetime.now()

    # Defaults for TotalWeight wsdl object.
    total_weight = self.client.factory.create('Weight')
    # Start at nothing.
    total_weight.Value = 0.0
    # Default to pounds.
    total_weight.Units = 'LB'
    # This is the total weight of the entire shipment. Shipments may
    # contain more than one package.
    self.RequestedShipment.TotalWeight = total_weight

    # This is the top level data structure for Shipper information.
    shipper = self.client.factory.create('Party')
    shipper.Address = self.client.factory.create('Address')
    shipper.Contact = self.client.factory.create('Contact')
    # Link the ShipperParty to our master data structure.
    self.RequestedShipment.Shipper = shipper

    # This is the top level data structure for Recipient information.
    recipient_party = self.client.factory.create('Party')
    recipient_party.Contact = self.client.factory.create('Contact')
    recipient_party.Address = self.client.factory.create('Address')
    # Link the RecipientParty object to our master data structure.
    self.RequestedShipment.Recipient = recipient_party

    # Make sender responsible for payment by default.
    self.RequestedShipment.ShippingChargesPayment = (
        self.create_wsdl_object_of_type('Payment'))
    self.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'

    # Start with no packages; the user must add them.
    self.RequestedShipment.PackageCount = 0
    self.RequestedShipment.RequestedPackageLineItems = []

    # This is good to review if you'd like to see what the data
    # structure looks like.
    self.logger.debug(self.RequestedShipment)
This is the data that will be used to create your shipment. Create the data structure and get it ready for the WSDL request.
entailment
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # Fire off the query.
    return self.client.service.getRates(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        RequestedShipment=self.RequestedShipment,
        ReturnTransitAndCommit=self.ReturnTransitAndCommit)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def print_label(self, package_num=None):
    """
    Prints all of a shipment's labels, or optionally just one.

    @type package_num: L{int}
    @param package_num: 0-based index of the package to print. This is
        only useful for shipments with more than one package.
    """
    # Compare against None explicitly: index 0 (the first package) is
    # falsy and would otherwise print every label.
    if package_num is not None:
        packages = [
            self.shipment.response.CompletedShipmentDetail
                .CompletedPackageDetails[package_num]
        ]
    else:
        packages = (self.shipment.response.CompletedShipmentDetail
                    .CompletedPackageDetails)

    for package in packages:
        label_binary = binascii.a2b_base64(package.Label.Parts[0].Image)
        self._print_base64(label_binary)
Prints all of a shipment's labels, or optionally just one. @type package_num: L{int} @param package_num: 0-based index of the package to print. This is only useful for shipments with more than one package.
entailment
def _print_base64(self, base64_data):
    """
    Pipe the binary directly to the label printer. Works under Linux
    without requiring PySerial. This is not typically something you
    should call directly, unless you have special needs.

    @type base64_data: L{str}
    @param base64_data: The base64 encoded string for the label to print.
    """
    # The caller passes decoded label bytes, so open the device in
    # binary mode.
    with open(self.device, "wb") as label_file:
        label_file.write(base64_data)
Pipe the binary directly to the label printer. Works under Linux without requiring PySerial. This is not typically something you should call directly, unless you have special needs. @type base64_data: L{str} @param base64_data: The base64 encoded string for the label to print.
entailment
def _prepare_wsdl_objects(self):
    """
    Create the data structure and get it ready for the WSDL request.
    """
    # Service defaults for objects that are required.
    self.MultipleMatchesAction = 'RETURN_ALL'
    self.Constraints = self.create_wsdl_object_of_type(
        'SearchLocationConstraints')
    self.Address = self.create_wsdl_object_of_type('Address')
    self.LocationsSearchCriterion = 'ADDRESS'
    self.SortDetail = self.create_wsdl_object_of_type('LocationSortDetail')
Create the data structure and get it ready for the WSDL request.
entailment
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # We get an exception like this when specifying an IntegratorId:
    # suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    self.logger.debug(self.WebAuthenticationDetail)
    self.logger.debug(self.ClientDetail)
    self.logger.debug(self.TransactionDetail)
    self.logger.debug(self.VersionId)
    # Fire off the query.
    return self.client.service.searchLocations(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        LocationsSearchCriterion=self.LocationsSearchCriterion,
        PhoneNumber=self.PhoneNumber,
        MultipleMatchesAction=self.MultipleMatchesAction,
        Constraints=self.Constraints,
        Address=self.Address,
        SortDetail=self.SortDetail)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def _check_response_for_request_errors(self):
    """
    Checks the response to see if there were any errors specific to
    this WSDL.
    """
    if self.response.HighestSeverity == "ERROR":
        for notification in self.response.Notifications:  # pragma: no cover
            if notification.Severity == "ERROR":
                if "Postal Code Not Found" in notification.Message:
                    raise FedexPostalCodeNotFound(notification.Code,
                                                  notification.Message)
                elif "Invalid Postal Code Format" in notification.Message:
                    raise FedexInvalidPostalCodeFormat(notification.Code,
                                                       notification.Message)
                else:
                    raise FedexError(notification.Code,
                                     notification.Message)
Checks the response to see if there were any errors specific to this WSDL.
entailment
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    client = self.client
    # We get an exception like this when specifying an IntegratorId:
    # suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    # Fire off the query.
    response = client.service.postalCodeInquiry(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        PostalCode=self.PostalCode,
        CountryCode=self.CountryCode,
        CarrierCode=self.CarrierCode)
    return response
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def _prepare_wsdl_objects(self):
    """
    Create the data structure and get it ready for the WSDL request.
    """
    self.CarrierCode = 'FDXE'
    self.Origin = self.client.factory.create('Address')
    self.Destination = self.client.factory.create('Address')
    self.ShipDate = datetime.date.today().isoformat()
    self.Service = None
    self.Packaging = 'YOUR_PACKAGING'
Create the data structure and get it ready for the WSDL request.
entailment
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # We get an exception like this when specifying an IntegratorId:
    # suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    self.logger.debug(self.WebAuthenticationDetail)
    self.logger.debug(self.ClientDetail)
    self.logger.debug(self.TransactionDetail)
    self.logger.debug(self.VersionId)
    # Fire off the query.
    return self.client.service.serviceAvailability(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        Origin=self.Origin,
        Destination=self.Destination,
        ShipDate=self.ShipDate,
        CarrierCode=self.CarrierCode,
        Service=self.Service,
        Packaging=self.Packaging)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def basic_sobject_to_dict(obj):
    """
    Converts suds object to dict very quickly. Does not serialize date
    time or normalize key case.

    :param obj: suds object
    :return: dict object
    """
    if not hasattr(obj, '__keylist__'):
        return obj
    data = {}
    fields = obj.__keylist__
    for field in fields:
        val = getattr(obj, field)
        if isinstance(val, list):
            data[field] = []
            for item in val:
                data[field].append(basic_sobject_to_dict(item))
        else:
            data[field] = basic_sobject_to_dict(val)
    return data
Converts suds object to dict very quickly. Does not serialize date time or normalize key case. :param obj: suds object :return: dict object
entailment
def sobject_to_dict(obj, key_to_lower=False, json_serialize=False):
    """
    Converts a suds object to a dict. Includes advanced features.

    :param json_serialize: If set, changes date and time types to iso string.
    :param key_to_lower: If set, changes index key name to lower case.
    :param obj: suds object
    :return: dict object
    """
    import datetime
    if not hasattr(obj, '__keylist__'):
        if json_serialize and isinstance(
                obj, (datetime.datetime, datetime.time, datetime.date)):
            return obj.isoformat()
        else:
            return obj
    data = {}
    fields = obj.__keylist__
    for field in fields:
        val = getattr(obj, field)
        if key_to_lower:
            field = field.lower()
        if isinstance(val, list):
            data[field] = []
            for item in val:
                data[field].append(
                    sobject_to_dict(item, json_serialize=json_serialize))
        else:
            data[field] = sobject_to_dict(val, json_serialize=json_serialize)
    return data
Converts a suds object to a dict. Includes advanced features. :param json_serialize: If set, changes date and time types to iso string. :param key_to_lower: If set, changes index key name to lower case. :param obj: suds object :return: dict object
entailment
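A minimal, self-contained sketch of sobject_to_dict. The Fake class below is a hypothetical stand-in that mimics only the __keylist__ attribute that the function reads from a suds object:

import datetime

class Fake:
    # Hypothetical stand-in exposing only what sobject_to_dict reads.
    def __init__(self, **kwargs):
        self.__keylist__ = list(kwargs)
        self.__dict__.update(kwargs)

obj = Fake(TrackingNumber='123456789012',
           Timestamp=datetime.datetime(2020, 1, 1),
           Events=[Fake(Type='PU'), Fake(Type='DL')])
print(sobject_to_dict(obj, json_serialize=True))
# {'TrackingNumber': '123456789012', 'Timestamp': '2020-01-01T00:00:00',
#  'Events': [{'Type': 'PU'}, {'Type': 'DL'}]}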
def sobject_to_json(obj, key_to_lower=False):
    """
    Converts a suds object to a JSON string.

    :param obj: suds object
    :param key_to_lower: If set, changes index key name to lower case.
    :return: json object
    """
    import json
    data = sobject_to_dict(obj, key_to_lower=key_to_lower,
                           json_serialize=True)
    return json.dumps(data)
Converts a suds object to a JSON string. :param obj: suds object :param key_to_lower: If set, changes index key name to lower case. :return: json object
entailment
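And the JSON form, reusing obj from the sketch above:

# Reusing `obj` from the sobject_to_dict sketch.
print(sobject_to_json(obj))
# {"TrackingNumber": "123456789012", "Timestamp": "2020-01-01T00:00:00",
#  "Events": [{"Type": "PU"}, {"Type": "DL"}]}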
def _prepare_wsdl_objects(self):
    """
    Create the data structure and get it ready for the WSDL request.
    """
    self.CarrierCode = 'FDXE'
    self.RoutingCode = 'FDSD'
    self.Address = self.client.factory.create('Address')
    self.ShipDateTime = datetime.datetime.now().isoformat()
Create the data structure and get it ready for the WSDL request.
entailment
def _assemble_and_send_request(self):
    """
    Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
        WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # We get an exception like this when specifying an IntegratorId:
    # suds.TypeNotFound: Type not found: 'IntegratorId'
    # Setting it to None does not seem to appease it.
    del self.ClientDetail.IntegratorId
    self.logger.debug(self.WebAuthenticationDetail)
    self.logger.debug(self.ClientDetail)
    self.logger.debug(self.TransactionDetail)
    self.logger.debug(self.VersionId)
    # Fire off the query.
    return self.client.service.validatePostal(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        Address=self.Address,
        ShipDateTime=self.ShipDateTime,
        CarrierCode=self.CarrierCode,
        CheckForMismatch=self.CheckForMismatch,
        RoutingCode=self.RoutingCode)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
entailment
def guess_tags(filename):
    """
    Function to get potential tags for files using the file names.

    :param filename: This field is the name of the file.
    """
    tags = []
    stripped_filename = strip_zip_suffix(filename)
    if stripped_filename.endswith('.vcf'):
        tags.append('vcf')
    if stripped_filename.endswith('.json'):
        tags.append('json')
    if stripped_filename.endswith('.csv'):
        tags.append('csv')
    return tags
Function to get potential tags for files using the file names. :param filename: This field is the name of the file.
entailment
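A quick sketch of guess_tags, assuming strip_zip_suffix (defined elsewhere in this module) removes .zip/.gz endings before the extension match:

print(guess_tags('genome.vcf.gz'))  # ['vcf'], assuming .gz is stripped
print(guess_tags('survey.csv'))     # ['csv']
print(guess_tags('notes.txt'))      # []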
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
    """
    Collate local file info as preparation for Open Humans upload.

    Note: Files with filesize > max_bytes are not included in returned info.

    :param filedir: This field is the target directory to get files from.
    :param max_bytes: This field is the maximum file size to consider.
        Its default value is 128m.
    """
    file_data = {}
    logging.info('Characterizing files in {}'.format(filedir))
    for filename in os.listdir(filedir):
        filepath = os.path.join(filedir, filename)
        file_stats = os.stat(filepath)
        creation_date = arrow.get(file_stats.st_ctime).isoformat()
        file_size = file_stats.st_size
        if file_size <= max_bytes:
            file_md5 = hashlib.md5()
            with open(filepath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    file_md5.update(chunk)
            md5 = file_md5.hexdigest()
            file_data[filename] = {
                'tags': guess_tags(filename),
                'description': '',
                'md5': md5,
                'creation_date': creation_date,
            }
    return file_data
Collate local file info as preparation for Open Humans upload. Note: Files with filesize > max_bytes are not included in returned info. :param filedir: This field is the target directory to get files from. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m.
entailment
def validate_metadata(target_dir, metadata):
    """
    Check that the files listed in metadata exactly match files in
    target dir.

    :param target_dir: This field is the target directory from which to
        match metadata.
    :param metadata: This field contains the metadata to be matched.
    """
    if not os.path.isdir(target_dir):
        print("Error: " + target_dir + " is not a directory")
        return False
    file_list = os.listdir(target_dir)
    for filename in file_list:
        if filename not in metadata:
            print("Error: " + filename + " present at " + target_dir +
                  " but not found in metadata file")
            return False
    for filename in metadata:
        if filename not in file_list:
            print("Error: " + filename + " present in metadata file" +
                  " but not found on disk at: " + target_dir)
            return False
    return True
Check that the files listed in metadata exactly match files in target dir. :param target_dir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched.
entailment
def load_metadata_csv_single_user(csv_in, header, tags_idx):
    """
    Return the metadata as requested for a single user.

    :param csv_in: This field is the csv file to return metadata from.
    :param header: This field contains the headers in the csv file.
    :param tags_idx: This field contains the index of the tags in the
        csv file.
    """
    metadata = {}
    n_headers = len(header)
    for index, row in enumerate(csv_in, 2):
        if row[0] == "":
            raise ValueError('Error: In row number ' + str(index) + ':' +
                             ' "filename" must not be empty.')
        # A 'None' filename with all remaining columns 'NA' marks an
        # empty member directory; stop reading further rows.
        if row[0] == 'None' and all(x == 'NA' for x in row[1:]):
            break
        if len(row) != n_headers:
            raise ValueError('Error: In row number ' + str(index) + ':' +
                             ' Number of columns (' + str(len(row)) +
                             ") doesn't match number of headings (" +
                             str(n_headers) + ')')
        metadata[row[0]] = {
            header[i]: row[i] for i in range(1, len(header))
            if i != tags_idx
        }
        metadata[row[0]]['tags'] = [t.strip() for t in
                                    row[tags_idx].split(',') if t.strip()]
    return metadata
Return the metadata as requested for a single user. :param csv_in: This field is the csv file to return metadata from. :param header: This field contains the headers in the csv file :param tags_idx: This field contains the index of the tags in the csv file.
entailment
def load_metadata_csv(input_filepath):
    """
    Return dict of metadata.

    Format is either dict (filenames are keys) or dict-of-dicts (project
    member IDs as top level keys, then filenames as keys).

    :param input_filepath: This field is the filepath of the csv file.
    """
    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)
        if 'tags' in header:
            tags_idx = header.index('tags')
        else:
            raise ValueError(
                '"tags" is a compulsory column in metadata file.')
        if header[0] == 'project_member_id':
            if header[1] == 'filename':
                metadata = load_metadata_csv_multi_user(csv_in, header,
                                                        tags_idx)
            else:
                raise ValueError('The second column must be "filename"')
        elif header[0] == 'filename':
            metadata = load_metadata_csv_single_user(csv_in, header,
                                                     tags_idx)
        else:
            raise ValueError('Incorrect formatting of metadata. The first'
                             ' column for single user upload should be'
                             ' "filename". For multi-user uploads the first'
                             ' column should be "project_member_id" and the'
                             ' second column should be "filename"')
    return metadata
Return dict of metadata. Format is either dict (filenames are keys) or dict-of-dicts (project member IDs as top level keys, then filenames as keys). :param input_filepath: This field is the filepath of the csv file.
entailment
def validate_date(date, project_member_id, filename):
    """
    Check if date is in ISO 8601 format.

    :param date: This field is the date to be checked.
    :param project_member_id: This field is the project_member_id
        corresponding to the date provided.
    :param filename: This field is the filename corresponding to the
        date provided.
    """
    try:
        arrow.get(date)
    except Exception:
        return False
    return True
Check if date is in ISO 8601 format. :param date: This field is the date to be checked. :param project_member_id: This field is the project_member_id corresponding to the date provided. :param filename: This field is the filename corresponding to the date provided.
entailment
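A sketch of validate_date: arrow.get accepts ISO 8601 dates and datetimes, and recent arrow versions raise on malformed input, which the function converts to False:

print(validate_date('2016-06-15', '01234567', 'data.json'))        # True
print(validate_date('2016-06-15T10:30:00Z', '01234567', 'x.csv'))  # True
print(validate_date('June 15th', '01234567', 'x.csv'))             # False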
def is_single_file_metadata_valid(file_metadata, project_member_id,
                                  filename):
    """
    Check if metadata fields like project member id, description, tags,
    md5 and creation date are valid for a single file.

    :param file_metadata: This field is the metadata of the file.
    :param project_member_id: This field is the project member id
        corresponding to the file metadata provided.
    :param filename: This field is the filename corresponding to the
        file metadata provided.
    """
    if project_member_id is not None:
        if not project_member_id.isdigit() or len(project_member_id) != 8:
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' project member id must be of 8 digits from 0 to 9')

    if 'description' not in file_metadata:
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "description" is a required field of the metadata')

    if not isinstance(file_metadata['description'], str):
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "description" must be a string')

    if 'tags' not in file_metadata:
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "tags" is a required field of the metadata')

    if not isinstance(file_metadata['tags'], list):
        raise ValueError(
            'Error: for project member id: ', project_member_id,
            ' and filename: ', filename,
            ' "tags" must be an array of strings')

    if 'creation_date' in file_metadata:
        if not validate_date(file_metadata['creation_date'],
                             project_member_id, filename):
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' Dates must be in ISO 8601 format')

    if 'md5' in file_metadata:
        if not re.match(r'[a-f0-9]{32}$', file_metadata['md5'],
                        flags=re.IGNORECASE):
            raise ValueError(
                'Error: for project member id: ', project_member_id,
                ' and filename: ', filename,
                ' Invalid MD5 specified')

    return True
Check if metadata fields like project member id, description, tags, md5 and creation date are valid for a single file. :param file_metadata: This field is the metadata of the file. :param project_member_id: This field is the project member id corresponding to the file metadata provided. :param filename: This field is the filename corresponding to the file metadata provided.
entailment
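A sketch of a metadata record that passes every check above; all values are illustrative:

meta = {
    'description': 'Genome data',
    'tags': ['vcf', 'genome'],
    'creation_date': '2016-06-15T10:30:00Z',
    'md5': 'd41d8cd98f00b204e9800998ecf8427e',
}
print(is_single_file_metadata_valid(meta, '01234567', 'genome.vcf'))  # True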
def review_metadata_csv_single_user(filedir, metadata, csv_in, n_headers):
    """
    Check validity of metadata for single user.

    :param filedir: This field is the filepath of the directory whose
        csv has to be made.
    :param metadata: This field is the metadata generated from the
        load_metadata_csv function.
    :param csv_in: This field is a reader object which iterates over
        the csv.
    :param n_headers: This field is the number of headers in the csv.
    """
    try:
        if not validate_metadata(filedir, metadata):
            return False
        for filename, file_metadata in metadata.items():
            is_single_file_metadata_valid(file_metadata, None, filename)
    except ValueError as e:
        print_error(e)
        return False
    return True
Check validity of metadata for single user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field is a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv.
entailment
def validate_subfolders(filedir, metadata):
    """
    Check that all folders in the given directory have a corresponding
    entry in the metadata file, and vice versa.

    :param filedir: This field is the target directory from which to
        match metadata.
    :param metadata: This field contains the metadata to be matched.
    """
    if not os.path.isdir(filedir):
        print("Error: " + filedir + " is not a directory")
        return False
    subfolders = os.listdir(filedir)
    for subfolder in subfolders:
        if subfolder not in metadata:
            print("Error: folder " + subfolder +
                  " present on disk but not in metadata")
            return False
    for subfolder in metadata:
        if subfolder not in subfolders:
            print("Error: folder " + subfolder +
                  " present in metadata but not on disk")
            return False
    return True
Check that all folders in the given directory have a corresponding entry in the metadata file, and vice versa. :param filedir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched.
entailment
def review_metadata_csv_multi_user(filedir, metadata, csv_in, n_headers):
    """
    Check validity of metadata for multi user.

    :param filedir: This field is the filepath of the directory whose
        csv has to be made.
    :param metadata: This field is the metadata generated from the
        load_metadata_csv function.
    :param csv_in: This field is a reader object which iterates over
        the csv.
    :param n_headers: This field is the number of headers in the csv.
    """
    try:
        if not validate_subfolders(filedir, metadata):
            return False
        for project_member_id, member_metadata in metadata.items():
            if not validate_metadata(
                    os.path.join(filedir, project_member_id),
                    member_metadata):
                return False
            for filename, file_metadata in member_metadata.items():
                is_single_file_metadata_valid(file_metadata,
                                              project_member_id, filename)
    except ValueError as e:
        print_error(e)
        return False
    return True
Check validity of metadata for multi user. :param filedir: This field is the filepath of the directory whose csv has to be made. :param metadata: This field is the metadata generated from the load_metadata_csv function. :param csv_in: This field is a reader object which iterates over the csv. :param n_headers: This field is the number of headers in the csv.
entailment
def review_metadata_csv(filedir, input_filepath):
    """
    Check validity of metadata fields.

    :param filedir: This field is the filepath of the directory whose
        csv has to be made.
    :param input_filepath: This field is the file path of the metadata
        csv to review.
    """
    try:
        metadata = load_metadata_csv(input_filepath)
    except ValueError as e:
        print_error(e)
        return False
    with open(input_filepath) as f:
        csv_in = csv.reader(f)
        header = next(csv_in)
        n_headers = len(header)
        if header[0] == 'filename':
            res = review_metadata_csv_single_user(filedir, metadata,
                                                  csv_in, n_headers)
            return res
        if header[0] == 'project_member_id':
            res = review_metadata_csv_multi_user(filedir, metadata,
                                                 csv_in, n_headers)
            return res
Check validity of metadata fields. :param filedir: This field is the filepath of the directory whose csv has to be made. :param input_filepath: This field is the file path of the metadata csv to review.
entailment
def write_metadata_to_filestream(filedir, filestream,
                                 max_bytes=MAX_FILE_DEFAULT):
    """
    Make metadata file for all files in a directory (helper function).

    :param filedir: This field is the filepath of the directory whose
        csv has to be made.
    :param filestream: This field is a stream for writing to the csv.
    :param max_bytes: This field is the maximum file size to consider.
        Its default value is 128m.
    """
    csv_out = csv.writer(filestream)
    subdirs = [os.path.join(filedir, i) for i in os.listdir(filedir)
               if os.path.isdir(os.path.join(filedir, i))]
    if subdirs:
        logging.info('Making metadata for subdirs of {}'.format(filedir))
        if not all([re.match('^[0-9]{8}$', os.path.basename(d))
                    for d in subdirs]):
            raise ValueError("Subdirs not all project member ID format!")
        csv_out.writerow(['project_member_id', 'filename', 'tags',
                          'description', 'md5', 'creation_date'])
        for subdir in subdirs:
            file_info = characterize_local_files(
                filedir=subdir, max_bytes=max_bytes)
            proj_member_id = os.path.basename(subdir)
            if not file_info:
                csv_out.writerow([proj_member_id, 'None',
                                  'NA', 'NA', 'NA', 'NA'])
                continue
            for filename in file_info:
                csv_out.writerow([proj_member_id,
                                  filename,
                                  ', '.join(file_info[filename]['tags']),
                                  file_info[filename]['description'],
                                  file_info[filename]['md5'],
                                  file_info[filename]['creation_date'],
                                  ])
    else:
        csv_out.writerow(['filename', 'tags',
                          'description', 'md5', 'creation_date'])
        file_info = characterize_local_files(
            filedir=filedir, max_bytes=max_bytes)
        for filename in file_info:
            csv_out.writerow([filename,
                              ', '.join(file_info[filename]['tags']),
                              file_info[filename]['description'],
                              file_info[filename]['md5'],
                              file_info[filename]['creation_date'],
                              ])
Make metadata file for all files in a directory (helper function). :param filedir: This field is the filepath of the directory whose csv has to be made. :param filestream: This field is a stream for writing to the csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m.
entailment
def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT):
    """
    Make metadata file for all files in a directory.

    :param filedir: This field is the filepath of the directory whose
        csv has to be made.
    :param outputfilepath: This field is the file path of the output csv.
    :param max_bytes: This field is the maximum file size to consider.
        Its default value is 128m.
    """
    with open(outputfilepath, 'w') as filestream:
        write_metadata_to_filestream(filedir, filestream, max_bytes)
Make metadata file for all files in a directory. :param filedir: This field is the filepath of the directory whose csv has to be made. :param outputfilepath: This field is the file path of the output csv. :param max_bytes: This field is the maximum file size to consider. Its default value is 128m.
entailment
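A sketch tying the draft and review steps together; parse_size is assumed to come from humanfriendly, as elsewhere in this module, and the paths are illustrative:

mk_metadata_csv('member_files/', 'metadata.csv',
                max_bytes=parse_size('64m'))  # parse_size: assumed import
if review_metadata_csv('member_files/', 'metadata.csv'):
    print('metadata.csv is ready for upload')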
def download_file(download_url, target_filepath, max_bytes=MAX_FILE_DEFAULT):
    """
    Download a file.

    :param download_url: This field is the url from which data will be
        downloaded.
    :param target_filepath: This field is the path of the file where
        data will be downloaded.
    :param max_bytes: This field is the maximum file size to download.
        Its default value is 128m.
    """
    response = requests.get(download_url, stream=True)
    size = int(response.headers['Content-Length'])
    if _exceeds_size(size, max_bytes, target_filepath) is True:
        return response
    logging.info('Downloading {} ({})'.format(
        target_filepath, format_size(size)))
    if os.path.exists(target_filepath):
        stat = os.stat(target_filepath)
        if stat.st_size == size:
            logging.info('Skipping, file exists and is the right '
                         'size: {}'.format(target_filepath))
            return response
        else:
            logging.info('Replacing, file exists and is the wrong '
                         'size: {}'.format(target_filepath))
            os.remove(target_filepath)
    with open(target_filepath, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                f.write(chunk)
    logging.info('Download complete: {}'.format(target_filepath))
    return response
Download a file. :param download_url: This field is the url from which data will be downloaded. :param target_filepath: This field is the path of the file where data will be downloaded. :param max_bytes: This field is the maximum file size to download. Its default value is 128m.
entailment
def read_id_list(filepath):
    """
    Get project member IDs from a file.

    :param filepath: This field is the path of the file to read.
    """
    if not filepath:
        return None
    id_list = []
    with open(filepath) as f:
        for line in f:
            line = line.rstrip()
            if not re.match('^[0-9]{8}$', line):
                raise ValueError('Each line in whitelist or blacklist is '
                                 'expected to contain an eight digit ID, '
                                 'and nothing else.')
            else:
                id_list.append(line)
    return id_list
Get project member IDs from a file. :param filepath: This field is the path of the file to read.
entailment
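A sketch of read_id_list: one eight-digit project member ID per line, nothing else:

with open('memberlist.txt', 'w') as f:
    f.write('01234567\n12345678\n')
print(read_id_list('memberlist.txt'))  # ['01234567', '12345678']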
def set_log_level(debug, verbose):
    """
    Function for setting the logging level.

    :param debug: This boolean field is the logging level.
    :param verbose: This boolean field is the logging level.
    """
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    elif verbose:
        logging.basicConfig(level=logging.INFO)
Function for setting the logging level. :param debug: This boolean field is the logging level. :param verbose: This boolean field is the logging level.
entailment
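A sketch of the flag precedence in set_log_level: debug wins over verbose, and with neither flag the root logger is left untouched:

import logging

set_log_level(debug=False, verbose=True)
logging.info('visible at INFO')
logging.debug('suppressed unless debug=True')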
def download_cli(directory, master_token=None, member=None,
                 access_token=None, source=None, project_data=False,
                 max_size='128m', verbose=False, debug=False,
                 memberlist=None, excludelist=None, id_filename=False):
    """
    Command line function for downloading data from project members to
    the target directory. For more information visit
    :func:`download<ohapi.command_line.download>`.
    """
    return download(directory, master_token, member, access_token, source,
                    project_data, max_size, verbose, debug, memberlist,
                    excludelist, id_filename)
Command line function for downloading data from project members to the target directory. For more information visit :func:`download<ohapi.command_line.download>`.
entailment
def download(directory, master_token=None, member=None, access_token=None,
             source=None, project_data=False, max_size='128m',
             verbose=False, debug=False, memberlist=None, excludelist=None,
             id_filename=False):
    """
    Download data from project members to the target directory.

    Unless this is a member-specific download, directories will be
    created for each project member ID. Also, unless a source is
    specified, all shared sources are downloaded and data is sorted into
    subdirectories according to source.

    Projects can optionally return data to Open Humans member accounts.
    If project_data is True (or the "--project-data" flag is used), this
    data (the project's own data files, instead of data from other
    sources) will be downloaded for each member.

    :param directory: This field is the target directory to download data.
    :param master_token: This field is the master access token for the
        project. Its default value is None.
    :param member: This field is the specific member whose project data
        is downloaded. Its default value is None.
    :param access_token: This field is the user specific access token.
        Its default value is None.
    :param source: This field is the data source. Its default value is
        None.
    :param project_data: This field is data related to a particular
        project. Its default value is False.
    :param max_size: This field is the maximum file size. Its default
        value is 128m.
    :param verbose: This boolean field is the logging level. Its default
        value is False.
    :param debug: This boolean field is the logging level. Its default
        value is False.
    :param memberlist: This field is the list of members whose data will
        be downloaded. Its default value is None.
    :param excludelist: This field is the list of members whose data
        will be skipped. Its default value is None.
    """
    set_log_level(debug, verbose)

    if (memberlist or excludelist) and (member or access_token):
        raise UsageError('Please do not provide a memberlist or '
                         'excludelist when retrieving data for a single '
                         'member.')

    memberlist = read_id_list(memberlist)
    excludelist = read_id_list(excludelist)

    if not (master_token or access_token) or (master_token and access_token):
        raise UsageError('Please specify either a master access token (-T), '
                         'or an OAuth2 user access token (-t).')

    if source and project_data:
        raise UsageError("It doesn't make sense to use both 'source' and "
                         "'project-data' options!")

    if master_token:
        project = OHProject(master_access_token=master_token)
        if member:
            if project_data:
                project.download_member_project_data(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    max_size=max_size,
                    id_filename=id_filename)
            else:
                project.download_member_shared(
                    member_data=project.project_data[member],
                    target_member_dir=directory,
                    source=source,
                    max_size=max_size,
                    id_filename=id_filename)
        else:
            project.download_all(target_dir=directory,
                                 source=source,
                                 max_size=max_size,
                                 memberlist=memberlist,
                                 excludelist=excludelist,
                                 project_data=project_data,
                                 id_filename=id_filename)
    else:
        member_data = exchange_oauth2_member(access_token, all_files=True)
        if project_data:
            OHProject.download_member_project_data(
                member_data=member_data,
                target_member_dir=directory,
                max_size=max_size,
                id_filename=id_filename)
        else:
            OHProject.download_member_shared(
                member_data=member_data,
                target_member_dir=directory,
                source=source,
                max_size=max_size,
                id_filename=id_filename)
Download data from project members to the target directory. Unless this is a member-specific download, directories will be created for each project member ID. Also, unless a source is specified, all shared sources are downloaded and data is sorted into subdirectories according to source. Projects can optionally return data to Open Humans member accounts. If project_data is True (or the "--project-data" flag is used), this data (the project's own data files, instead of data from other sources) will be downloaded for each member. :param directory: This field is the target directory to download data. :param master_token: This field is the master access token for the project. Its default value is None. :param member: This field is the specific member whose project data is downloaded. Its default value is None. :param access_token: This field is the user specific access token. Its default value is None. :param source: This field is the data source. Its default value is None. :param project_data: This field is data related to a particular project. Its default value is False. :param max_size: This field is the maximum file size. Its default value is 128m. :param verbose: This boolean field is the logging level. Its default value is False. :param debug: This boolean field is the logging level. Its default value is False. :param memberlist: This field is the list of members whose data will be downloaded. Its default value is None. :param excludelist: This field is the list of members whose data will be skipped. Its default value is None.
entailment
def download_metadata_cli(master_token, output_csv, verbose=False,
                          debug=False):
    """
    Command line function for downloading metadata. For more information
    visit :func:`download_metadata<ohapi.command_line.download_metadata>`.
    """
    return download_metadata(master_token, output_csv, verbose, debug)
Command line function for downloading metadata. For more information visit :func:`download_metadata<ohapi.command_line.download_metadata>`.
entailment
def download_metadata(master_token, output_csv, verbose=False, debug=False):
    """
    Output CSV with metadata for a project's downloadable files in Open
    Humans.

    :param master_token: This field is the master access token for the
        project.
    :param output_csv: This field is the target csv file to which
        metadata is written.
    :param verbose: This boolean field is the logging level. Its default
        value is False.
    :param debug: This boolean field is the logging level. Its default
        value is False.
    """
    set_log_level(debug, verbose)
    project = OHProject(master_access_token=master_token)
    with open(output_csv, 'w') as f:
        csv_writer = csv.writer(f)
        header = ['project_member_id', 'data_source', 'file_basename',
                  'file_upload_date']
        csv_writer.writerow(header)
        for member_id in project.project_data:
            if not project.project_data[member_id]['data']:
                csv_writer.writerow([member_id, 'NA', 'None', 'NA'])
            else:
                for data_item in project.project_data[member_id]['data']:
                    logging.debug(data_item)
                    csv_writer.writerow([
                        member_id,
                        data_item['source'],
                        data_item['basename'].encode('utf-8'),
                        data_item['created']])
Output CSV with metadata for a project's downloadable files in Open Humans. :param master_token: This field is the master access token for the project. :param output_csv: This field is the target csv file to which metadata is written. :param verbose: This boolean field is the logging level. Its default value is False. :param debug: This boolean field is the logging level. Its default value is False.
entailment
def upload_metadata_cli(directory, create_csv='', review='',
                        max_size='128m', verbose=False, debug=False):
    """
    Command line function for drafting or reviewing metadata files. For
    more information visit
    :func:`upload_metadata<ohapi.command_line.upload_metadata>`.
    """
    return upload_metadata(directory, create_csv, review, max_size,
                           verbose, debug)
Command line function for drafting or reviewing metadata files. For more information visit :func:`upload_metadata<ohapi.command_line.upload_metadata>`.
entailment
def upload_metadata(directory, create_csv='', review='', max_size='128m',
                    verbose=False, debug=False):
    """
    Draft or review metadata files for uploading files to Open Humans.

    The target directory should either represent files for a single
    member (no subdirectories), or contain a subdirectory for each
    project member ID.

    :param directory: This field is the directory for which metadata has
        to be created.
    :param create_csv: This field is the output filepath to which the
        csv file will be written.
    :param review: This field is the filepath of an existing metadata
        csv to review.
    :param max_size: This field is the maximum file size. Its default
        value is 128m.
    :param verbose: This boolean field is the logging level. Its default
        value is False.
    :param debug: This boolean field is the logging level. Its default
        value is False.
    """
    set_log_level(debug, verbose)
    max_bytes = parse_size(max_size)
    if create_csv and review:
        raise ValueError('Specify either create_csv or review, '
                         'but not both.')
    if review:
        if review_metadata_csv(directory, review):
            print("The metadata file has been reviewed and is valid.")
    elif create_csv:
        mk_metadata_csv(directory, create_csv, max_bytes=max_bytes)
    else:
        raise ValueError('Either create_csv or review must be provided.')
Draft or review metadata files for uploading files to Open Humans. The target directory should either represent files for a single member (no subdirectories), or contain a subdirectory for each project member ID. :param directory: This field is the directory for which metadata has to be created. :param create_csv: This field is the output filepath to which the csv file will be written. :param review: This field is the filepath of an existing metadata csv to review. :param max_size: This field is the maximum file size. Its default value is 128m. :param verbose: This boolean field is the logging level. Its default value is False. :param debug: This boolean field is the logging level. Its default value is False.
entailment
def upload_cli(directory, metadata_csv, master_token=None, member=None,
               access_token=None, safe=False, sync=False, max_size='128m',
               mode='default', verbose=False, debug=False):
    """
    Command line function for uploading files to OH. For more information
    visit :func:`upload<ohapi.command_line.upload>`.
    """
    return upload(directory, metadata_csv, master_token, member,
                  access_token, safe, sync, max_size, mode, verbose, debug)
Command line function for uploading files to OH. For more information visit :func:`upload<ohapi.command_line.upload>`.
entailment
def upload(directory, metadata_csv, master_token=None, member=None,
           access_token=None, safe=False, sync=False, max_size='128m',
           mode='default', verbose=False, debug=False):
    """
    Upload files for the project to Open Humans member accounts.

    If using a master access token and not specifying member ID:

    (1) Files should be organized in subdirectories according to project
        member ID, e.g.:

        main_directory/01234567/data.json
        main_directory/12345678/data.json
        main_directory/23456789/data.json

    (2) The metadata CSV should have the following format:

        1st column: Project member ID
        2nd column: filenames
        3rd & additional columns: Metadata fields (see below)

    If uploading for a specific member:

    (1) The local directory should not contain subdirectories.
    (2) The metadata CSV should have the following format:

        1st column: filenames
        2nd & additional columns: Metadata fields (see below)

    The default behavior is to overwrite files with matching filenames
    on Open Humans, but not otherwise delete files. (Use --safe or
    --sync to change this behavior.)

    If included, the following metadata columns should be correctly
    formatted:

    'tags': should be comma-separated strings
    'md5': should match the file's md5 hexdigest
    'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes

    Other metadata fields (e.g. 'description') can be arbitrary strings.
    Either specify sync as True or safe as True, but not both.

    :param directory: This field is the target directory from which data
        will be uploaded.
    :param metadata_csv: This field is the filepath of the metadata csv
        file.
    :param master_token: This field is the master access token for the
        project. Its default value is None.
    :param member: This field is the specific member whose project data
        is uploaded. Its default value is None.
    :param access_token: This field is the user specific access token.
        Its default value is None.
    :param safe: This boolean field will overwrite matching filenames.
        Its default value is False.
    :param sync: This boolean field will delete files on Open Humans
        that are not in the local directory. Its default value is False.
    :param max_size: This field is the maximum file size. Its default
        value is 128m.
    :param mode: This field takes three values: default, sync, safe. Its
        default value is 'default'.
    :param verbose: This boolean field is the logging level. Its default
        value is False.
    :param debug: This boolean field is the logging level. Its default
        value is False.
    """
    if safe and sync:
        raise UsageError('Safe (--safe) and sync (--sync) modes are '
                         'mutually incompatible!')
    if not (master_token or access_token) or (master_token and access_token):
        raise UsageError('Please specify either a master access token (-T), '
                         'or an OAuth2 user access token (-t).')
    set_log_level(debug, verbose)
    if sync:
        mode = 'sync'
    elif safe:
        mode = 'safe'
    metadata = load_metadata_csv(metadata_csv)
    subdirs = [i for i in os.listdir(directory)
               if os.path.isdir(os.path.join(directory, i))]
    if subdirs:
        if not all([re.match(r'^[0-9]{8}$', d) for d in subdirs]):
            raise UsageError(
                "Subdirs expected to match project member ID format!")
        if (master_token and member) or not master_token:
            raise UsageError(
                "Subdirs shouldn't exist if uploading for specific member!")
        project = OHProject(master_access_token=master_token)
        for member_id in subdirs:
            subdir_path = os.path.join(directory, member_id)
            project.upload_member_from_dir(
                member_data=project.project_data[member_id],
                target_member_dir=subdir_path,
                metadata=metadata[member_id],
                mode=mode,
                access_token=project.master_access_token,
            )
    else:
        if master_token and not (master_token and member):
            raise UsageError('No member specified!')
        if master_token:
            project = OHProject(master_access_token=master_token)
            project.upload_member_from_dir(
                member_data=project.project_data[member],
                target_member_dir=directory,
                metadata=metadata,
                mode=mode,
                access_token=project.master_access_token,
            )
        else:
            member_data = exchange_oauth2_member(access_token)
            OHProject.upload_member_from_dir(
                member_data=member_data,
                target_member_dir=directory,
                metadata=metadata,
                mode=mode,
                access_token=access_token,
            )
Upload files for the project to Open Humans member accounts. If using a master access token and not specifying member ID: (1) Files should be organized in subdirectories according to project member ID, e.g.: main_directory/01234567/data.json main_directory/12345678/data.json main_directory/23456789/data.json (2) The metadata CSV should have the following format: 1st column: Project member ID 2nd column: filenames 3rd & additional columns: Metadata fields (see below) If uploading for a specific member: (1) The local directory should not contain subdirectories. (2) The metadata CSV should have the following format: 1st column: filenames 2nd & additional columns: Metadata fields (see below) The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. (Use --safe or --sync to change this behavior.) If included, the following metadata columns should be correctly formatted: 'tags': should be comma-separated strings 'md5': should match the file's md5 hexdigest 'creation_date', 'start_date', 'end_date': ISO 8601 dates or datetimes Other metadata fields (e.g. 'description') can be arbitrary strings. Either specify sync as True or safe as True, but not both. :param directory: This field is the target directory from which data will be uploaded. :param metadata_csv: This field is the filepath of the metadata csv file. :param master_token: This field is the master access token for the project. Its default value is None. :param member: This field is the specific member whose project data is uploaded. Its default value is None. :param access_token: This field is the user specific access token. Its default value is None. :param safe: This boolean field will overwrite matching filenames. Its default value is False. :param sync: This boolean field will delete files on Open Humans that are not in the local directory. Its default value is False. :param max_size: This field is the maximum file size. Its default value is 128m. :param mode: This field takes three values: default, sync, safe. Its default value is 'default'. :param verbose: This boolean field is the logging level. Its default value is False. :param debug: This boolean field is the logging level. Its default value is False.
entailment
def oauth_token_exchange_cli(client_id, client_secret, redirect_uri,
                             base_url=OH_BASE_URL, code=None,
                             refresh_token=None):
    """
    Command line function for obtaining the refresh token/code. For more
    information visit
    :func:`oauth2_token_exchange<ohapi.api.oauth2_token_exchange>`.
    """
    print(oauth2_token_exchange(client_id, client_secret, redirect_uri,
                                base_url, code, refresh_token))
Command line function for obtaining the refresh token/code. For more information visit :func:`oauth2_token_exchange<ohapi.api.oauth2_token_exchange>`.
entailment
def oauth2_auth_url_cli(redirect_uri=None, client_id=None,
                        base_url=OH_BASE_URL):
    """
    Command line function for obtaining the Oauth2 url. For more
    information visit :func:`oauth2_auth_url<ohapi.api.oauth2_auth_url>`.
    """
    result = oauth2_auth_url(redirect_uri, client_id, base_url)
    print('The requested URL is:')
    print(result)
Command line function for obtaining the Oauth2 url. For more information visit :func:`oauth2_auth_url<ohapi.api.oauth2_auth_url>`.
entailment
def message_cli(subject, message_body, access_token, all_members=False,
                project_member_ids=None, base_url=OH_BASE_URL,
                verbose=False, debug=False):
    """
    Command line function for sending email to a single user or in bulk.
    For more information visit :func:`message<ohapi.api.message>`.
    """
    if project_member_ids:
        project_member_ids = re.split(r'[ ,\r\n]+', project_member_ids)
    return message(subject, message_body, access_token, all_members,
                   project_member_ids, base_url)
Command line function for sending email to a single user or in bulk. For more information visit :func:`message<ohapi.api.message>`.
entailment
def delete_cli(access_token, project_member_id, base_url=OH_BASE_URL,
               file_basename=None, file_id=None, all_files=False):
    """
    Command line function for deleting files. For more information visit
    :func:`delete_file<ohapi.api.delete_file>`.
    """
    response = delete_file(access_token, project_member_id, base_url,
                           file_basename, file_id, all_files)
    if response.status_code == 200:
        print("File deleted successfully.")
    else:
        print("Bad response while deleting file.")
Command line function for deleting files. For more information visit :func:`delete_file<ohapi.api.delete_file>`.
entailment
def public_data_download_cli(source, username, directory, max_size, quiet,
                             debug):
    """
    Command line tools for downloading public data.
    """
    return public_download(source, username, directory, max_size, quiet,
                           debug)
Command line tools for downloading public data.
entailment
def download_url(result, directory, max_bytes):
    """
    Download a file.

    :param result: This field contains a url from which data will be
        downloaded.
    :param directory: This field is the target directory to which data
        will be downloaded.
    :param max_bytes: This field is the maximum file size in bytes.
    """
    response = requests.get(result['download_url'], stream=True)
    # TODO: make this more robust by parsing the URL
    filename = response.url.split('/')[-1]
    filename = re.sub(r'\?.*$', '', filename)
    filename = '{}-{}'.format(result['user']['id'], filename)
    size = int(response.headers['Content-Length'])
    if size > max_bytes:
        logging.info('Skipping {}, {} > {}'.format(
            filename, format_size(size), format_size(max_bytes)))
        return
    logging.info('Downloading {} ({})'.format(filename, format_size(size)))
    output_path = os.path.join(directory, filename)
    try:
        stat = os.stat(output_path)
        if stat.st_size == size:
            logging.info('Skipping "{}"; exists and is the right '
                         'size'.format(filename))
            return
        else:
            logging.info('Removing "{}"; exists and is the wrong '
                         'size'.format(filename))
            os.remove(output_path)
    except OSError:
        # TODO: check errno here?
        pass
    with open(output_path, 'wb') as f:
        total_length = int(response.headers.get('content-length'))
        dl = 0
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                dl += len(chunk)
                f.write(chunk)
                # Render a simple 50-character progress bar.
                d = int(50 * dl / total_length)
                sys.stdout.write("\r[%s%s]%d%s" %
                                 ('.' * d, ' ' * (50 - d), d * 2, '%'))
                sys.stdout.flush()
    print("\n")
    logging.info('Downloaded {}'.format(filename))
Download a file. :param result: This field contains a url from which data will be downloaded. :param directory: This field is the target directory to which data will be downloaded. :param max_bytes: This field is the maximum file size in bytes.
entailment
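The heart of `download_url` above is a standard streaming download. As a minimal sketch of that pattern in isolation (the URL and output filename below are placeholders, not part of the library):

```python
import requests

def stream_to_file(url, output_path, chunk_size=8192):
    # Stream the response body to disk in fixed-size chunks so that
    # large files never need to fit in memory.
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(output_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=chunk_size):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)

stream_to_file('https://example.com/data.csv', 'data.csv')
```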
def download(source=None, username=None, directory='.', max_size='128m', quiet=None, debug=None): """ Download public data from Open Humans. :param source: This field is the data source from which to download. Its default value is None. :param username: This field is the username of the user. Its default value is None. :param directory: This field is the target directory to which data is downloaded. :param max_size: This field is the maximum file size. Its default value is 128m. :param quiet: This field is the logging level. Its default value is None. :param debug: This field is the logging level. Its default value is None. """ if debug: logging.basicConfig(level=logging.DEBUG) elif quiet: logging.basicConfig(level=logging.ERROR) else: logging.basicConfig(level=logging.INFO) logging.debug("Running with source: '{}'".format(source) + " and username: '{}'".format(username) + " and directory: '{}'".format(directory) + " and max-size: '{}'".format(max_size)) signal.signal(signal.SIGINT, signal_handler_cb) max_bytes = parse_size(max_size) options = {} if source: options['source'] = source if username: options['username'] = username page = '{}?{}'.format(BASE_URL_API, urlencode(options)) results = [] counter = 1 logging.info('Retrieving metadata') while True: logging.info('Retrieving page {}'.format(counter)) response = get_page(page) results = results + response['results'] if response['next']: page = response['next'] else: break counter += 1 logging.info('Downloading {} files'.format(len(results))) download_url_partial = partial(download_url, directory=directory, max_bytes=max_bytes) with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor: for value in executor.map(download_url_partial, results): if value: logging.info(value)
Download public data from Open Humans. :param source: This field is the data source from which to download. Its default value is None. :param username: This field is the username of the user. Its default value is None. :param directory: This field is the target directory to which data is downloaded. :param max_size: This field is the maximum file size. Its default value is 128m. :param quiet: This field is the logging level. Its default value is None. :param debug: This field is the logging level. Its default value is None.
entailment
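A hypothetical invocation of `download` above; the source name and directory are illustrative placeholders, and any valid Open Humans public data source would do:

```python
# Fetch public files for one source into ./public-data, skipping anything
# over 64 MB; quiet=True raises the log level to ERROR.
download(source='direct-sharing-176', directory='./public-data',
         max_size='64m', quiet=True)
```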
def get_members_by_source(base_url=BASE_URL_API): """ Function returns which members have joined each activity. :param base_url: It is the URL `https://www.openhumans.org/api/public-data`. """ url = '{}members-by-source/'.format(base_url) response = get_page(url) return response
Function returns which members have joined each activity. :param base_url: It is the URL `https://www.openhumans.org/api/public-data`.
entailment
def get_sources_by_member(base_url=BASE_URL_API, limit=LIMIT_DEFAULT): """ Function returns which activities each member has joined. :param base_url: It is the URL `https://www.openhumans.org/api/public-data`. :param limit: It is the maximum number of results sent per request. """ url = '{}sources-by-member/'.format(base_url) page = '{}?{}'.format(url, urlencode({'limit': limit})) results = [] while True: data = get_page(page) results = results + data['results'] if data['next']: page = data['next'] else: break return results
Function returns which activities each member has joined. :param base_url: It is the URL `https://www.openhumans.org/api/public-data`. :param limit: It is the maximum number of results sent per request.
entailment
def _get_member_file_data(member_data, id_filename=False): """ Helper function to get file data of member of a project. :param member_data: This field is data related to member in a project. """ file_data = {} for datafile in member_data['data']: if id_filename: basename = '{}.{}'.format(datafile['id'], datafile['basename']) else: basename = datafile['basename'] if (basename not in file_data or arrow.get(datafile['created']) > arrow.get(file_data[basename]['created'])): file_data[basename] = datafile return file_data
Helper function to get file data of member of a project. :param member_data: This field is data related to member in a project.
entailment
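A toy illustration of the deduplication rule in `_get_member_file_data`: when two files share a basename, the one with the later `created` timestamp wins. The ids and timestamps below are made up:

```python
member_data = {'data': [
    {'id': 1, 'basename': 'report.csv', 'created': '2019-01-01T00:00:00Z'},
    {'id': 2, 'basename': 'report.csv', 'created': '2019-06-01T00:00:00Z'},
]}
files = _get_member_file_data(member_data)
assert files['report.csv']['id'] == 2  # the newer duplicate wins
```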
def update_data(self): """ Returns data for all users including shared data files. """ url = ('https://www.openhumans.org/api/direct-sharing/project/' 'members/?access_token={}'.format(self.master_access_token)) results = get_all_results(url) self.project_data = dict() for result in results: self.project_data[result['project_member_id']] = result if len(result['data']) < result['file_count']: member_data = get_page(result['exchange_member']) final_data = member_data['data'] while member_data['next']: member_data = get_page(member_data['next']) final_data = final_data + member_data['data'] self.project_data[ result['project_member_id']]['data'] = final_data return self.project_data
Returns data for all users including shared data files.
entailment
def download_member_project_data(cls, member_data, target_member_dir, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download files to sync a local dir to match OH member project data. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param max_size: This field is the maximum file size. Its default value is 128m. """ logging.debug('Download member project data...') sources_shared = member_data['sources_shared'] file_data = cls._get_member_file_data(member_data, id_filename=id_filename) for basename in file_data: # This is using a trick to identify a project's own data in an API # response, without knowing the project's identifier: if the data # isn't a shared data source, it must be the project's own data. if file_data[basename]['source'] in sources_shared: continue target_filepath = os.path.join(target_member_dir, basename) download_file(download_url=file_data[basename]['download_url'], target_filepath=target_filepath, max_bytes=parse_size(max_size))
Download files to sync a local dir to match OH member project data. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param max_size: This field is the maximum file size. Its default value is 128m.
entailment
def download_member_shared(cls, member_data, target_member_dir, source=None, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download files to sync a local dir to match OH member shared data. Files are downloaded to match their "basename" on Open Humans. If there are multiple files with the same name, the most recent is downloaded. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param source: This field is the source from which to download data. :param max_size: This field is the maximum file size. Its default value is 128m. """ logging.debug('Download member shared data...') sources_shared = member_data['sources_shared'] file_data = cls._get_member_file_data(member_data, id_filename=id_filename) logging.info('Downloading member data to {}'.format(target_member_dir)) for basename in file_data: # If not in sources shared, it's the project's own data. Skip. if file_data[basename]['source'] not in sources_shared: continue # Filter source if specified. Determine target directory for file. if source: if source == file_data[basename]['source']: target_filepath = os.path.join(target_member_dir, basename) else: continue else: source_data_dir = os.path.join(target_member_dir, file_data[basename]['source']) if not os.path.exists(source_data_dir): os.mkdir(source_data_dir) target_filepath = os.path.join(source_data_dir, basename) download_file(download_url=file_data[basename]['download_url'], target_filepath=target_filepath, max_bytes=parse_size(max_size))
Download files to sync a local dir to match OH member shared data. Files are downloaded to match their "basename" on Open Humans. If there are multiple files with the same name, the most recent is downloaded. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory where data will be downloaded. :param source: This field is the source from which to download data. :param max_size: This field is the maximum file size. Its default value is 128m.
entailment
def download_all(self, target_dir, source=None, project_data=False, memberlist=None, excludelist=None, max_size=MAX_SIZE_DEFAULT, id_filename=False): """ Download data for all users including shared data files. :param target_dir: This field is the target directory to download data. :param source: This field is the data source. Its default value is None. :param project_data: This field is data related to particular project. Its default value is False. :param memberlist: This field is list of members whose data will be downloaded. Its default value is None. :param excludelist: This field is list of members whose data will be skipped. Its default value is None. :param max_size: This field is the maximum file size. Its default value is 128m. """ members = self.project_data.keys() for member in members: if memberlist is not None and member not in memberlist: logging.debug('Skipping {}, not in memberlist'.format(member)) continue if excludelist and member in excludelist: logging.debug('Skipping {}, in excludelist'.format(member)) continue member_dir = os.path.join(target_dir, member) if not os.path.exists(member_dir): os.mkdir(member_dir) if project_data: self.download_member_project_data( member_data=self.project_data[member], target_member_dir=member_dir, max_size=max_size, id_filename=id_filename) else: self.download_member_shared( member_data=self.project_data[member], target_member_dir=member_dir, source=source, max_size=max_size, id_filename=id_filename)
Download data for all users including shared data files. :param target_dir: This field is the target directory to download data. :param source: This field is the data source. Its default value is None. :param project_data: This field is data related to particular project. Its default value is False. :param memberlist: This field is list of members whose data will be downloaded. Its default value is None. :param excludelist: This field is list of members whose data will be skipped. Its default value is None. :param max_size: This field is the maximum file size. Its default value is 128m.
entailment
def upload_member_from_dir(member_data, target_member_dir, metadata, access_token, mode='default', max_size=MAX_SIZE_DEFAULT): """ Upload files in target directory to an Open Humans member's account. The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. If the 'mode' parameter is 'safe': matching filenames will not be overwritten. If the 'mode' parameter is 'sync': files on Open Humans that are not in the local directory will be deleted. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory from where data will be uploaded. :param metadata: This field is metadata for files to be uploaded. :param access_token: This field is user specific access token. :param mode: This field takes one of three values: 'default', 'sync', 'safe'. Its default value is 'default'. :param max_size: This field is the maximum file size. Its default value is 128m. """ if not validate_metadata(target_member_dir, metadata): raise ValueError('Metadata should match directory contents!') project_data = {f['basename']: f for f in member_data['data'] if f['source'] not in member_data['sources_shared']} for filename in metadata: if filename in project_data and mode == 'safe': logging.info('Skipping {}, remote exists with matching' ' name'.format(filename)) continue filepath = os.path.join(target_member_dir, filename) remote_file_info = (project_data[filename] if filename in project_data else None) upload_aws(target_filepath=filepath, metadata=metadata[filename], access_token=access_token, project_member_id=member_data['project_member_id'], remote_file_info=remote_file_info) if mode == 'sync': for filename in project_data: if filename not in metadata: logging.debug("Deleting {}".format(filename)) delete_file( file_basename=filename, access_token=access_token, project_member_id=member_data['project_member_id'])
Upload files in target directory to an Open Humans member's account. The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. If the 'mode' parameter is 'safe': matching filenames will not be overwritten. If the 'mode' parameter is 'sync': files on Open Humans that are not in the local directory will be deleted. :param member_data: This field is data related to member in a project. :param target_member_dir: This field is the target directory from where data will be uploaded. :param metadata: This field is metadata for files to be uploaded. :param access_token: This field is user specific access token. :param mode: This field takes one of three values: 'default', 'sync', 'safe'. Its default value is 'default'. :param max_size: This field is the maximum file size. Its default value is 128m.
entailment
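A sketch of the metadata mapping `upload_member_from_dir` expects: one entry per local filename, each carrying the compulsory description and tags. The directory, token and `member_data` values below are placeholders:

```python
# `member_data` would normally come from exchange_oauth2_member(); this
# minimal stand-in only shows the shape the function reads.
member_data = {'project_member_id': '12345678', 'data': [],
               'sources_shared': []}
metadata = {
    'results.csv': {
        'description': 'Monthly activity summary',
        'tags': ['csv', 'activity'],
    },
}
# mode='sync' also deletes remote files that are absent locally.
upload_member_from_dir(member_data, './members/12345678', metadata,
                       access_token='ACCESS_TOKEN', mode='sync')
```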
def oauth2_auth_url(redirect_uri=None, client_id=None, base_url=OH_BASE_URL): """ Returns an OAuth2 authorization URL for a project, given Client ID. This function constructs an authorization URL for a user to follow. The user will be redirected to Authorize Open Humans data for our external application. An OAuth2 project on Open Humans is required for this to properly work. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/ :param redirect_uri: This field is set to `None` by default. However, if provided, it is appended to the URL returned. :param client_id: This field is also set to `None` by default; however, it is a mandatory field for the final URL to work. It uniquely identifies a given OAuth2 project. :param base_url: It is this URL `https://www.openhumans.org`. """ if not client_id: client_id = os.getenv('OHAPI_CLIENT_ID') if not client_id: raise SettingsError( "Client ID not provided! Provide client_id as a parameter, " "or set OHAPI_CLIENT_ID in your environment.") params = OrderedDict([ ('client_id', client_id), ('response_type', 'code'), ]) if redirect_uri: params['redirect_uri'] = redirect_uri auth_url = urlparse.urljoin( base_url, '/direct-sharing/projects/oauth2/authorize/?{}'.format( urlparse.urlencode(params))) return auth_url
Returns an OAuth2 authorization URL for a project, given Client ID. This function constructs an authorization URL for a user to follow. The user will be redirected to Authorize Open Humans data for our external application. An OAuth2 project on Open Humans is required for this to properly work. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/ :param redirect_uri: This field is set to `None` by default. However, if provided, it is appended to the URL returned. :param client_id: This field is also set to `None` by default; however, it is a mandatory field for the final URL to work. It uniquely identifies a given OAuth2 project. :param base_url: It is this URL `https://www.openhumans.org`.
entailment
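Assuming a registered OAuth2 project (the client id and redirect URI below are placeholders), usage looks like:

```python
url = oauth2_auth_url(redirect_uri='https://example.com/callback',
                      client_id='MY_CLIENT_ID')
# The user follows this URL to authorize the project; Open Humans then
# redirects back to the redirect URI with a one-time code.
print(url)
```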
def oauth2_token_exchange(client_id, client_secret, redirect_uri, base_url=OH_BASE_URL, code=None, refresh_token=None): """ Exchange code or refresh token for a new token and refresh token. For the first time when a project is created, code is required to generate refresh token. Once the refresh token is obtained, it can be used later on for obtaining new access token and refresh token. The user must store the refresh token to obtain the new access token. For more details visit: https://www.openhumans.org/direct-sharing/oauth2-setup/#setup-oauth2-authorization :param client_id: This field is the client id of user. :param client_secret: This field is the client secret of user. :param redirect_uri: This is the user redirect uri. :param base_url: It is this URL `https://www.openhumans.org` :param code: This field is used to obtain access_token for the first time. Its default value is None. :param refresh_token: This field is used to obtain a new access_token when the token expires. """ if not (code or refresh_token) or (code and refresh_token): raise ValueError("Either code or refresh_token must be specified.") if code: data = { 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri, 'code': code, } elif refresh_token: data = { 'grant_type': 'refresh_token', 'refresh_token': refresh_token, } token_url = urlparse.urljoin(base_url, '/oauth2/token/') req = requests.post( token_url, data=data, auth=requests.auth.HTTPBasicAuth(client_id, client_secret)) handle_error(req, 200) data = req.json() return data
Exchange code or refresh token for a new token and refresh token. For the first time when a project is created, code is required to generate refresh token. Once the refresh token is obtained, it can be used later on for obtaining new access token and refresh token. The user must store the refresh token to obtain the new access token. For more details visit: https://www.openhumans.org/direct-sharing/oauth2-setup/#setup-oauth2-authorization :param client_id: This field is the client id of user. :param client_secret: This field is the client secret of user. :param redirect_uri: This is the user redirect uri. :param base_url: It is this URL `https://www.openhumans.org` :param code: This field is used to obtain access_token for the first time. Its default value is None. :param refresh_token: This field is used to obtain a new access_token when the token expires.
entailment
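The two flows in `oauth2_token_exchange`, sketched with placeholder credentials: an initial exchange of the one-time code, then a later refresh once the access token expires:

```python
# First exchange: trade the authorization code for tokens.
tokens = oauth2_token_exchange('CLIENT_ID', 'CLIENT_SECRET',
                               'https://example.com/callback',
                               code='AUTHORIZATION_CODE')

# Later: trade the stored refresh token for a fresh access token.
tokens = oauth2_token_exchange('CLIENT_ID', 'CLIENT_SECRET',
                               'https://example.com/callback',
                               refresh_token=tokens['refresh_token'])
```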
def get_page(url): """ Get a single page of results. :param url: This field is the url from which data will be requested. """ response = requests.get(url) handle_error(response, 200) data = response.json() return data
Get a single page of results. :param url: This field is the url from which data will be requested.
entailment
def get_all_results(starting_page): """ Given a starting API query for Open Humans, iterate to get all results. :param starting_page: This field is the first page, starting from which results will be obtained. """ logging.info('Retrieving all results for {}'.format(starting_page)) page = starting_page results = [] while True: logging.debug('Getting data from: {}'.format(page)) data = get_page(page) logging.debug('JSON data: {}'.format(data)) results = results + data['results'] if data['next']: page = data['next'] else: break return results
Given a starting API query for Open Humans, iterate to get all results. :param starting_page: This field is the first page, starting from which results will be obtained.
entailment
def exchange_oauth2_member(access_token, base_url=OH_BASE_URL, all_files=False): """ Returns data for a specific user, including shared data files. :param access_token: This field is the user specific access_token. :param base_url: It is this URL `https://www.openhumans.org`. :param all_files: This is a boolean field to fetch all the data files of the member. Its default value is False. """ url = urlparse.urljoin( base_url, '/api/direct-sharing/project/exchange-member/?{}'.format( urlparse.urlencode({'access_token': access_token}))) member_data = get_page(url) returned = member_data.copy() # Get all file data if all_files is True. if all_files: while member_data['next']: member_data = get_page(member_data['next']) returned['data'] = returned['data'] + member_data['data'] logging.debug('JSON data: {}'.format(returned)) return returned
Returns data for a specific user, including shared data files. :param access_token: This field is the user specific access_token. :param base_url: It is this URL `https://www.openhumans.org`. :param all_files: This is a boolean field to fetch all the data files of the member. Its default value is False.
entailment
def delete_file(access_token, project_member_id=None, base_url=OH_BASE_URL, file_basename=None, file_id=None, all_files=False): """ Delete project member files by file_basename, file_id, or all_files. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/. :param access_token: This field is user specific access_token. :param project_member_id: This field is the project member id of user. It's default value is None. :param base_url: It is this URL `https://www.openhumans.org`. :param file_basename: This field is the name of the file to delete for the particular user for the particular project. :param file_id: This field is the id of the file to delete for the particular user for the particular project. :param all_files: This is a boolean field to delete all files for the particular user for the particular project. """ url = urlparse.urljoin( base_url, '/api/direct-sharing/project/files/delete/?{}'.format( urlparse.urlencode({'access_token': access_token}))) if not(project_member_id): response = exchange_oauth2_member(access_token, base_url=base_url) project_member_id = response['project_member_id'] data = {'project_member_id': project_member_id} if file_basename and not (file_id or all_files): data['file_basename'] = file_basename elif file_id and not (file_basename or all_files): data['file_id'] = file_id elif all_files and not (file_id or file_basename): data['all_files'] = True else: raise ValueError( "One (and only one) of the following must be specified: " "file_basename, file_id, or all_files is set to True.") response = requests.post(url, data=data) handle_error(response, 200) return response
Delete project member files by file_basename, file_id, or all_files. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/. :param access_token: This field is user specific access_token. :param project_member_id: This field is the project member id of user. It's default value is None. :param base_url: It is this URL `https://www.openhumans.org`. :param file_basename: This field is the name of the file to delete for the particular user for the particular project. :param file_id: This field is the id of the file to delete for the particular user for the particular project. :param all_files: This is a boolean field to delete all files for the particular user for the particular project.
entailment
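The three mutually exclusive deletion modes of `delete_file`, with placeholder token and ids; exactly one selector may be passed per call:

```python
delete_file('ACCESS_TOKEN', '12345678', file_basename='report.csv')
delete_file('ACCESS_TOKEN', '12345678', file_id='42')
delete_file('ACCESS_TOKEN', '12345678', all_files=True)
```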
def message(subject, message, access_token, all_members=False, project_member_ids=None, base_url=OH_BASE_URL): """ Send an email to individual users or in bulk. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/ :param subject: This field is the subject of the email. :param message: This field is the body of the email. :param access_token: This is user specific access token/master token. :param all_members: This is a boolean field to send email to all members of the project. :param project_member_ids: This field is the list of project_member_id. :param base_url: It is this URL `https://www.openhumans.org`. """ url = urlparse.urljoin( base_url, '/api/direct-sharing/project/message/?{}'.format( urlparse.urlencode({'access_token': access_token}))) if not(all_members) and not(project_member_ids): response = requests.post(url, data={'subject': subject, 'message': message}) handle_error(response, 200) return response elif all_members and project_member_ids: raise ValueError( "One (and only one) of the following must be specified: " "project_member_ids or all_members is set to True.") else: r = requests.post(url, data={'all_members': all_members, 'project_member_ids': project_member_ids, 'subject': subject, 'message': message}) handle_error(r, 200) return r
Send an email to individual users or in bulk. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/ :param subject: This field is the subject of the email. :param message: This field is the body of the email. :param access_token: This is user specific access token/master token. :param all_members: This is a boolean field to send email to all members of the project. :param project_member_ids: This field is the list of project_member_id. :param base_url: It is this URL `https://www.openhumans.org`.
entailment
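The two bulk branches of `message` above, with placeholder values: a broadcast to every project member, and a targeted send to specific project member ids:

```python
message('Welcome', 'Thanks for joining!', 'MASTER_TOKEN', all_members=True)
message('Welcome', 'Thanks for joining!', 'MASTER_TOKEN',
        project_member_ids=['12345678', '87654321'])
```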
def handle_error(r, expected_code): """ Helper function to match the response of a request to the expected status code. :param r: This field is the response of the request. :param expected_code: This field is the expected status code for the function. """ code = r.status_code if code != expected_code: info = 'API response status code {}'.format(code) try: if 'detail' in r.json(): info = info + ": {}".format(r.json()['detail']) elif 'metadata' in r.json(): info = info + ": {}".format(r.json()['metadata']) except json.decoder.JSONDecodeError: info = info + ":\n{}".format(r.content) raise Exception(info)
Helper function to match the response of a request to the expected status code. :param r: This field is the response of the request. :param expected_code: This field is the expected status code for the function.
entailment
def upload_stream(stream, filename, metadata, access_token, base_url=OH_BASE_URL, remote_file_info=None, project_member_id=None, max_bytes=MAX_FILE_DEFAULT, file_identifier=None): """ Upload a file object using the "direct upload" feature, which uploads to an S3 bucket URL provided by the Open Humans API. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param stream: This field is the stream (or file object) to be uploaded. :param metadata: This field is the metadata associated with the file. Description and tags are compulsory fields of metadata. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m. :param file_identifier: If provided, this is used in logging output. Its default value is None (in which case, filename is used). """ if not file_identifier: file_identifier = filename # Determine the stream's size using seek; stream is a file-like object. old_position = stream.tell() stream.seek(0, os.SEEK_END) filesize = stream.tell() stream.seek(old_position, os.SEEK_SET) if filesize == 0: raise Exception('The submitted file is empty.') # Check size, and possibly remote file match. if _exceeds_size(filesize, max_bytes, file_identifier): raise ValueError("Maximum file size exceeded") if remote_file_info: response = requests.get(remote_file_info['download_url'], stream=True) remote_size = int(response.headers['Content-Length']) if remote_size == filesize: info_msg = ('Skipping {}, remote exists with matching ' 'file size'.format(file_identifier)) logging.info(info_msg) return info_msg url = urlparse.urljoin( base_url, '/api/direct-sharing/project/files/upload/direct/?{}'.format( urlparse.urlencode({'access_token': access_token}))) if not(project_member_id): response = exchange_oauth2_member(access_token, base_url=base_url) project_member_id = response['project_member_id'] data = {'project_member_id': project_member_id, 'metadata': json.dumps(metadata), 'filename': filename} r1 = requests.post(url, data=data) handle_error(r1, 201) r2 = requests.put(url=r1.json()['url'], data=stream) handle_error(r2, 200) done = urlparse.urljoin( base_url, '/api/direct-sharing/project/files/upload/complete/?{}'.format( urlparse.urlencode({'access_token': access_token}))) r3 = requests.post(done, data={'project_member_id': project_member_id, 'file_id': r1.json()['id']}) handle_error(r3, 200) logging.info('Upload complete: {}'.format(file_identifier)) return r3
Upload a file object using the "direct upload" feature, which uploads to an S3 bucket URL provided by the Open Humans API. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param stream: This field is the stream (or file object) to be uploaded. :param metadata: This field is the metadata associated with the file. Description and tags are compulsory fields of metadata. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m. :param file_identifier: If provided, this is used in logging output. Its default value is None (in which case, filename is used).
entailment
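Because `upload_stream` only needs a seekable file-like object, in-memory data can be uploaded without touching disk. A sketch with placeholder token and metadata:

```python
import io

# Any seekable stream works; here an in-memory CSV.
stream = io.BytesIO(b'timestamp,steps\n2019-01-01,4200\n')
upload_stream(stream, 'steps.csv',
              metadata={'description': 'Step counts', 'tags': ['csv']},
              access_token='ACCESS_TOKEN')
```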
def upload_file(target_filepath, metadata, access_token, base_url=OH_BASE_URL, remote_file_info=None, project_member_id=None, max_bytes=MAX_FILE_DEFAULT): """ Upload a file from a local filepath using the "direct upload" API. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param target_filepath: This field is the filepath of the file to be uploaded. :param metadata: This field is a python dictionary with keys filename, description and tags for single user upload and filename, project member id, description and tags for multiple user upload. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m. """ with open(target_filepath, 'rb') as stream: filename = os.path.basename(target_filepath) return upload_stream(stream, filename, metadata, access_token, base_url, remote_file_info, project_member_id, max_bytes, file_identifier=target_filepath)
Upload a file from a local filepath using the "direct upload" API. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param target_filepath: This field is the filepath of the file to be uploaded. :param metadata: This field is a python dictionary with keys filename, description and tags for single user upload and filename, project member id, description and tags for multiple user upload. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m.
entailment
def upload_aws(target_filepath, metadata, access_token, base_url=OH_BASE_URL, remote_file_info=None, project_member_id=None, max_bytes=MAX_FILE_DEFAULT): """ Upload a file from a local filepath using the "direct upload" API. Equivalent to upload_file. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param target_filepath: This field is the filepath of the file to be uploaded. :param metadata: This field is the metadata associated with the file. Description and tags are compulsory fields of metadata. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m. """ return upload_file(target_filepath, metadata, access_token, base_url, remote_file_info, project_member_id, max_bytes)
Upload a file from a local filepath using the "direct upload" API. Equivalent to upload_file. To learn more about this API endpoint see: * https://www.openhumans.org/direct-sharing/on-site-data-upload/ * https://www.openhumans.org/direct-sharing/oauth2-data-upload/ :param target_filepath: This field is the filepath of the file to be uploaded. :param metadata: This field is the metadata associated with the file. Description and tags are compulsory fields of metadata. :param access_token: This is user specific access token/master token. :param base_url: It is this URL `https://www.openhumans.org`. :param remote_file_info: This field is for checking if a file with matching name and file size already exists. Its default value is None. :param project_member_id: This field is the project member id of the member. Its default value is None. :param max_bytes: This field is the maximum file size a user can upload. Its default value is 128m.
entailment
def pack_list(from_, pack_type): """ Return the wire packed version of `from_`. `pack_type` should be some subclass of `xcffib.Struct`, or a string that can be passed to `struct.pack`. """ # We need from_ to not be empty if len(from_) == 0: return bytes() if pack_type == 'c': if isinstance(from_, bytes): # Catch Python 3 bytes and Python 2 strings # PY3 is "helpful" in that when you do tuple(b'foo') you get # (102, 111, 111) instead of something more reasonable like # (b'f', b'o', b'o'), so we rebuild from_ as a tuple of bytes from_ = [six.int2byte(b) for b in six.iterbytes(from_)] elif isinstance(from_, six.string_types): # Catch Python 3 strings and Python 2 unicode strings, both of # which we encode to bytes as utf-8 # Here we create the tuple of bytes from the encoded string from_ = [six.int2byte(b) for b in bytearray(from_, 'utf-8')] elif isinstance(from_[0], six.integer_types): # Pack from_ as char array, where from_ may be an array of ints # possibly greater than 256 def to_bytes(v): for _ in range(4): v, r = divmod(v, 256) yield r from_ = [six.int2byte(b) for i in from_ for b in to_bytes(i)] if isinstance(pack_type, six.string_types): return struct.pack("=" + pack_type * len(from_), *from_) else: buf = six.BytesIO() for item in from_: # If we can't pack it, you'd better have packed it yourself. But # let's not confuse things which aren't our Protobjs for packable # things. if isinstance(item, Protobj) and hasattr(item, "pack"): buf.write(item.pack()) else: buf.write(item) return buf.getvalue()
Return the wire packed version of `from_`. `pack_type` should be some subclass of `xcffib.Struct`, or a string that can be passed to `struct.pack`.
entailment
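A rough illustration of the two packing paths in `pack_list`: a `struct.pack` format character applied per item, and the special `'c'` handling that normalizes strings, bytes and ints into a char array:

```python
import struct

# Integers packed with a struct format character:
assert pack_list([1, 2, 3], 'I') == struct.pack('=III', 1, 2, 3)

# Strings are encoded to utf-8 and packed byte by byte:
assert pack_list('abc', 'c') == b'abc'
```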
def ensure_connected(f): """ Check that the connection is valid both before and after the function is invoked. """ @functools.wraps(f) def wrapper(*args): self = args[0] self.invalid() try: return f(*args) finally: self.invalid() return wrapper
Check that the connection is valid both before and after the function is invoked.
entailment
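A hypothetical use of the `ensure_connected` guard; the `Connection` class below is a stand-in to show the decorator's shape, not xcffib's real class:

```python
class Connection:
    def invalid(self):
        # In the real binding this raises if the underlying
        # xcb connection has been broken.
        pass

    @ensure_connected
    def flush(self):
        return 'flushed'

Connection().flush()  # invalid() runs before and after the call
```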
def get_screen_pointers(self): """ Returns the xcb_screen_t for every screen, which is useful for other bindings. """ root_iter = lib.xcb_setup_roots_iterator(self._setup) screens = [root_iter.data] for i in range(self._setup.roots_len - 1): lib.xcb_screen_next(ffi.addressof(root_iter)) screens.append(root_iter.data) return screens
Returns the xcb_screen_t for every screen, which is useful for other bindings.
entailment
def hoist_event(self, e): """ Hoist an xcb_generic_event_t to the right xcffib structure. """ if e.response_type == 0: return self._process_error(ffi.cast("xcb_generic_error_t *", e)) # We mask off the high bit here because events sent with SendEvent have # this bit set. We don't actually care where the event came from, so we # just throw this away. Maybe we could expose this, if anyone actually # cares about it. event = self._event_offsets[e.response_type & 0x7f] buf = CffiUnpacker(e) return event(buf)
Hoist an xcb_generic_event_t to the right xcffib structure.
entailment
def serialize(self, value, greedy=True): """ Greedy serialization requires the value to either be a column or convertible to a column, whereas non-greedy serialization will pass through any string as-is and will only serialize Column objects. Non-greedy serialization is useful when preparing queries with custom filters or segments. """ if greedy and not isinstance(value, Column): value = self.normalize(value) if isinstance(value, Column): return value.id else: return value
Greedy serialization requires the value to either be a column or convertible to a column, whereas non-greedy serialization will pass through any string as-is and will only serialize Column objects. Non-greedy serialization is useful when preparing queries with custom filters or segments.
entailment
def describe(profile, description): """ Generate a query by describing it as a series of actions and parameters to those actions. These map directly to Query methods and arguments to those methods. This is an alternative to the chaining interface. Mostly useful if you'd like to put your queries in a file, rather than in Python code. """ api_type = description.pop('type', 'core') api = getattr(profile, api_type) return refine(api.query, description)
Generate a query by describing it as a series of actions and parameters to those actions. These map directly to Query methods and arguments to those methods. This is an alternative to the chaining interface. Mostly useful if you'd like to put your queries in a file, rather than in Python code.
entailment
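A sketch of the description dictionary `describe` consumes: keys name `Query` methods such as `range`, `sort` and `limit`, and values are their arguments. The profile and column names below are illustrative:

```python
description = {
    'type': 'core',
    'range': {'start': '2014-01-01', 'months': 1},
    'sort': ['-pageviews'],
    'limit': 10,
}
# `profile` is a placeholder for an authenticated profile object.
query = describe(profile, description)
```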
def refine(query, description): """ Refine a query from a dictionary of parameters that describes it. See `describe` for more information. """ for name, arguments in description.items(): if hasattr(query, name): attribute = getattr(query, name) else: raise ValueError("Unknown query method: " + name) # query descriptions are often automatically generated, and # may include empty calls, which we skip if utils.isempty(arguments): continue if callable(attribute): method = attribute if isinstance(arguments, dict): query = method(**arguments) elif isinstance(arguments, list): query = method(*arguments) else: query = method(arguments) else: setattr(query, name, arguments) return query
Refine a query from a dictionary of parameters that describes it. See `describe` for more information.
entailment
def set(self, key=None, value=None, **kwargs): """ `set` is a way to add raw properties to the request, for features that this module does not support or supports incompletely. For convenience's sake, it will serialize Column objects but will leave any other kind of value alone. """ serialize = partial(self.api.columns.serialize, greedy=False) if key and value: self.raw[key] = serialize(value) elif key or kwargs: properties = key or kwargs for key, value in properties.items(): self.raw[key] = serialize(value) else: raise ValueError( "Query#set requires a key and value, a properties dictionary or keyword arguments.") return self
`set` is a way to add raw properties to the request, for features that this module does not support or supports incompletely. For convenience's sake, it will serialize Column objects but will leave any other kind of value alone.
entailment
def description(self): """ A human-readable description of the metrics this query will ask for. """ if 'metrics' in self.raw: metrics = self.raw['metrics'] head = metrics[0:-1] or metrics[0:1] text = ", ".join(head) if len(metrics) > 1: tail = metrics[-1] text = text + " and " + tail else: text = 'n/a' return text
A human-readable description of the metrics this query will ask for.
entailment
def sort(self, *columns, **options): """ Return a new query which will produce results sorted by one or more metrics or dimensions. You may use plain strings for the columns, or actual `Column`, `Metric` and `Dimension` objects. Add a minus in front of the metric (either the string or the object) to sort in descending order. ```python # sort using strings query.sort('pageviews', '-device type') # alternatively, ask for a descending sort in a keyword argument query.sort('pageviews', descending=True) # sort using metric, dimension or column objects pageviews = profile.core.metrics['pageviews'] query.sort(-pageviews) ``` """ sorts = self.meta.setdefault('sort', []) for column in columns: descending = options.get('descending', False) if isinstance(column, Column): identifier = column.id elif isinstance(column, utils.basestring): descending = column.startswith('-') or descending identifier = self.api.columns[column.lstrip('-')].id else: raise ValueError("Can only sort on columns or column strings. Received: {}".format(column)) if descending: sign = '-' else: sign = '' sorts.append(sign + identifier) self.raw['sort'] = ",".join(sorts) return self
Return a new query which will produce results sorted by one or more metrics or dimensions. You may use plain strings for the columns, or actual `Column`, `Metric` and `Dimension` objects. Add a minus in front of the metric (either the string or the object) to sort in descending order. ```python # sort using strings query.sort('pageviews', '-device type') # alternatively, ask for a descending sort in a keyword argument query.sort('pageviews', descending=True) # sort using metric, dimension or column objects pageviews = profile.core.metrics['pageviews'] query.sort(-pageviews) ```
entailment
def filter(self, value=None, exclude=False, **selection): """ Most of the actual functionality lives on the Column object and the `all` and `any` functions. """ filters = self.meta.setdefault('filters', []) if value and len(selection): raise ValueError("Cannot specify a filter string and a filter keyword selection at the same time.") elif value: value = [value] elif len(selection): value = select(self.api.columns, selection, invert=exclude) filters.append(value) self.raw['filters'] = utils.paste(filters, ',', ';') return self
Most of the actual functionality lives on the Column object and the `all` and `any` functions.
entailment
def precision(self, precision): """ For queries that should run faster, you may specify a lower precision, and for those that need to be more precise, a higher precision: ```python # faster queries query.range('2014-01-01', '2014-01-31', precision=0) query.range('2014-01-01', '2014-01-31', precision='FASTER') # queries with the default level of precision (usually what you want) query.range('2014-01-01', '2014-01-31') query.range('2014-01-01', '2014-01-31', precision=1) query.range('2014-01-01', '2014-01-31', precision='DEFAULT') # queries that are more precise query.range('2014-01-01', '2014-01-31', precision=2) query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION') ``` """ if isinstance(precision, int): precision = self.PRECISION_LEVELS[precision] if precision not in self.PRECISION_LEVELS: levels = ", ".join(self.PRECISION_LEVELS) raise ValueError("Precision should be one of: " + levels) if precision != 'DEFAULT': self.raw.update({'samplingLevel': precision}) return self
For queries that should run faster, you may specify a lower precision, and for those that need to be more precise, a higher precision: ```python # faster queries query.range('2014-01-01', '2014-01-31', precision=0) query.range('2014-01-01', '2014-01-31', precision='FASTER') # queries with the default level of precision (usually what you want) query.range('2014-01-01', '2014-01-31') query.range('2014-01-01', '2014-01-31', precision=1) query.range('2014-01-01', '2014-01-31', precision='DEFAULT') # queries that are more precise query.range('2014-01-01', '2014-01-31', precision=2) query.range('2014-01-01', '2014-01-31', precision='HIGHER_PRECISION') ```
entailment
def interval(self, granularity): """ Note that if you don't specify a granularity (either through the `interval` method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly` shortcut methods) you will get only a single result, encompassing the entire date range, per metric. """ if granularity == 'total': return self if not isinstance(granularity, int): if granularity in self.GRANULARITY_LEVELS: granularity = self.GRANULARITY_LEVELS.index(granularity) else: levels = ", ".join(self.GRANULARITY_LEVELS) raise ValueError("Granularity should be one of: total, " + levels) dimension = self.GRANULARITY_DIMENSIONS[granularity] self.raw['dimensions'].insert(0, dimension) return self
Note that if you don't specify a granularity (either through the `interval` method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly` shortcut methods) you will get only a single result, encompassing the entire date range, per metric.
entailment
def range(self, start=None, stop=None, months=0, days=0): """ Return a new query that fetches metrics within a certain date range. ```python query.range('2014-01-01', '2014-06-30') ``` If you don't specify a `stop` argument, the date range will end today. If instead you meant to fetch just a single day's results, try: ```python query.range('2014-01-01', days=1) ``` More generally, you can specify that you'd like a certain number of days, starting from a certain date: ```python query.range('2014-01-01', months=3) query.range('2014-01-01', days=28) ``` Note that if you don't specify a granularity (either through the `interval` method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly` shortcut methods) you will get only a single result, encompassing the entire date range, per metric. **Note:** it is currently not possible to easily specify that you'd like to query the last full week(s), month(s) et cetera. This will be added sometime in the future. """ start, stop = utils.date.range(start, stop, months, days) self.raw.update({ 'start_date': start, 'end_date': stop, }) return self
Return a new query that fetches metrics within a certain date range. ```python query.range('2014-01-01', '2014-06-30') ``` If you don't specify a `stop` argument, the date range will end today. If instead you meant to fetch just a single day's results, try: ```python query.range('2014-01-01', days=1) ``` More generally, you can specify that you'd like a certain number of days, starting from a certain date: ```python query.range('2014-01-01', months=3) query.range('2014-01-01', days=28) ``` Note that if you don't specify a granularity (either through the `interval` method or through the `hourly`, `daily`, `weekly`, `monthly` or `yearly` shortcut methods) you will get only a single result, encompassing the entire date range, per metric. **Note:** it is currently not possible to easily specify that you'd like to query the last full week(s), month(s) et cetera. This will be added sometime in the future.
entailment
def limit(self, *_range): """ Return a new query, limited to a certain number of results. ```python # first 100 query.limit(100) # 50 to 60 query.limit(50, 10) ``` Please note carefully that Google Analytics uses 1-indexing on its rows. """ # uses the same argument order as # LIMIT in a SQL database if len(_range) == 2: start, maximum = _range else: start = 1 maximum = _range[0] self.meta['limit'] = maximum self.raw.update({ 'start_index': start, 'max_results': maximum, }) return self
Return a new query, limited to a certain number of results. ```python # first 100 query.limit(100) # 50 to 60 query.limit(50, 10) ``` Please note carefully that Google Analytics uses 1-indexing on its rows.
entailment
def segment(self, value=None, scope=None, metric_scope=None, **selection): """ Return a new query, limited to a segment of all users or sessions. Accepts segment objects, filtered segment objects and segment names: ```python query.segment(account.segments['browser']) query.segment('browser') query.segment(account.segments['browser'].any('Chrome', 'Firefox')) ``` Segment can also accept a segment expression when you pass in a `type` argument. The type argument can be either `users` or `sessions`. This is pretty close to the metal. ```python # will be translated into `users::condition::perUser::ga:sessions>10` query.segment('condition::perUser::ga:sessions>10', type='users') ``` See the [Google Analytics dynamic segments documentation][segments] You can also use the `any`, `all`, `followed_by` and `immediately_followed_by` functions in this module to chain together segments. Everything about how segments get handled is still in flux. Feel free to propose ideas for a nicer interface on the [GitHub issues page][issues] [segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference [issues]: https://github.com/debrouwere/google-analytics/issues """ """ Technical note to self about segments: * users or sessions * sequence or condition * scope (perHit, perSession, perUser -- gte primary scope) Multiple conditions can be ANDed or ORed together; these two are equivalent users::condition::ga:revenue>10;ga:sessionDuration>60 users::condition::ga:revenue>10;users::condition::ga:sessionDuration>60 For sequences, prepending ^ means the first part of the sequence has to match the first session/hit/... * users and sessions conditions can be combined (but only with AND) * sequences and conditions can also be combined (but only with AND) sessions::sequence::ga:browser==Chrome; condition::perHit::ga:timeOnPage>5 ->> ga:deviceCategory==mobile;ga:revenue>10; users::sequence::ga:deviceCategory==desktop ->> ga:deviceCategory=mobile; ga:revenue>100; condition::ga:browser==Chrome Problem: keyword arguments are passed as a dictionary, not an ordered dictionary! So e.g. this is risky query.sessions(time_on_page__gt=5, device_category='mobile', followed_by=True) """ SCOPES = { 'hits': 'perHit', 'sessions': 'perSession', 'users': 'perUser', } segments = self.meta.setdefault('segments', []) if value and len(selection): raise ValueError("Cannot specify a segment string and a segment keyword selection at the same time.") elif value: value = [self.api.segments.serialize(value)] elif len(selection): if not scope: raise ValueError("Scope is required. Choose from: users, sessions.") if metric_scope: metric_scope = SCOPES[metric_scope] value = select(self.api.columns, selection) value = [[scope, 'condition', metric_scope, condition] for condition in value] value = ['::'.join(filter(None, condition)) for condition in value] segments.append(value) self.raw['segment'] = utils.paste(segments, ',', ';') return self
Return a new query, limited to a segment of all users or sessions. Accepts segment objects, filtered segment objects and segment names: ```python query.segment(account.segments['browser']) query.segment('browser') query.segment(account.segments['browser'].any('Chrome', 'Firefox')) ``` Segment can also accept a segment expression when you pass in a `type` argument. The type argument can be either `users` or `sessions`. This is pretty close to the metal. ```python # will be translated into `users::condition::perUser::ga:sessions>10` query.segment('condition::perUser::ga:sessions>10', type='users') ``` See the [Google Analytics dynamic segments documentation][segments] You can also use the `any`, `all`, `followed_by` and `immediately_followed_by` functions in this module to chain together segments. Everything about how segments get handled is still in flux. Feel free to propose ideas for a nicer interface on the [GitHub issues page][issues] [segments]: https://developers.google.com/analytics/devguides/reporting/core/v3/segments#reference [issues]: https://github.com/debrouwere/google-analytics/issues
entailment
def next(self): """ Return a new query with a modified `start_index`. Mainly used internally to paginate through results. """ step = self.raw.get('max_results', 1000) start = self.raw.get('start_index', 1) + step self.raw['start_index'] = start return self
Return a new query with a modified `start_index`. Mainly used internally to paginate through results.
entailment
def get(self): """ Run the query and return a `Report`. This method transparently handles paginated results, so even for results that are larger than the maximum amount of rows the Google Analytics API will return in a single request, or larger than the amount of rows as specified through `CoreQuery#step`, `get` will leaf through all pages, concatenate the results and produce a single Report instance. """ cursor = self report = None is_complete = False is_enough = False while not (is_enough or is_complete): chunk = cursor.execute() if report: report.append(chunk.raw[0], cursor) else: report = chunk is_enough = len(report.rows) >= self.meta.get('limit', float('inf')) is_complete = chunk.is_complete cursor = cursor.next() return report
Run the query and return a `Report`. This method transparently handles paginated results, so even for results that are larger than the maximum amount of rows the Google Analytics API will return in a single request, or larger than the amount of rows as specified through `CoreQuery#step`, `get` will leaf through all pages, concatenate the results and produce a single Report instance.
entailment
def limit(self, maximum): """ Return a new query, limited to a certain number of results. Unlike core reporting queries, you cannot specify a starting point for live queries, just the maximum results returned. ```python # first 50 query.limit(50) ``` """ self.meta['limit'] = maximum self.raw.update({ 'max_results': maximum, }) return self
Return a new query, limited to a certain number of results. Unlike core reporting queries, you cannot specify a starting point for live queries, just the maximum results returned. ```python # first 50 query.limit(50) ```
entailment
def valid(self): """ Valid credentials are not necessarily correct, but they contain all necessary information for an authentication attempt. """ two_legged = self.client_email and self.private_key three_legged = self.client_id and self.client_secret return two_legged or three_legged or False
Valid credentials are not necessarily correct, but they contain all necessary information for an authentication attempt.
entailment
def complete(self): """ Complete credentials are valid and are either two-legged or include a token. """ return self.valid and (self.access_token or self.refresh_token or self.type == 2)
Complete credentials are valid and are either two-legged or include a token.
entailment
def authenticate( client_id=None, client_secret=None, client_email=None, private_key=None, access_token=None, refresh_token=None, account=None, webproperty=None, profile=None, identity=None, prefix=None, suffix=None, interactive=False, save=False): """ The `authenticate` function will authenticate the user with the Google Analytics API, using a variety of strategies: keyword arguments provided to this function, credentials stored in in environment variables, credentials stored in the keychain and, finally, by asking for missing information interactively in a command-line prompt. If necessary (but only if `interactive=True`) this function will also allow the user to authorize this Python module to access Google Analytics data on their behalf, using an OAuth2 token. """ credentials = oauth.Credentials.find( valid=True, interactive=interactive, prefix=prefix, suffix=suffix, client_id=client_id, client_secret=client_secret, client_email=client_email, private_key=private_key, access_token=access_token, refresh_token=refresh_token, identity=identity, ) if credentials.incomplete: if interactive: credentials = authorize( client_id=credentials.client_id, client_secret=credentials.client_secret, save=save, identity=credentials.identity, prefix=prefix, suffix=suffix, ) elif credentials.type == 2: credentials = authorize( client_email=credentials.client_email, private_key=credentials.private_key, identity=credentials.identity, save=save, ) else: raise KeyError("Cannot authenticate: enable interactive authorization, pass a token or use a service account.") accounts = oauth.authenticate(credentials) scope = navigate(accounts, account=account, webproperty=webproperty, profile=profile) return scope
The `authenticate` function will authenticate the user with the Google Analytics API, using a variety of strategies: keyword arguments provided to this function, credentials stored in environment variables, credentials stored in the keychain and, finally, by asking for missing information interactively in a command-line prompt. If necessary (but only if `interactive=True`) this function will also allow the user to authorize this Python module to access Google Analytics data on their behalf, using an OAuth2 token.
entailment
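A hypothetical usage sketch, assuming the package is importable as `googleanalytics`; the account, webproperty and identity values are placeholders borrowed from the CLI example further below:

```python
# Hypothetical usage sketch: the account/webproperty/identity values are
# placeholders. Anything not passed explicitly is looked up in environment
# variables or the keychain; interactive=True allows a prompt as a last resort.
import googleanalytics as ga

profile = ga.authenticate(
    account='debrouwere',
    webproperty='http://debrouwere.org',
    identity='debrouwere',
    interactive=True,
    save=True,  # store the resulting tokens in the keychain for next time
)
```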
def revoke(client_id, client_secret,
           client_email=None, private_key=None,
           access_token=None, refresh_token=None,
           identity=None, prefix=None, suffix=None):
    """
    Given a client id, client secret and either an access token or a
    refresh token, revoke OAuth access to the Google Analytics data
    and remove any stored credentials that use these tokens.
    """
    if client_email and private_key:
        raise ValueError('Two-legged OAuth does not use revocable tokens.')

    credentials = oauth.Credentials.find(
        complete=True,
        interactive=False,
        identity=identity,
        client_id=client_id,
        client_secret=client_secret,
        access_token=access_token,
        refresh_token=refresh_token,
        prefix=prefix,
        suffix=suffix,
        )
    retval = credentials.revoke()
    keyring.delete(credentials.identity)
    return retval
Given a client id, client secret and either an access token or a refresh token, revoke OAuth access to the Google Analytics data and remove any stored credentials that use these tokens.
entailment
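And a matching sketch for `revoke`, again with placeholder values, assuming it is exposed at package level like `authenticate`:

```python
# Hypothetical sketch: the id, secret and token values are placeholders.
import googleanalytics as ga

# Revokes API access and deletes the stored credentials for this identity.
ga.revoke(
    client_id='1234.apps.googleusercontent.com',
    client_secret='s3cr3t',
    refresh_token='1/abcdef',
    identity='debrouwere',
)
```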
def query(scope, blueprint, debug, output, with_metadata, realtime, **description):
    """
    e.g.

    googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
        query pageviews \
        --start yesterday --limit -10 --sort -pageviews \
        --dimensions pagepath \
        --debug
    """
    if realtime:
        description['type'] = 'realtime'

    if blueprint:
        queries = from_blueprint(scope, blueprint)
    else:
        if not isinstance(scope, ga.account.Profile):
            raise ValueError("Account and webproperty needed for query.")
        queries = from_args(scope, **description)

    for query in queries:
        if debug:
            click.echo(query.build())

        report = query.serialize(format=output, with_metadata=with_metadata)
        click.echo(report)
e.g.

    googleanalytics --identity debrouwere --account debrouwere --webproperty http://debrouwere.org \
        query pageviews \
        --start yesterday --limit -10 --sort -pageviews \
        --dimensions pagepath \
        --debug
entailment
def vectorize(fn):
    """
    Allows a method to accept one or more values, but internally deal
    with only a single item at a time, returning a list or a single
    item to match the input (or always a list when called with
    `wrap=True`).
    """
    @functools.wraps(fn)
    def vectorized_method(self, values, *vargs, **kwargs):
        wrap = not isinstance(values, (list, tuple))
        should_unwrap = not kwargs.setdefault('wrap', False)
        unwrap = wrap and should_unwrap
        del kwargs['wrap']

        if wrap:
            values = [values]

        results = [fn(self, value, *vargs, **kwargs) for value in values]

        if unwrap:
            results = results[0]

        return results

    return vectorized_method
Allows a method to accept one or more values, but internally deal with only a single item at a time, returning a list or a single item to match the input (or always a list when called with `wrap=True`).
entailment
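A small demonstration of the wrap/unwrap behavior; the `Normalizer` class is invented for illustration and assumes the `vectorize` decorator above (plus its `functools` import) is in scope:

```python
# Invented example class; assumes `vectorize` from above is in scope.
class Normalizer:
    @vectorize
    def normalize(self, value):
        return value.strip().lower()

n = Normalizer()
print(n.normalize('  Foo '))             # 'foo'          -- single in, single out
print(n.normalize(['  Foo ', ' BAR']))   # ['foo', 'bar'] -- list in, list out
print(n.normalize('  Foo ', wrap=True))  # ['foo']        -- wrap=True forces a list
```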
def webproperties(self):
    """
    A list of all web properties on this account. You may select a
    specific web property using its name, its id or an index.

    ```python
    account.webproperties[0]
    account.webproperties['UA-9234823-5']
    account.webproperties['debrouwere.org']
    ```
    """
    raw_properties = self.service.management().webproperties().list(
        accountId=self.id).execute()['items']
    _webproperties = [WebProperty(raw, self) for raw in raw_properties]
    return addressable.List(_webproperties, indices=['id', 'name'], insensitive=True)
A list of all web properties on this account. You may select a specific web property using its name, its id or an index.

```python
account.webproperties[0]
account.webproperties['UA-9234823-5']
account.webproperties['debrouwere.org']
```
entailment
def profiles(self):
    """
    A list of all profiles on this web property. You may select a
    specific profile using its name, its id or an index.

    ```python
    property.profiles[0]
    property.profiles['9234823']
    property.profiles['marketing profile']
    ```
    """
    raw_profiles = self.account.service.management().profiles().list(
        accountId=self.account.id, webPropertyId=self.id).execute()['items']
    profiles = [Profile(raw, self) for raw in raw_profiles]
    return addressable.List(profiles, indices=['id', 'name'], insensitive=True)
A list of all profiles on this web property. You may select a specific profile using its name, its id or an index.

```python
property.profiles[0]
property.profiles['9234823']
property.profiles['marketing profile']
```
entailment
def check_output_input(*popenargs, **kwargs):
    """Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError. The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor. Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'

    There is an additional optional argument, "input", allowing you to
    pass a string to the subprocess's stdin. If you use this argument
    you may not also use the Popen constructor's "stdin" argument, as
    it too will be used internally. Example:

    >>> check_output(["sed", "-e", "s/foo/bar/"],
    ...              input=b"when in the course of fooman events\n")
    b'when in the course of barman events\n'

    If universal_newlines=True is passed, the return value will be a
    string rather than bytes.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')

    if 'input' in kwargs:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        inputdata = kwargs['input']
        del kwargs['input']
        kwargs['stdin'] = PIPE
    else:
        inputdata = None

    process = Popen(*popenargs, stdout=PIPE, **kwargs)
    try:
        output, unused_err = process.communicate(inputdata)
    except:
        process.kill()
        process.wait()
        raise

    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd, output=output)
    return output
Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'

The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'

There is an additional optional argument, "input", allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's "stdin" argument, as it too will be used internally. Example:

    >>> check_output(["sed", "-e", "s/foo/bar/"],
    ...              input=b"when in the course of fooman events\n")
    b'when in the course of barman events\n'

If universal_newlines=True is passed, the return value will be a string rather than bytes.
entailment