def record_move_fields(rec, tag, field_positions_local,
                       field_position_local=None):
    """
    Move some fields to the position specified by 'field_position_local'.

    :param rec: a record structure as returned by create_record()
    :param tag: the tag of the fields to be moved
    :param field_positions_local: the positions of the fields to move
    :param field_position_local: insert the fields before that
        field_position_local. If unspecified, appends the fields
    :return: the field_position_local if the operation was successful
    """
    fields = record_delete_fields(
        rec, tag,
        field_positions_local=field_positions_local)
    return record_add_fields(
        rec, tag, fields,
        field_position_local=field_position_local)
def record_delete_subfield(rec, tag, subfield_code, ind1=' ', ind2=' '):
    """Delete all subfields with subfield_code in the record."""
    ind1, ind2 = _wash_indicators(ind1, ind2)

    for field in rec.get(tag, []):
        if field[1] == ind1 and field[2] == ind2:
            field[0][:] = [subfield for subfield in field[0]
                           if subfield_code != subfield[0]]
def record_get_field(rec, tag, field_position_global=None,
                     field_position_local=None):
    """
    Return the matching field.

    One has to enter either a global field position or a local field
    position.

    :return: a list of subfield tuples (subfield code, value).
    :rtype: list
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError(
            "A field position is required to complete this operation.")
    elif field_position_global is not None and \
            field_position_local is not None:
        raise InvenioBibRecordFieldError(
            "Only one field position is required to complete this "
            "operation.")
    elif field_position_global:
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)

        for field in rec[tag]:
            if field[4] == field_position_global:
                return field
        raise InvenioBibRecordFieldError(
            "No field has the tag '%s' and the global field position "
            "'%d'." % (tag, field_position_global))
    else:
        try:
            return rec[tag][field_position_local]
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and the local field position "
                "'%d'." % (tag, field_position_local))
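# Hedged usage sketch (not part of the original source): record_get_field()
# accepts exactly one of the two position arguments. The record 'rec' and
# its '100' field contents are assumed for illustration.
def _example_record_get_field(rec):
    # By global position (global positions start at 1 when parsing).
    by_global = record_get_field(rec, '100', field_position_global=1)
    # By local position (index into the list of '100' fields).
    by_local = record_get_field(rec, '100', field_position_local=0)
    return by_global, by_local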
def record_replace_field(rec, tag, new_field, field_position_global=None,
                         field_position_local=None):
    """Replace a field with a new field."""
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError(
            "A field position is required to complete this operation.")
    elif field_position_global is not None and \
            field_position_local is not None:
        raise InvenioBibRecordFieldError(
            "Only one field position is required to complete this "
            "operation.")
    elif field_position_global:
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)

        replaced = False
        for position, field in enumerate(rec[tag]):
            if field[4] == field_position_global:
                rec[tag][position] = new_field
                replaced = True

        if not replaced:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and the global field position "
                "'%d'." % (tag, field_position_global))
    else:
        try:
            rec[tag][field_position_local] = new_field
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and the local field position "
                "'%d'." % (tag, field_position_local))
def record_get_subfields(rec, tag, field_position_global=None,
                         field_position_local=None):
    """
    Return the subfields of the matching field.

    One has to enter either a global field position or a local field
    position.

    :return: a list of subfield tuples (subfield code, value).
    :rtype: list
    """
    field = record_get_field(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    return field[0]
def record_delete_subfield_from(rec, tag, subfield_position,
                                field_position_global=None,
                                field_position_local=None):
    """
    Delete subfield from position specified.

    Specify the subfield by tag, field number and subfield position.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    try:
        del subfields[subfield_position]
    except IndexError:
        raise InvenioBibRecordFieldError(
            "The record does not contain the subfield "
            "'%(subfieldIndex)s' inside the field (local: "
            "'%(fieldIndexLocal)s', global: '%(fieldIndexGlobal)s') of tag "
            "'%(tag)s'." %
            {"subfieldIndex": subfield_position,
             "fieldIndexLocal": str(field_position_local),
             "fieldIndexGlobal": str(field_position_global),
             "tag": tag})
    if not subfields:
        if field_position_global is not None:
            for position, field in enumerate(rec[tag]):
                if field[4] == field_position_global:
                    del rec[tag][position]
        else:
            del rec[tag][field_position_local]

        if not rec[tag]:
            del rec[tag]
def record_add_subfield_into(rec, tag, subfield_code, value,
                             subfield_position=None,
                             field_position_global=None,
                             field_position_local=None):
    """
    Add subfield into specified position.

    Specify the subfield by tag, field number and optionally by subfield
    position.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    if subfield_position is None:
        subfields.append((subfield_code, value))
    else:
        subfields.insert(subfield_position, (subfield_code, value))
def record_modify_controlfield(rec, tag, controlfield_value,
                               field_position_global=None,
                               field_position_local=None):
    """Modify controlfield at position specified by tag and field number."""
    field = record_get_field(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    new_field = (field[0], field[1], field[2], controlfield_value, field[4])

    record_replace_field(
        rec, tag, new_field,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
                           field_position_global=None,
                           field_position_local=None):
    """
    Modify subfield at specified position.

    Specify the subfield by tag, field number and subfield position.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    try:
        subfields[subfield_position] = (subfield_code, value)
    except IndexError:
        raise InvenioBibRecordFieldError(
            "There is no subfield with position '%d'." % subfield_position)
def record_move_subfield(rec, tag, subfield_position, new_subfield_position,
                         field_position_global=None,
                         field_position_local=None):
    """
    Move subfield at specified position.

    Specify the subfield by tag, field number and subfield position; it is
    moved to the new subfield position.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    try:
        subfield = subfields.pop(subfield_position)
        subfields.insert(new_subfield_position, subfield)
    except IndexError:
        raise InvenioBibRecordFieldError(
            "There is no subfield with position '%d'." % subfield_position)
def record_get_field_value(rec, tag, ind1=" ", ind2=" ", code=""):
    """
    Return first (string) value that matches specified field of the record.

    Return empty string if not found.

    Parameters (tag, ind1, ind2, code) can contain wildcard %.

    Difference between wildcard % and empty '':

    - Empty char specifies that we are not interested in a field which
      has one of the indicator(s)/subfield specified.

    - Wildcard specifies that we are interested in getting the value
      of the field whatever the indicator(s)/subfield is.

    For example, consider the following record in MARC::

        100C5 $$a val1
        555AB $$a val2
        555AB val3
        555 $$a val4
        555A val5

    .. doctest::

        >>> record_get_field_value(record, '555', 'A', '', '')
        'val5'
        >>> record_get_field_value(record, '555', 'A', '%', '')
        'val3'
        >>> record_get_field_value(record, '555', 'A', '%', '%')
        'val2'
        >>> record_get_field_value(record, '555', 'A', 'B', '')
        'val3'
        >>> record_get_field_value(record, '555', '', 'B', 'a')
        ''
        >>> record_get_field_value(record, '555', '', '', 'a')
        'val4'
        >>> record_get_field_value(record, '555', '', '', '')
        ''
        >>> record_get_field_value(record, '%%%', '%', '%', '%')
        'val1'

    :param rec: a record structure as returned by create_record()
    :param tag: a 3 characters long string
    :param ind1: a 1 character long string
    :param ind2: a 1 character long string
    :param code: a 1 character long string
    :return: string value (empty if nothing found)
    """
    # Note: the code is quite redundant for speed reasons (avoid calling
    # functions or doing tests inside loops).
    ind1, ind2 = _wash_indicators(ind1, ind2)

    if '%' in tag:
        # Wild card in tag. Must find all corresponding fields.
        if code == '':
            # Code not specified.
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and \
                                ind2 in ('%', field[2]):
                            # Return matching field value if not empty.
                            if field[3]:
                                return field[3]
        elif code == '%':
            # Code is wildcard. Take first subfield of first matching field.
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if (ind1 in ('%', field[1]) and
                                ind2 in ('%', field[2]) and field[0]):
                            return field[0][0][1]
        else:
            # Code is specified. Take corresponding one.
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and \
                                ind2 in ('%', field[2]):
                            for subfield in field[0]:
                                if subfield[0] == code:
                                    return subfield[1]
    else:
        # Tag is completely specified. Use tag as dict key.
        if tag in rec:
            if code == '':
                # Code not specified.
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        # Return matching field value if not empty,
                        # or return "" if it does not exist.
                        if field[3]:
                            return field[3]
            elif code == '%':
                # Code is wildcard. Take first subfield of first matching
                # field.
                for field in rec[tag]:
                    if (ind1 in ('%', field[1]) and
                            ind2 in ('%', field[2]) and field[0]):
                        return field[0][0][1]
            else:
                # Code is specified. Take corresponding one.
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        for subfield in field[0]:
                            if subfield[0] == code:
                                return subfield[1]

    # Nothing was found.
    return ""
def record_get_field_values(rec, tag, ind1=" ", ind2=" ", code="",
                            filter_subfield_code="",
                            filter_subfield_value="",
                            filter_subfield_mode="e"):
    """
    Return the list of values for the specified field of the record.

    The list can be filtered. Use filter_subfield_code and
    filter_subfield_value to search only in fields that have these values
    inside them as a subfield. filter_subfield_mode can have 3 different
    values:

    - 'e' for exact search
    - 's' for substring search
    - 'r' for regexp search

    Return empty list if nothing was found.

    Parameters (tag, ind1, ind2, code) can contain wildcard %.

    :param rec: a record structure as returned by create_record()
    :param tag: a 3 characters long string
    :param ind1: a 1 character long string
    :param ind2: a 1 character long string
    :param code: a 1 character long string
    :return: a list of strings
    """
    tmp = []

    ind1, ind2 = _wash_indicators(ind1, ind2)

    if filter_subfield_code and filter_subfield_mode == "r":
        reg_exp = re.compile(filter_subfield_value)

    tags = []
    if '%' in tag:
        # Wild card in tag. Must find all corresponding tags and fields.
        tags = [k for k in rec if _tag_matches_pattern(k, tag)]
    elif rec and tag in rec:
        tags = [tag]

    if code == '':
        # Code not specified. Consider field value (without subfields).
        for tag in tags:
            for field in rec[tag]:
                if (ind1 in ('%', field[1]) and ind2 in ('%', field[2]) and
                        field[3]):
                    tmp.append(field[3])
    elif code == '%':
        # Code is wildcard. Consider all subfields.
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            subfield_to_match = (filter_subfield_code,
                                                 filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            if (dict(field[0]).get(filter_subfield_code, '')) \
                                    .find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0])
                                             .get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            tmp.append(subfield[1])
    else:
        # Code is specified. Consider all corresponding subfields.
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            subfield_to_match = (filter_subfield_code,
                                                 filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            if (dict(field[0]).get(filter_subfield_code, '')) \
                                    .find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0])
                                             .get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            if subfield[0] == code:
                                tmp.append(subfield[1])

    # If tmp was not set, nothing was found.
    return tmp
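# Hedged usage sketch (assumed record contents, not from the original
# source): the three filter modes of record_get_field_values().
def _example_field_values(rec):
    # Exact match: only 773 fields whose $w equals 'CDS'.
    exact = record_get_field_values(rec, '773', code='a',
                                    filter_subfield_code='w',
                                    filter_subfield_value='CDS',
                                    filter_subfield_mode='e')
    # Substring match on the same subfield.
    substr = record_get_field_values(rec, '773', code='a',
                                     filter_subfield_code='w',
                                     filter_subfield_value='CD',
                                     filter_subfield_mode='s')
    # Regexp match; the pattern is compiled with re.compile().
    regexp = record_get_field_values(rec, '773', code='a',
                                     filter_subfield_code='w',
                                     filter_subfield_value='^CDS$',
                                     filter_subfield_mode='r')
    return exact, substr, regexp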
def record_xml_output(rec, tags=None, order_fn=None):
    """
    Generate the XML for record 'rec'.

    :param rec: record
    :param tags: list of tags to be printed
    :return: string
    """
    if tags is None:
        tags = []
    if isinstance(tags, str):
        tags = [tags]
    if tags and '001' not in tags:
        # Add the missing controlfield.
        tags.append('001')

    marcxml = ['<record>']

    # Add the tag 'tag' to each field in rec[tag].
    fields = []
    if rec is not None:
        for tag in rec:
            if not tags or tag in tags:
                for field in rec[tag]:
                    fields.append((tag, field))
        if order_fn is None:
            record_order_fields(fields)
        else:
            record_order_fields(fields, order_fn)
    for field in fields:
        marcxml.append(field_xml_output(field[1], field[0]))
    marcxml.append('</record>')
    return '\n'.join(marcxml)
def field_xml_output(field, tag):
    """Generate the XML for field 'field' and return it as a string."""
    marcxml = []
    if field[3]:
        marcxml.append(' <controlfield tag="%s">%s</controlfield>' %
                       (tag, MathMLParser.html_to_text(field[3])))
    else:
        marcxml.append(' <datafield tag="%s" ind1="%s" ind2="%s">' %
                       (tag, field[1], field[2]))
        marcxml += [_subfield_xml_output(subfield) for subfield in field[0]]
        marcxml.append(' </datafield>')
    return '\n'.join(marcxml)
def record_extract_dois(record):
    """Return the DOI(s) of the record."""
    record_dois = []
    tag = "024"
    ind1 = "7"
    ind2 = "_"
    subfield_source_code = "2"
    subfield_value_code = "a"
    identifiers_fields = record_get_field_instances(record, tag, ind1, ind2)
    for identifier_field in identifiers_fields:
        if 'doi' in [val.lower() for val in
                     field_get_subfield_values(identifier_field,
                                               subfield_source_code)]:
            record_dois.extend(
                field_get_subfield_values(identifier_field,
                                          subfield_value_code))
    return record_dois
def print_rec(rec, format=1, tags=None):
    """
    Print a record.

    :param format: 1 XML, 2 HTML (not implemented)
    :param tags: list of tags to be printed
    """
    if tags is None:
        tags = []
    if format == 1:
        text = record_xml_output(rec, tags)
    else:
        return ''

    return text
def print_recs(listofrec, format=1, tags=None):
    """
    Print a list of records.

    :param format: 1 XML, 2 HTML (not implemented)
    :param tags: list of tags to be printed

    If 'listofrec' is not a list, return an empty string.
    """
    if tags is None:
        tags = []
    text = ""

    if not isinstance(listofrec, list):
        return ""
    else:
        for rec in listofrec:
            text = "%s\n%s" % (text, print_rec(rec, format, tags))
    return text
def record_find_field(rec, tag, field, strict=False):
    """
    Return the global and local positions of the first occurrence of the
    field.

    :param rec:    A record dictionary structure
    :type  rec:    dictionary
    :param tag:    The tag of the field to search for
    :type  tag:    string
    :param field:  A field tuple as returned by create_field()
    :type  field:  tuple
    :param strict: A boolean describing the search method. If strict
                   is False, then the order of the subfields doesn't
                   matter. The default search method is not strict.
    :type  strict: boolean
    :return: A tuple of (global_position, local_position) or a tuple
             (None, None) if the field is not present.
    :rtype: tuple
    :raise InvenioBibRecordFieldError: If the provided field is invalid.
    """
    # Raises InvenioBibRecordFieldError if the field is malformed.
    _check_field_validity(field)

    for local_position, field1 in enumerate(rec.get(tag, [])):
        if _compare_fields(field, field1, strict):
            return (field1[4], local_position)

    return (None, None)
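# Hedged usage sketch (illustrative field tuples, not from the original
# source): strict=False ignores subfield order when locating a field.
def _example_find_field(rec):
    field = ([('a', 'Ellis, J.'), ('u', 'CERN')], ' ', ' ', '', 1)
    swapped = ([('u', 'CERN'), ('a', 'Ellis, J.')], ' ', ' ', '', 1)
    # Strict search requires the exact subfield order.
    exact_hit = record_find_field(rec, '100', field, strict=True)
    # Loose search compares the subfields as sets.
    loose_hit = record_find_field(rec, '100', swapped, strict=False)
    return exact_hit, loose_hit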
def record_match_subfields(rec, tag, ind1=" ", ind2=" ", sub_key=None,
                           sub_value='', sub_key2=None, sub_value2='',
                           case_sensitive=True):
    """
    Find subfield instances in a particular field.

    It tests values in 1 of 3 possible ways:
     - Does a subfield code exist? (i.e. does 773__a exist?)
     - Does a subfield have a particular value? (i.e. 773__a == 'PhysX')
     - Do a pair of subfields have particular values?
       (i.e. 035__2 == 'CDS' and 035__a == '123456')

    Parameters:
     * rec - dictionary: a bibrecord structure
     * tag - string: the tag of the field (i.e. '773')
     * ind1, ind2 - char: single characters for the MARC indicators
     * sub_key - char: subfield key to find
     * sub_value - string: subfield value of that key
     * sub_key2 - char: key of subfield to compare against
     * sub_value2 - string: expected value of second subfield
     * case_sensitive - bool: be case sensitive when matching values

    :return: False if no match found, else the field position (int)
    """
    if sub_key is None:
        raise TypeError("None object passed for parameter sub_key.")

    if sub_key2 is not None and sub_value2 == '':
        raise TypeError("Parameter sub_key2 defined but sub_value2 is "
                        "empty, function requires a value for comparison.")
    ind1, ind2 = _wash_indicators(ind1, ind2)

    if not case_sensitive:
        sub_value = sub_value.lower()
        sub_value2 = sub_value2.lower()

    for field in record_get_field_instances(rec, tag, ind1, ind2):
        subfields = dict(field_get_subfield_instances(field))
        if not case_sensitive:
            for k, v in subfields.items():
                subfields[k] = v.lower()

        if sub_key in subfields:
            if sub_value == '':
                return field[4]
            else:
                if sub_value == subfields[sub_key]:
                    if sub_key2 is None:
                        return field[4]
                    else:
                        if sub_key2 in subfields:
                            if sub_value2 == subfields[sub_key2]:
                                return field[4]
    return False
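# Hedged usage sketch (assumed record contents): the three test modes of
# record_match_subfields(). A match returns the global field position,
# a miss returns False.
def _example_match_subfields(rec):
    # 1) Does 773__a exist at all?
    has_code = record_match_subfields(rec, '773', sub_key='a')
    # 2) Does 773__a carry a particular value?
    has_value = record_match_subfields(rec, '773', sub_key='a',
                                       sub_value='PhysX')
    # 3) Do 035__2 and 035__a carry a particular pair of values?
    has_pair = record_match_subfields(rec, '035', sub_key='2',
                                      sub_value='CDS', sub_key2='a',
                                      sub_value2='123456')
    return has_code, has_value, has_pair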
def record_strip_empty_volatile_subfields(rec):
    """Remove unchanged volatile subfields from the record."""
    for tag in rec.keys():
        for field in rec[tag]:
            field[0][:] = [subfield for subfield in field[0]
                           if subfield[1][:9] != "VOLATILE:"]
def record_make_all_subfields_volatile(rec):
    """Turn all subfields volatile."""
    for tag in rec.keys():
        for field_position, field in enumerate(rec[tag]):
            for subfield_position, subfield in enumerate(field[0]):
                if subfield[1][:9] != "VOLATILE:":
                    record_modify_subfield(rec, tag, subfield[0],
                                           "VOLATILE:" + subfield[1],
                                           subfield_position,
                                           field_position_local=field_position)
def record_strip_empty_fields(rec, tag=None):
    """
    Remove empty subfields and fields from the record.

    If 'tag' is not None, only a specific tag of the record will be
    stripped, otherwise the whole record.

    :param rec: A record dictionary structure
    :type  rec: dictionary
    :param tag: The tag of the field to strip empty fields from
    :type  tag: string
    """
    # Check whole record.
    if tag is None:
        # Take a snapshot of the keys since fields may be deleted below.
        tags = list(rec.keys())
        for tag in tags:
            record_strip_empty_fields(rec, tag)

    # Check specific tag of the record.
    elif tag in rec:
        # In case of a controlfield.
        if tag[:2] == '00':
            if len(rec[tag]) == 0 or not rec[tag][0][3]:
                del rec[tag]

        # In case of a normal field.
        else:
            fields = []
            for field in rec[tag]:
                subfields = []
                for subfield in field[0]:
                    # Check if the subfield has been given a value.
                    if subfield[1]:
                        # Always strip values.
                        subfield = (subfield[0], subfield[1].strip())
                        subfields.append(subfield)
                if len(subfields) > 0:
                    new_field = create_field(subfields, field[1], field[2],
                                             field[3])
                    fields.append(new_field)
            if len(fields) > 0:
                rec[tag] = fields
            else:
                del rec[tag]
def record_strip_controlfields(rec):
    """
    Remove all non-empty controlfields from the record.

    :param rec: A record dictionary structure
    :type  rec: dictionary
    """
    # Snapshot the keys since entries are deleted while iterating.
    for tag in list(rec.keys()):
        if tag[:2] == '00' and rec[tag][0][3]:
            del rec[tag]
def record_order_subfields(rec, tag=None):
    """
    Order subfields from a record alphabetically based on subfield code.

    If 'tag' is not None, only a specific tag of the record will be
    reordered, otherwise the whole record.

    :param rec: bibrecord
    :type  rec: bibrec
    :param tag: tag where the subfields will be ordered
    :type  tag: str
    """
    if rec is None:
        return rec
    if tag is None:
        tags = rec.keys()
        for tag in tags:
            record_order_subfields(rec, tag)
    elif tag in rec:
        for i in range(len(rec[tag])):
            field = rec[tag][i]
            # Order subfields alphabetically by subfield code.
            ordered_subfields = sorted(field[0],
                                       key=lambda subfield: subfield[0])
            rec[tag][i] = (ordered_subfields, field[1], field[2], field[3],
                           field[4])
def field_get_subfields(field):
    """
    Given a field, place all subfields into a dictionary.

    Parameters:
     * field - tuple: The field to get subfields for

    Returns: a dictionary, codes as keys and a list of values as the value
    """
    pairs = {}
    for key, value in field[0]:
        # Accumulate repeated codes into the existing list. (The original
        # also compared the list against the string value, which was
        # always unequal, so the behaviour is unchanged.)
        if key in pairs:
            pairs[key].append(value)
        else:
            pairs[key] = [value]
    return pairs
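# Hedged usage sketch (illustrative field tuple): repeated subfield codes
# are accumulated into a list per code.
_example_field = ([('a', 'val1'), ('a', 'val2'), ('b', 'val3')],
                  ' ', ' ', '', 1)
# field_get_subfields(_example_field) ->
#     {'a': ['val1', 'val2'], 'b': ['val3']}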
def _compare_fields(field1, field2, strict=True):
    """
    Compare 2 fields.

    If strict is True, then the order of the subfields will be taken care
    of; if not, then the order of the subfields doesn't matter.

    :return: True if the fields are equivalent, False otherwise.
    """
    if strict:
        # Return a simple equal test on the field minus the position.
        return field1[:4] == field2[:4]
    else:
        if field1[1:4] != field2[1:4]:
            # Different indicators or controlfield value.
            return False
        else:
            # Compare subfields in a loose way.
            return set(field1[0]) == set(field2[0])
def _check_field_validity(field):
    """
    Check if a field is well-formed.

    :param field: A field tuple as returned by create_field()
    :type  field: tuple
    :raise InvenioBibRecordFieldError: If the field is invalid.
    """
    if type(field) not in (list, tuple):
        raise InvenioBibRecordFieldError(
            "Field of type '%s' should be either a list or a tuple." %
            type(field))

    if len(field) != 5:
        raise InvenioBibRecordFieldError(
            "Field of length '%d' should have 5 elements." % len(field))

    if type(field[0]) not in (list, tuple):
        raise InvenioBibRecordFieldError(
            "Subfields of type '%s' should be either a list or a tuple." %
            type(field[0]))

    if type(field[1]) is not str:
        raise InvenioBibRecordFieldError(
            "Indicator 1 of type '%s' should be a string." % type(field[1]))

    if type(field[2]) is not str:
        raise InvenioBibRecordFieldError(
            "Indicator 2 of type '%s' should be a string." % type(field[2]))

    if type(field[3]) is not str:
        raise InvenioBibRecordFieldError(
            "Controlfield value of type '%s' should be a string." %
            type(field[3]))

    if type(field[4]) is not int:
        raise InvenioBibRecordFieldError(
            "Global position of type '%s' should be an int." %
            type(field[4]))

    for subfield in field[0]:
        if (type(subfield) not in (list, tuple) or len(subfield) != 2 or
                type(subfield[0]) is not str or
                type(subfield[1]) is not str):
            raise InvenioBibRecordFieldError(
                "Subfields are malformed. Should be a list of tuples of "
                "2 strings.")
def _shift_field_positions_global(record, start, delta=1):
    """
    Shift all global field positions.

    Shift all global field positions with global field positions higher or
    equal to 'start' by the value 'delta'.
    """
    if not delta:
        return

    for tag, fields in record.items():
        newfields = []
        for field in fields:
            if field[4] < start:
                newfields.append(field)
            else:
                # Increment the global field position by delta.
                newfields.append(tuple(list(field[:4]) + [field[4] + delta]))
        record[tag] = newfields
def _tag_matches_pattern(tag, pattern):
    """
    Return true if MARC 'tag' matches a 'pattern'.

    'pattern' is plain text, with % as wildcard.

    Both parameters must be 3 characters long strings.

    .. doctest::

        >>> _tag_matches_pattern("909", "909")
        True
        >>> _tag_matches_pattern("909", "9%9")
        True
        >>> _tag_matches_pattern("909", "9%8")
        False

    :param tag: a 3 characters long string
    :param pattern: a 3 characters long string
    :return: False or True
    """
    for char1, char2 in zip(tag, pattern):
        if char2 not in ('%', char1):
            return False
    return True
def _validate_record_field_positions_global(record):
    """
    Check if the global field positions in the record are valid.

    I.e., no duplicate global field positions and local field positions in
    the list of fields are ascending.

    :param record: the record data structure
    :return: the first error found as a string or None if no error was
             found
    """
    all_fields = []
    for tag, fields in record.items():
        previous_field_position_global = -1
        for field in fields:
            if field[4] < previous_field_position_global:
                return ("Non ascending global field positions in tag "
                        "'%s'." % tag)
            previous_field_position_global = field[4]
            if field[4] in all_fields:
                return ("Duplicate global field position '%d' in tag "
                        "'%s'" % (field[4], tag))
            # Remember the position so duplicates across tags are caught.
            # (The original never appended here, so the duplicate check
            # could not fire; this matches the documented intent.)
            all_fields.append(field[4])
def _record_sort_by_indicators(record):
    """Sort the fields inside the record by indicators."""
    for tag, fields in record.items():
        record[tag] = _fields_sort_by_indicators(fields)
def _fields_sort_by_indicators(fields):
    """
    Sort a set of fields by their indicators.

    Return a sorted list with correct global field positions.
    """
    field_dict = {}
    field_positions_global = []
    for field in fields:
        field_dict.setdefault(field[1:3], []).append(field)
        field_positions_global.append(field[4])

    indicators = sorted(field_dict.keys())

    field_list = []
    for indicator in indicators:
        for field in field_dict[indicator]:
            field_list.append(field[:4] + (field_positions_global.pop(0),))

    return field_list
def _create_record_lxml(marcxml,
                        verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                        correct=CFG_BIBRECORD_DEFAULT_CORRECT,
                        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """
    Create a record object using the LXML parser.

    If correct == 1, then perform DTD validation.
    If correct == 0, then do not perform DTD validation.

    If verbose == 0, the parser will not give warnings.
    If 1 <= verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes (implement me!).
    If verbose > 3, the parser will be strict and will stop in case of
    well-formedness errors or DTD errors.
    """
    parser = etree.XMLParser(dtd_validation=correct, recover=(verbose <= 3))
    if correct:
        marcxml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<collection>\n%s\n</collection>' % (marcxml,))
    try:
        tree = etree.parse(StringIO(marcxml), parser)
        # Parser errors are located in parser.error_log:
        # if 1 <= verbose <= 3 then show them to the user?
        # if verbose == 0 then continue
        # if verbose > 3 then an exception will be thrown.
    except Exception as e:
        raise InvenioBibRecordParserError(str(e))
    record = {}
    field_position_global = 0

    controlfield_iterator = tree.iter(tag='{*}controlfield')
    for controlfield in controlfield_iterator:
        tag = controlfield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = ' '
        ind2 = ' '
        text = controlfield.text
        if text is None:
            text = ''
        else:
            text = text.encode("UTF-8")
        subfields = []
        if text or keep_singletons:
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text,
                                               field_position_global))

    datafield_iterator = tree.iter(tag='{*}datafield')
    for datafield in datafield_iterator:
        tag = datafield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = datafield.attrib.get('ind1', '!').encode("UTF-8")
        ind2 = datafield.attrib.get('ind2', '!').encode("UTF-8")
        if ind1 in ('', '_'):
            ind1 = ' '
        if ind2 in ('', '_'):
            ind2 = ' '
        subfields = []
        subfield_iterator = datafield.iter(tag='{*}subfield')
        for subfield in subfield_iterator:
            code = subfield.attrib.get('code', '!').encode("UTF-8")
            text = subfield.text
            if text is None:
                text = ''
            else:
                text = text.encode("UTF-8")
            if text or keep_singletons:
                subfields.append((code, text))
        if subfields or keep_singletons:
            text = ''
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text,
                                               field_position_global))

    return record
def _get_children_by_tag_name(node, name):
    """Retrieve all children from node 'node' with name 'name'."""
    try:
        return [child for child in node.childNodes
                if child.nodeName == name]
    except TypeError:
        return []
def _get_children_as_string(node):
    """
    Iterate through all the children of a node.

    Return one string containing the values from all the text-nodes
    recursively.
    """
    out = []
    if node:
        for child in node:
            if child.nodeType == child.TEXT_NODE:
                out.append(child.data)
            else:
                out.append(_get_children_as_string(child.childNodes))
    return ''.join(out)
def _correct_record(record):
    """
    Check and correct the structure of the record.

    :param record: the record data structure
    :return: a list of errors found
    """
    errors = []

    # Snapshot the keys since tags may be renamed while iterating.
    for tag in list(record.keys()):
        upper_bound = '999'
        n = len(tag)

        if n > 3:
            i = n - 3
            while i > 0:
                upper_bound = '%s%s' % ('0', upper_bound)
                i -= 1

        # Missing tag. Replace it with dummy tag '000'.
        if tag == '!':
            errors.append((1, '(field number(s): ' +
                           str([f[4] for f in record[tag]]) + ')'))
            record['000'] = record.pop(tag)
            tag = '000'
        elif not ('001' <= tag <= upper_bound or
                  tag in ('FMT', 'FFT', 'BDR', 'BDM')):
            errors.append(2)
            record['000'] = record.pop(tag)
            tag = '000'

        fields = []
        for field in record[tag]:
            # Datafield without any subfield.
            if field[0] == [] and field[3] == '':
                errors.append((8, '(field number: ' + str(field[4]) + ')'))

            subfields = []
            for subfield in field[0]:
                if subfield[0] == '!':
                    errors.append((3, '(field number: ' + str(field[4]) +
                                   ')'))
                    newsub = ('', subfield[1])
                else:
                    newsub = subfield
                subfields.append(newsub)

            if field[1] == '!':
                errors.append((4, '(field number: ' + str(field[4]) + ')'))
                ind1 = " "
            else:
                ind1 = field[1]

            if field[2] == '!':
                errors.append((5, '(field number: ' + str(field[4]) + ')'))
                ind2 = " "
            else:
                ind2 = field[2]

            fields.append((subfields, ind1, ind2, field[3], field[4]))

        record[tag] = fields

    return errors
def _warning(code):
    """
    Return a warning message of code 'code'.

    If code = (cd, str), it returns the warning message of code 'cd' and
    appends str at the end.
    """
    if isinstance(code, str):
        return code

    message = ''
    if isinstance(code, tuple):
        if isinstance(code[0], str):
            message = code[1]
            code = code[0]

    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message
def _compare_lists(list1, list2, custom_cmp):
    """
    Compare two lists using the given comparison function.

    :param list1: first list to compare
    :param list2: second list to compare
    :param custom_cmp: a function taking two arguments (element of
        list 1, element of list 2)
    :return: True or False depending on whether the values are the same
    """
    if len(list1) != len(list2):
        return False
    for element1, element2 in zip(list1, list2):
        if not custom_cmp(element1, element2):
            return False
    return True
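# Hedged usage sketch (not from the original source): _compare_lists()
# with a case-insensitive comparison function.
def _case_insensitive(a, b):
    return a.lower() == b.lower()
# _compare_lists(['FFT', 'BDR'], ['fft', 'bdr'], _case_insensitive) -> True
# _compare_lists(['FFT'], ['fft', 'bdr'], _case_insensitive) -> False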
def parse(self, path_to_xml=None):
    """Parse an XML document and clean any namespaces."""
    if not path_to_xml:
        if not self.path:
            self.logger.error("No path defined!")
            return
        path_to_xml = self.path

    root = self._clean_xml(path_to_xml)

    # Check whether this XML is already a clean collection/record
    # or an OAI request.
    if root.tag.lower() == 'collection':
        tree = ET.ElementTree(root)
        self.records = element_tree_collection_to_records(tree)
    elif root.tag.lower() == 'record':
        new_root = ET.Element('collection')
        new_root.append(root)
        tree = ET.ElementTree(new_root)
        self.records = element_tree_collection_to_records(tree)
    else:
        # We have an OAI request.
        header_subs = get_request_subfields(root)

        records = root.find('ListRecords')
        if records is None:
            records = root.find('GetRecord')
        if records is None:
            raise ValueError("Cannot find ListRecords or GetRecord!")

        tree = ET.ElementTree(records)
        for record, is_deleted in element_tree_oai_records(tree,
                                                           header_subs):
            if is_deleted:
                # It was OAI deleted. Create special record.
                self.deleted_records.append(
                    self.create_deleted_record(record)
                )
            else:
                self.records.append(record)
def _clean_xml(self, path_to_xml):
    """
    Clean MARCXML harvested from OAI.

    Allows the XML to be used with BibUpload or BibRecord.

    :param path_to_xml: either XML as a string or path to an XML file
    :return: ElementTree of clean data
    """
    try:
        if os.path.isfile(path_to_xml):
            tree = ET.parse(path_to_xml)
            root = tree.getroot()
        else:
            root = ET.fromstring(path_to_xml)
    except Exception as e:
        self.logger.error("Could not read OAI XML, aborting filter!")
        raise e
    strip_xml_namespace(root)
    return root
def create_deleted_record(self, record):
    """Generate the record deletion if deleted from OAI-PMH."""
    identifier = record_get_field_value(record, tag="037", code="a")
    recid = identifier.split(":")[-1]
    try:
        source = identifier.split(":")[1]
    except IndexError:
        source = "Unknown"
    record_add_field(record, "035",
                     subfields=[("9", source), ("a", recid)])
    record_add_field(record, "980",
                     subfields=[("c", "DELETED")])
    return record
def _login(self, session, get_request=False):
    """Return a session for yesss.at."""
    req = session.post(self._login_url, data=self._logindata)
    if _LOGIN_ERROR_STRING in req.text or \
            req.status_code == 403 or \
            req.url == _LOGIN_URL:
        err_mess = "YesssSMS: login failed, username or password wrong"

        if _LOGIN_LOCKED_MESS in req.text:
            err_mess += ", page says: " + _LOGIN_LOCKED_MESS_ENG
            self._suspended = True
            raise self.AccountSuspendedError(err_mess)
        raise self.LoginError(err_mess)

    self._suspended = False  # login worked

    return (session, req) if get_request else session
def login_data_valid(self):
    """Check for working login data."""
    login_working = False
    try:
        with self._login(requests.Session()) as sess:
            sess.get(self._logout_url)
    except self.LoginError:
        pass
    else:
        login_working = True
    return login_working
def send(self, recipient, message):
    """Send an SMS."""
    if self._logindata['login_rufnummer'] is None or \
            self._logindata['login_passwort'] is None:
        err_mess = "YesssSMS: Login data required"
        raise self.LoginError(err_mess)
    if not recipient:
        raise self.NoRecipientError("YesssSMS: recipient number missing")
    if not isinstance(recipient, str):
        raise ValueError("YesssSMS: str expected as recipient number")
    if not message:
        raise self.EmptyMessageError("YesssSMS: message is empty")

    with self._login(requests.Session()) as sess:
        sms_data = {'to_nummer': recipient, 'nachricht': message}
        req = sess.post(self._websms_url, data=sms_data)

        if not (req.status_code == 200 or req.status_code == 302):
            raise self.SMSSendingError("YesssSMS: error sending SMS")

        if _UNSUPPORTED_CHARS_STRING in req.text:
            raise self.UnsupportedCharsError(
                "YesssSMS: message contains unsupported character(s)")

        if _SMS_SENDING_SUCCESSFUL_STRING not in req.text:
            raise self.SMSSendingError("YesssSMS: error sending SMS")

        sess.get(self._logout_url)
def get_date(self, filename):
    """Return the date of the article in file."""
    try:
        self.document = parse(filename)
        return self._get_date()
    except DateNotFoundException:
        print("Date problem found in {0}".format(filename))
        return datetime.datetime.strftime(datetime.datetime.now(),
                                          "%Y-%m-%d")
def get_collection(self, journal):
    """Return this article's collection."""
    conference = ''
    for tag in self.document.getElementsByTagName('conference'):
        conference = xml_to_text(tag)
    if (conference or journal == "International Journal of Modern "
                                 "Physics: Conference Series"):
        return [('a', 'HEP'), ('a', 'ConferencePaper')]
    elif self._get_article_type() == "review-article":
        return [('a', 'HEP'), ('a', 'Review')]
    else:
        return [('a', 'HEP'), ('a', 'Published')]
def get_record(self, filename, ref_extract_callback=None):
    """
    Get the MARCXML of the files in the xaml_jp directory.

    :param filename: the name of the file to parse.
    :type filename: string
    :param ref_extract_callback: callback to be used to extract
        unstructured references. It should return a marcxml formatted
        string of the reference.
    :type ref_extract_callback: callable
    :returns: a string with the marc xml version of the file.
    """
    self.document = parse(filename)
    article_type = self._get_article_type()
    if article_type not in ['research-article',
                            'corrected-article',
                            'original-article',
                            'introduction',
                            'letter',
                            'correction',
                            'addendum',
                            'review-article',
                            'rapid-communications']:
        return ""

    rec = create_record()
    title, subtitle, notes = self._get_title()
    subfields = []
    if subtitle:
        subfields.append(('b', subtitle))
    if title:
        title = fix_title_capitalization(title)
        subfields.append(('a', title))
    record_add_field(rec, '245', subfields=subfields)

    for note_id in notes:
        note = self._get_note(note_id)
        if note:
            record_add_field(rec, '500', subfields=[('a', note)])

    keywords = self._get_keywords()
    for keyword in keywords:
        record_add_field(rec, '653', ind1='1',
                         subfields=[('a', keyword), ('9', 'author')])
    journal, volume, issue, year, date, doi, page,\
        fpage, lpage = self._get_publication_information()
    if date:
        record_add_field(rec, '260', subfields=[('c', date),
                                                ('t', 'published')])
    if doi:
        record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
                                                          ('2', 'DOI')])
    abstract = self._get_abstract()
    if abstract:
        abstract = convert_html_subscripts_to_latex(abstract)
        record_add_field(rec, '520', subfields=[('a', abstract),
                                                ('9', 'World Scientific')])
    license, license_type, license_url = self._get_license()
    subfields = []
    if license:
        subfields.append(('a', license))
    if license_url:
        subfields.append(('u', license_url))
    if subfields:
        record_add_field(rec, '540', subfields=subfields)
    if license_type == 'open-access':
        self._attach_fulltext(rec, doi)

    number_of_pages = self._get_page_count()
    if number_of_pages:
        record_add_field(rec, '300', subfields=[('a', number_of_pages)])

    c_holder, c_year, c_statement = self._get_copyright()
    if c_holder and c_year:
        record_add_field(rec, '542', subfields=[('d', c_holder),
                                                ('g', c_year),
                                                ('e', 'Article')])
    elif c_statement:
        record_add_field(rec, '542', subfields=[('f', c_statement),
                                                ('e', 'Article')])

    subfields = []
    if journal:
        subfields.append(('p', journal))
    if issue:
        subfields.append(('n', issue))
    if volume:
        subfields.append(('v', volume))
    if fpage and lpage:
        subfields.append(('c', '%s-%s' % (fpage, lpage)))
    elif page:
        subfields.append(('c', page))
    if year:
        subfields.append(('y', year))
    if article_type == 'correction':
        subfields.append(('m', 'Erratum'))
    elif article_type == 'addendum':
        subfields.append(('m', 'Addendum'))
    record_add_field(rec, '773', subfields=subfields)

    collections = self.get_collection(journal)
    for collection in collections:
        record_add_field(rec, '980', subfields=[collection])

    self._add_authors(rec)

    if article_type in ['correction', 'addendum']:
        related_article = self._get_related_article()
        if related_article:
            record_add_field(rec, '024', ind1='7',
                             subfields=[('a', related_article),
                                        ('2', 'DOI')])
    try:
        return record_xml_output(rec)
    except UnicodeDecodeError:
        message = "Found a bad char in the file for the article " + doi
        sys.stderr.write(message)
        return ""
def _attach_fulltext(self, rec, doi):
    """Attach fulltext FFT."""
    url = os.path.join(self.url_prefix, doi)
    record_add_field(rec, 'FFT',
                     subfields=[('a', url),
                                ('t', 'INSPIRE-PUBLIC'),
                                ('d', 'Fulltext')])
def convert_all(cls, records):
    """
    Convert the list of bibrecs into one MARCXML.

    >>> from harvestingkit.bibrecord import BibRecordPackage
    >>> from harvestingkit.inspire_cds_package import Inspire2CDS
    >>> bibrecs = BibRecordPackage("inspire.xml")
    >>> bibrecs.parse()
    >>> xml = Inspire2CDS.convert_all(bibrecs.get_records())

    :param records: list of BibRecord dicts
    :type records: list
    :returns: MARCXML as string
    """
    out = ["<collection>"]
    for rec in records:
        conversion = cls(rec)
        out.append(conversion.convert())
    out.append("</collection>")
    return "\n".join(out)
def from_source(cls, source):
    """
    Yield single conversion objects from a MARCXML file or string.

    >>> from harvestingkit.inspire_cds_package import Inspire2CDS
    >>> for record in Inspire2CDS.from_source("inspire.xml"):
    >>>     xml = record.convert()
    """
    bibrecs = BibRecordPackage(source)
    bibrecs.parse()
    for bibrec in bibrecs.get_records():
        yield cls(bibrec)
def get_config_item(cls, key, kb_name, allow_substring=True):
    """Return the opposite mapping by searching the imported KB."""
    config_dict = cls.kbs.get(kb_name, None)
    if config_dict:
        if key in config_dict:
            return config_dict[key]
        elif allow_substring:
            res = [v for k, v in config_dict.items() if key in k]
            if res:
                return res[0]
    return key
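# Hedged usage sketch (hypothetical KB name and entries, for illustration
# only): exact keys win, then the first substring match, then the key
# itself as a fallback.
def _example_get_config_item(converter_cls):
    # Assuming converter_cls.kbs == {'journals': {'Phys.Rev.': 'PR',
    #                                             'Phys.Rev.Lett.': 'PRL'}}:
    exact = converter_cls.get_config_item('Phys.Rev.', 'journals')      # 'PR'
    substring = converter_cls.get_config_item('Rev.Lett', 'journals')   # 'PRL'
    fallback = converter_cls.get_config_item('Unknown J.', 'journals')  # key
    return exact, substring, fallback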
def load_config(from_key, to_key):
    """
    Load configuration from config.

    Meant to run only once per system process as a class variable in
    subclasses.
    """
    from .mappings import mappings
    kbs = {}
    for key, values in mappings['config'].items():
        parse_dict = {}
        for mapping in values:
            # {'inspire': 'Norwegian', 'cds': 'nno'}
            # -> {"Norwegian": "nno"}
            parse_dict[mapping[from_key]] = mapping[to_key]
        kbs[key] = parse_dict
    return kbs
def match(self, query=None, **kwargs):
    """Try to match the current record to the database."""
    from invenio.search_engine import perform_request_search
    if not query:
        # We use the default setup.
        recid = self.record["001"][0][3]
        return perform_request_search(p="035:%s" % (recid,), of="id")
    else:
        if "recid" not in kwargs:
            kwargs["recid"] = self.record["001"][0][3]
        return perform_request_search(p=query % kwargs, of="id")
def keep_only_fields(self):
    """Keep only fields listed in fields_list."""
    # Snapshot the keys since fields are deleted while iterating.
    for tag in list(self.record.keys()):
        if tag not in self.fields_list:
            record_delete_fields(self.record, tag)
def strip_fields(self):
    """Clear any fields listed in fields_list."""
    # Snapshot the keys since fields are deleted while iterating.
    for tag in list(self.record.keys()):
        if tag in self.fields_list:
            record_delete_fields(self.record, tag)
def add_systemnumber(self, source, recid=None):
    """Add 035 number from 001 recid with given source."""
    if not recid:
        recid = self.get_recid()
    if not self.hidden and recid:
        record_add_field(
            self.record,
            tag='035',
            subfields=[('9', source), ('a', recid)])
def add_control_number(self, tag, value):
    """Add a control-number 00x for given tag with value."""
    record_add_field(self.record, tag, controlfield_value=value)
def update_subject_categories(self, primary, secondary, kb):
    """650 Translate Categories."""
    category_fields = record_get_field_instances(self.record,
                                                 tag='650',
                                                 ind1='1',
                                                 ind2='7')
    record_delete_fields(self.record, "650")
    for field in category_fields:
        for key, value in field[0]:
            if key == 'a':
                new_value = self.get_config_item(value, kb)
                if new_value != value:
                    new_subs = [('2', secondary), ('a', new_value)]
                else:
                    new_subs = [('2', primary), ('a', value)]
                record_add_field(self.record, "650", ind1="1", ind2="7",
                                 subfields=new_subs)
                break
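# Hedged usage sketch (hypothetical scheme names and KB mapping): a 650
# category that has a translation in the KB is re-added under the secondary
# scheme, otherwise it is kept under the primary one.
def _example_update_categories(converter):
    # E.g. with kb 'categories' mapping 'hep-ph' -> 'Particle Physics',
    # a field 650_17 $$a hep-ph becomes 650_17 $$2 SzGeCERN
    # $$a Particle Physics, while unmapped values keep $$2 INSPIRE.
    converter.update_subject_categories(primary='INSPIRE',
                                        secondary='SzGeCERN',
                                        kb='categories')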
def _get_reference(self, ref):
    """Retrieve the data for a reference."""
    label = get_value_in_tag(ref, 'label')
    label = re.sub(r'\D', '', label)
    for innerref in ref.getElementsByTagName('mixed-citation'):
        ref_type = innerref.getAttribute('publication-type')
        institution = get_value_in_tag(innerref, 'institution')
        report_no = ''
        for tag in innerref.getElementsByTagName('pub-id'):
            if tag.getAttribute('pub-id-type') == 'other':
                if tag.hasChildNodes():
                    report_no = get_all_text(tag)
        doi = ''
        for tag in innerref.getElementsByTagName('pub-id'):
            if tag.getAttribute('pub-id-type') == 'doi':
                doi = xml_to_text(tag)
        collaboration = get_value_in_tag(innerref, 'collab')
        authors = []
        person_groups = innerref.getElementsByTagName('person-group')
        for author_group in person_groups:
            if author_group.getAttribute('person-group-type') == 'author':
                for author in author_group.getElementsByTagName(
                        'string-name'):
                    if author.hasChildNodes():
                        authors.append(get_all_text(author))
        editors = []
        for editor_group in person_groups:
            if editor_group.getAttribute('person-group-type') == 'editor':
                for editor in editor_group.getElementsByTagName(
                        'string-name'):
                    if editor.hasChildNodes():
                        editors.append(get_all_text(editor))
        journal = get_value_in_tag(innerref, 'source')
        journal, volume = fix_journal_name(journal, self.journal_mappings)
        volume += get_value_in_tag(innerref, 'volume')
        if journal == 'J.High Energy Phys.' or journal == 'JHEP':
            issue = get_value_in_tag(innerref, 'issue')
            volume = volume[2:] + issue
            journal = 'JHEP'
        page = get_value_in_tag(innerref, 'page-range')
        year = get_value_in_tag(innerref, 'year')
        external_link = get_value_in_tag(innerref, 'ext-link')
        arxiv = ''
        for tag in innerref.getElementsByTagName('pub-id'):
            if tag.getAttribute('pub-id-type') == 'arxiv':
                if tag.hasChildNodes():
                    arxiv = get_all_text(tag)
        arxiv = format_arxiv_id(arxiv)
        publisher = get_value_in_tag(innerref, 'publisher-name')
        publisher_location = get_value_in_tag(innerref, 'publisher-loc')
        if publisher_location:
            publisher = publisher_location + ': ' + publisher
        unstructured_text = []
        for child in innerref.childNodes:
            if child.nodeType == child.TEXT_NODE:
                text = child.nodeValue.strip()
                text = re.sub(r'[\[\]\(\.;\)]', '', text).strip()
                if text.startswith(','):
                    text = text[1:].strip()
                if text.endswith('Report No'):
                    text = institution + " " + text
                    institution = ''
                    text = text.strip()
                elif text.endswith(' ed'):
                    text += '.'
                elif text.endswith('PhD thesis,'):
                    if institution:
                        text += ' ' + institution
                        institution = ''
                    else:
                        text = text[:-1]
                elif text.startswith('Seminar,'):
                    article_title = get_value_in_tag(innerref,
                                                     'article-title')
                    text = institution + " Seminar, \"" + \
                        article_title + "\""
                    institution = ''
                elif text == u'\u201d':
                    text = ''
                ignore_text = ['in', 'pp', 'edited by']
                if text.startswith('Vol'):
                    temp = re.sub(r'\D', '', text)
                    if temp:
                        volume += temp
                elif len(text) > 1 and text not in ignore_text \
                        and not (text.isdigit() or text[:-1].isdigit()):
                    unstructured_text.append(text)
        if unstructured_text:
            unstructured_text = " ".join(unstructured_text)
        if ref_type == 'book':
            if volume and not volume.lower().startswith('vol'):
                volume = 'Vol ' + volume
            if volume and page:
                volume = volume + ', pp ' + page
        yield (ref_type, doi, authors, collaboration, journal, volume,
               page, year, label, arxiv, publisher, institution,
               unstructured_text, external_link, report_no, editors)
def _add_references(self, rec):
    """Add the references to the record."""
    for ref in self.document.getElementsByTagName('ref'):
        for (ref_type, doi, authors, collaboration, journal, volume,
                page, year, label, arxiv, publisher, institution,
                unstructured_text, external_link, report_no,
                editors) in self._get_reference(ref):
            subfields = []
            if doi:
                subfields.append(('a', doi))
            for author in authors:
                subfields.append(('h', author))
            for editor in editors:
                subfields.append(('e', editor))
            if year:
                subfields.append(('y', year))
            if unstructured_text:
                if page:
                    subfields.append(('m', unstructured_text + ', ' + page))
                else:
                    subfields.append(('m', unstructured_text))
            if collaboration:
                subfields.append(('c', collaboration))
            if institution:
                subfields.append(('m', institution))
            if publisher:
                subfields.append(('p', publisher))
            if arxiv:
                subfields.append(('r', arxiv))
            if report_no:
                subfields.append(('r', report_no))
            if external_link:
                subfields.append(('u', external_link))
            if label:
                subfields.append(('o', label))
            if ref_type == 'book':
                if journal:
                    subfields.append(('t', journal))
                if volume:
                    subfields.append(('m', volume))
                elif page and not unstructured_text:
                    subfields.append(('m', page))
            else:
                if volume and page:
                    subfields.append(('s',
                                      journal + "," + volume + "," + page))
                elif journal:
                    subfields.append(('t', journal))
            if ref_type:
                subfields.append(('d', ref_type))
            if not subfields:
                # misc-type references
                try:
                    r = ref.getElementsByTagName('mixed-citation')[0]
                    text = xml_to_text(r)
                    label = text.split()[0]
                    text = " ".join(text.split()[1:])
                    subfields.append(('s', text))
                    record_add_field(rec, '999', ind1='C', ind2='5',
                                     subfields=subfields)
                except IndexError:
                    # references without 'mixed-citation' tag
                    try:
                        r = ref.getElementsByTagName('note')[0]
                        subfields.append(('s', xml_to_text(r)))
                        record_add_field(rec, '999', ind1='C', ind2='5',
                                         subfields=subfields)
                    except IndexError:
                        # references without 'note' tag
                        subfields.append(('s', xml_to_text(ref)))
                        record_add_field(rec, '999', ind1='C', ind2='5',
                                         subfields=subfields)
            else:
                record_add_field(rec, '999', ind1='C', ind2='5',
                                 subfields=subfields)
def get_record(self, xml_file):
    """
    Read an XML file in JATS format and return an XML string in MARC
    format.
    """
    self.document = parse(xml_file)

    if get_value_in_tag(self.document, "meta"):
        raise ApsPackageXMLError("The XML format of %s is not correct"
                                 % (xml_file,))
    page_count = self._get_page_count()
    rec = create_record()
    if page_count:
        record_add_field(rec, '300', subfields=[('a', page_count)])
    pacscodes = self._get_pacscodes()
    for pacscode in pacscodes:
        record_add_field(rec, '084', subfields=[('2', 'PACS'),
                                                ('a', pacscode)])
    subject = self._get_subject()
    if subject:
        record_add_field(rec, '650', ind1='1', ind2='7',
                         subfields=[('2', 'APS'),
                                    ('a', subject)])
    keywords = self._get_keywords()
    if keywords:
        record_add_field(rec, '653', ind1='1',
                         subfields=[('a', ', '.join(keywords)),
                                    ('9', 'author')])
    title, subtitle, _ = self._get_title()
    subfields = []
    if subtitle:
        subfields.append(('b', subtitle))
    if title:
        subfields.append(('a', title))
    record_add_field(rec, '245', subfields=subfields)
    journal, volume, issue, year, start_date, doi,\
        article_id, _, _ = self._get_publication_information()
    if start_date:
        record_add_field(rec, '260', subfields=[('c', start_date),
                                                ('t', 'published')])
    if doi:
        record_add_field(rec, '024', ind1='7', subfields=[('a', doi),
                                                          ('2', 'DOI')])
    abstract = self._get_abstract()
    if abstract:
        record_add_field(rec, '520', subfields=[('a', abstract),
                                                ('9', 'APS')])
    license, license_type, license_url = self._get_license()
    subfields = []
    if license:
        subfields.append(('a', license))
    if license_url:
        subfields.append(('u', license_url))
    if subfields:
        record_add_field(rec, '540', subfields=subfields)
    c_holder, c_year, c_statement = self._get_copyright()
    if c_holder and c_year:
        record_add_field(rec, '542', subfields=[('d', c_holder),
                                                ('g', c_year),
                                                ('e', 'Article')])
    elif c_statement:
        record_add_field(rec, '542', subfields=[('f', c_statement),
                                                ('e', 'Article')])
    record_add_field(rec, '773', subfields=[('p', journal),
                                            ('v', volume),
                                            ('n', issue),
                                            ('y', year),
                                            ('c', article_id)])
    record_add_field(rec, '980', subfields=[('a', 'HEP')])
    record_add_field(rec, '980', subfields=[('a', 'Citeable')])
    record_add_field(rec, '980', subfields=[('a', 'Published')])
    self._add_authors(rec)
    self._add_references(rec)
    try:
        return record_xml_output(rec)
    except UnicodeDecodeError:
        sys.stderr.write("Found a bad char in the file for the article "
                         + doi)
        return ""
def connect(self):
    """Connect and log in to the server."""
    self._ftp.connect()
    self._ftp.login(user=self._username, passwd=self._passwd)
def download_folder(self, folder='', target_folder=''):
    """
    Download a whole folder from the server.

    FtpHandler.download_folder() will download all the files from the
    server in the working directory.

    :param folder: the absolute path for the folder on the server.
    :type folder: string
    :param target_folder: absolute or relative path for the destination
        folder, default is the working directory.
    :type target_folder: string
    """
    files, folders = self.ls(folder)
    for fl in files:
        self.download(join(folder, fl), target_folder)
    for fld in folders:
        self.download_folder(join(folder, fld), target_folder)
def download(self, source_file, target_folder=''):
    """
    Download a file from the FTP server to the target folder.

    :param source_file: the absolute path for the file on the server;
        it can be one of the files coming from FtpHandler.dir().
    :type source_file: string
    :param target_folder: relative or absolute path of the destination
        folder, default is the working directory.
    :type target_folder: string
    """
    current_folder = self._ftp.pwd()
    if not target_folder.startswith('/'):  # relative path
        target_folder = join(getcwd(), target_folder)
    folder = os.path.dirname(source_file)
    self.cd(folder)

    if folder.startswith("/"):
        folder = folder[1:]
    destination_folder = join(target_folder, folder)
    if not os.path.exists(destination_folder):
        print("Creating folder", destination_folder)
        os.makedirs(destination_folder)

    source_file = os.path.basename(source_file)
    destination = join(destination_folder, source_file)
    try:
        with open(destination, 'wb') as result:
            self._ftp.retrbinary('RETR %s' % (source_file,),
                                 result.write)
    except error_perm as e:  # source_file is a folder
        print(e)
        remove(join(target_folder, source_file))
        raise
    self._ftp.cwd(current_folder)
def cd(self, folder):
    """
    Change the working directory on the server.

    :param folder: the desired directory.
    :type folder: string
    """
    if folder.startswith('/'):
        self._ftp.cwd(folder)
    else:
        for subfolder in folder.split('/'):
            if subfolder:
                self._ftp.cwd(subfolder)
def ls(self, folder=''):
    """
    List the files and folders of a specific directory.

    Default is the current working directory.

    :param folder: the folder to be listed.
    :type folder: string
    :returns: a tuple with the list of files in the folder and the list
        of subfolders in the folder.
    """
    current_folder = self._ftp.pwd()
    self.cd(folder)
    contents = []
    self._ftp.retrlines('LIST', lambda a: contents.append(a))
    # List comprehensions instead of filter/map so real lists are
    # returned, as the docstring promises.
    files = [a for a in contents if a.split()[0].startswith('-')]
    folders = [a for a in contents if a.split()[0].startswith('d')]
    files = [' '.join(a.split()[8:]) for a in files]
    folders = [' '.join(a.split()[8:]) for a in folders]
    self._ftp.cwd(current_folder)
    return files, folders
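# Hedged usage sketch (hypothetical server layout): ls() splits a LIST
# reply into plain files and subdirectories, which download_folder()
# then walks recursively.
def _example_mirror(ftp_handler):
    files, folders = ftp_handler.ls('/incoming')
    print('files:', files)      # e.g. ['batch1.xml']
    print('folders:', folders)  # e.g. ['2014-01']
    ftp_handler.download_folder('/incoming', 'local_copy')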
def dir(self, folder='', prefix=''):
    """
    List all the files in the folder given as parameter.

    FtpHandler.dir() lists all the files on the server.

    :param folder: the folder to be listed.
    :type folder: string
    :param prefix: it does not belong to the interface; it is used to
        recursively list the subfolders.
    :returns: a list with all the files in the server.
    """
    files, folders = self.ls(folder)
    result = files
    inner = []
    for fld in folders:
        try:
            inner += self.dir(folder + '/' + fld, prefix + fld + '/')
        except Exception:
            pass
    result += inner
    if prefix:
        result = [prefix + a for a in result]
    return result
def mkdir(self, folder):
    """
    Create a folder in the server.

    :param folder: the folder to be created.
    :type folder: string
    """
    current_folder = self._ftp.pwd()
    # Create the necessary folders on the server if they don't exist.
    folders = folder.split('/')
    for fld in folders:
        try:
            self.cd(fld)
        except error_perm:  # folder does not exist
            self._ftp.mkd(fld)
            self.cd(fld)
    self.cd(current_folder)
def rm(self, filename):
    """
    Delete a file from the server.

    :param filename: the file to be deleted.
    :type filename: string
    """
    try:
        self._ftp.delete(filename)
    except error_perm:
        # The target is either a directory or it does not exist.
        try:
            current_folder = self._ftp.pwd()
            self.cd(filename)
        except error_perm:
            print('550 Delete operation failed %s '
                  'does not exist!' % (filename,))
        else:
            self.cd(current_folder)
            print('550 Delete operation failed %s '
                  'is a folder. Use rmdir function '
                  'to delete it.' % (filename,))
def rmdir(self, foldername):
    """
    Delete a folder from the server.

    :param foldername: the folder to be deleted.
    :type foldername: string
    """
    current_folder = self._ftp.pwd()
    try:
        self.cd(foldername)
    except error_perm:
        print('550 Delete operation failed folder %s '
              'does not exist!' % (foldername,))
    else:
        self.cd(current_folder)
        try:
            self._ftp.rmd(foldername)
        except error_perm:  # folder not empty
            self.cd(foldername)
            contents = self.ls()
            # Delete the files. (Explicit loops instead of map(), which
            # is lazy in Python 3 and would silently do nothing.)
            for fl in contents[0]:
                self._ftp.delete(fl)
            # Delete the subfolders.
            for fld in contents[1]:
                self.rmdir(fld)
            self.cd(current_folder)
            self._ftp.rmd(foldername)
def get_filesize(self, filename):
    """
    Return the filesize of a file.

    :param filename: the full path to the file on the server.
    :type filename: string
    :returns: string representation of the filesize.
    """
    result = []

    def dir_callback(val):
        result.append(val.split()[4])

    self._ftp.dir(filename, dir_callback)
    return result[0]
def upload(self, filename, location=''):
    """
    Upload a file on the server to the desired location.

    :param filename: the name of the file to be uploaded.
    :type filename: string
    :param location: the directory in which the file will be stored.
    :type location: string
    """
    current_folder = self._ftp.pwd()
    self.mkdir(location)
    self.cd(location)
    with open(filename, 'rb') as fl:
        filename = filename.split('/')[-1]
        self._ftp.storbinary('STOR %s' % filename, fl)
    self.cd(current_folder)
def parse_data(self, text, maxwidth, maxheight, template_dir, context,
               urlize_all_links):
    """Parse a block of text indiscriminately."""
    # Create a dictionary of user urls -> rendered responses.
    replacements = {}
    user_urls = set(re.findall(URL_RE, text))

    for user_url in user_urls:
        try:
            resource = oembed.site.embed(user_url, maxwidth=maxwidth,
                                         maxheight=maxheight)
        except OEmbedException:
            if urlize_all_links:
                replacements[user_url] = \
                    '<a href="%(LINK)s">%(LINK)s</a>' % {'LINK': user_url}
        else:
            context['minwidth'] = min(maxwidth, resource.width)
            context['minheight'] = min(maxheight, resource.height)

            replacement = self.render_oembed(
                resource,
                user_url,
                template_dir=template_dir,
                context=context
            )
            replacements[user_url] = replacement.strip()

    # Go through the text recording URLs that can be replaced,
    # taking note of their start & end indexes.
    user_urls = re.finditer(URL_RE, text)
    matches = []
    for match in user_urls:
        if match.group() in replacements:
            matches.append([match.start(), match.end(), match.group()])

    # Replace the URLs in order, offsetting the indices each go.
    for indx, (start, end, user_url) in enumerate(matches):
        replacement = replacements[user_url]
        difference = len(replacement) - len(user_url)

        # Insert the replacement between two slices of text surrounding
        # the original url.
        text = text[:start] + replacement + text[end:]

        # Iterate through the rest of the matches, offsetting their
        # indices based on the difference between replacement/original.
        for j in range(indx + 1, len(matches)):
            matches[j][0] += difference
            matches[j][1] += difference

    return mark_safe(text)
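# Hedged worked example (not from the original source) of the index
# offsetting above: replacing a 10-char URL at [20, 30) with a 50-char
# embed makes the text 40 chars longer, so every later match must shift
# its start/end by that same +40 before it is spliced in.
def _example_offsets():
    matches = [[20, 30, 'url-one'], [45, 55, 'url-two']]
    difference = 50 - 10
    for j in range(1, len(matches)):
        matches[j][0] += difference
        matches[j][1] += difference
    return matches  # [[20, 30, 'url-one'], [85, 95, 'url-two']]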
def parse_data(self, text, maxwidth, maxheight, template_dir, context,
                   urlize_all_links):
        """
        Parse a block of text, rendering links that occur on their own line
        normally, but rendering inline links using a special template dir.
        """
        block_parser = TextBlockParser()

        lines = text.splitlines()
        parsed = []

        for line in lines:
            if STANDALONE_URL_RE.match(line):
                user_url = line.strip()
                try:
                    resource = oembed.site.embed(user_url, maxwidth=maxwidth,
                                                 maxheight=maxheight)
                except OEmbedException:
                    if urlize_all_links:
                        line = '<a href="%(LINK)s">%(LINK)s</a>' % \
                            {'LINK': user_url}
                else:
                    # only set the context sizes once the embed succeeded
                    context['minwidth'] = min(maxwidth, resource.width)
                    context['minheight'] = min(maxheight, resource.height)

                    line = self.render_oembed(
                        resource,
                        user_url,
                        template_dir=template_dir,
                        context=context)
            else:
                line = block_parser.parse(line, maxwidth, maxheight, 'inline',
                                          context, urlize_all_links)

            parsed.append(line)

        return mark_safe('\n'.join(parsed))
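A hedged sketch of driving the block parser above (the line-aware variant is called the same way). It assumes oembed providers have been registered with oembed.site elsewhere and that a plain dict is acceptable as the template context; both are assumptions about the surrounding application.

parser = TextBlockParser()
html = parser.parse_data(
    text='Watch this: http://www.youtube.com/watch?v=dQw4w9WgXcQ',
    maxwidth=640,
    maxheight=480,
    template_dir='oembed',
    context={},            # assumed sufficient; a Django Context may be needed
    urlize_all_links=True)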
def login(email=None, password=None, api_key=None, application='Default',
          url=None, verify_ssl_certificate=True):
    """
    Do the legwork of logging into the Midas Server instance, storing the API
    key and token.

    :param email: (optional) Email address to login with. If not set, the
        console will be prompted.
    :type email: None | string
    :param password: (optional) User password to login with. If not set and no
        'api_key' is set, the console will be prompted.
    :type password: None | string
    :param api_key: (optional) API key to login with. If not set, password
        login will be used.
    :type api_key: None | string
    :param application: (optional) Application name to be used with 'api_key'.
    :type application: string
    :param url: (optional) URL address of the Midas Server instance to login
        to. If not set, the console will be prompted.
    :type url: None | string
    :param verify_ssl_certificate: (optional) If True, the SSL certificate
        will be verified.
    :type verify_ssl_certificate: bool
    :returns: API token.
    :rtype: string
    """
    try:
        input_ = raw_input  # Python 2
    except NameError:
        input_ = input      # Python 3

    if url is None:
        url = input_('Server URL: ')
    url = url.rstrip('/')

    if session.communicator is None:
        session.communicator = Communicator(url)
    else:
        session.communicator.url = url

    session.communicator.verify_ssl_certificate = verify_ssl_certificate

    if email is None:
        email = input_('Email: ')
    session.email = email

    if api_key is None:
        if password is None:
            password = getpass.getpass()
        session.api_key = session.communicator.get_default_api_key(
            session.email, password)
        session.application = 'Default'
    else:
        session.api_key = api_key
        session.application = application

    return renew_token()
def renew_token():
    """
    Renew or get a token to use for transactions with the Midas Server
    instance.

    :returns: API token.
    :rtype: string
    """
    session.token = session.communicator.login_with_api_key(
        session.email, session.api_key, application=session.application)

    if len(session.token) < 10:
        # HACK: a token shorter than 10 characters is an MFA token id,
        # meaning multi-factor authentication is enabled
        one_time_pass = getpass.getpass('One-Time Password: ')
        session.token = session.communicator.mfa_otp_login(
            session.token, one_time_pass)
    return session.token
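A minimal session sketch using login() and renew_token() above. The URL and credentials are placeholders; passing api_key skips the password prompt.

token = login(email='jane@example.org',
              api_key='0123456789abcdef',  # placeholder key
              url='https://midas.example.org')

# Tokens expire; renew_token() re-authenticates with the cached API key and
# prompts for a one-time password if multi-factor authentication is enabled.
token = renew_token()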
def _create_or_reuse_item(local_file, parent_folder_id, reuse_existing=False):
    """
    Create an item from the local file in the Midas Server folder
    corresponding to the parent folder id.

    :param local_file: full path to a file on the local file system
    :type local_file: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the item will be added
    :type parent_folder_id: int | long
    :param reuse_existing: (optional) whether to accept an existing item of
        the same name in the same location, or create a new one instead
    :type reuse_existing: bool
    :returns: id of the item that was found or created
    :rtype: int | long
    """
    local_item_name = os.path.basename(local_file)
    item_id = None
    if reuse_existing:
        # check by name to see if the item already exists in the folder
        children = session.communicator.folder_children(
            session.token, parent_folder_id)
        items = children['items']

        for item in items:
            if item['name'] == local_item_name:
                item_id = item['item_id']
                break

    if item_id is None:
        # create a new item in the parent folder
        new_item = session.communicator.create_item(
            session.token, local_item_name, parent_folder_id)
        item_id = new_item['item_id']

    return item_id
def _create_or_reuse_folder(local_folder, parent_folder_id,
                            reuse_existing=False):
    """
    Create a folder from the local folder in the Midas Server folder
    corresponding to the parent folder id.

    :param local_folder: full path to a directory on the local file system
    :type local_folder: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the folder will be added
    :type parent_folder_id: int | long
    :param reuse_existing: (optional) whether to accept an existing folder of
        the same name in the same location, or create a new one instead
    :type reuse_existing: bool
    :returns: id of the folder that was found or created
    :rtype: int | long
    """
    local_folder_name = os.path.basename(local_folder)
    folder_id = None
    if reuse_existing:
        # check by name to see if the folder already exists in the folder
        children = session.communicator.folder_children(
            session.token, parent_folder_id)
        folders = children['folders']

        for folder in folders:
            if folder['name'] == local_folder_name:
                folder_id = folder['folder_id']
                break

    if folder_id is None:
        # create a new subfolder in the parent folder
        new_folder = session.communicator.create_folder(session.token,
                                                        local_folder_name,
                                                        parent_folder_id)
        folder_id = new_folder['folder_id']

    return folder_id
def _streaming_file_md5(file_path): """ Create and return a hex checksum using the MD5 sum of the passed in file. This will stream the file, rather than load it all into memory. :param file_path: full path to the file :type file_path: string :returns: a hex checksum :rtype: string """ md5 = hashlib.md5() with open(file_path, 'rb') as f: # iter needs an empty byte string for the returned iterator to halt at # EOF for chunk in iter(lambda: f.read(128 * md5.block_size), b''): md5.update(chunk) return md5.hexdigest()
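A quick sanity check for the streaming MD5 helper: on any file it should agree with a one-shot hashlib digest, while only ever holding one chunk in memory. The temporary path is a placeholder.

import hashlib

payload = b'some test bytes'
with open('/tmp/md5_check.bin', 'wb') as f:
    f.write(payload)

# the streaming digest must match the one-shot digest of the same bytes
assert _streaming_file_md5('/tmp/md5_check.bin') == \
    hashlib.md5(payload).hexdigest()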
def _create_bitstream(file_path, local_file, item_id, log_ind=None):
    """
    Create a bitstream in the given item.

    :param file_path: full path to the local file
    :type file_path: string
    :param local_file: name of the local file
    :type local_file: string
    :param item_id: id of the item on the Midas Server instance to which the
        bitstream will be added
    :type item_id: int | long
    :param log_ind: (optional) any additional message to log upon creation of
        the bitstream
    :type log_ind: None | string
    """
    checksum = _streaming_file_md5(file_path)
    upload_token = session.communicator.generate_upload_token(
        session.token, item_id, local_file, checksum)

    if upload_token != '':
        log_trace = 'Uploading bitstream from {0}'.format(file_path)
        # only need to perform the upload if this content has not been
        # uploaded before; in that case, the upload token is non-empty
        session.communicator.perform_upload(
            upload_token, local_file, filepath=file_path, itemid=item_id)
    else:
        log_trace = 'Adding a bitstream link in this item to an existing ' \
                    'bitstream from {0}'.format(file_path)

    if log_ind is not None:
        log_trace += log_ind
    print(log_trace)
def _upload_as_item(local_file, parent_folder_id, file_path,
                    reuse_existing=False):
    """
    Upload a file as an item. This should be a building block for user-level
    functions.

    :param local_file: name of local file to upload
    :type local_file: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the item will be added
    :type parent_folder_id: int | long
    :param file_path: full path to the file
    :type file_path: string
    :param reuse_existing: (optional) whether to accept an existing item of
        the same name in the same location, or create a new one instead
    :type reuse_existing: bool
    """
    current_item_id = _create_or_reuse_item(local_file, parent_folder_id,
                                            reuse_existing)
    _create_bitstream(file_path, local_file, current_item_id)
    for callback in session.item_upload_callbacks:
        callback(session.communicator, session.token, current_item_id)
def _create_folder(local_folder, parent_folder_id):
    """
    Create a remote folder and return its id. This should be a building block
    for user-level functions.

    :param local_folder: full path to a local folder
    :type local_folder: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the new folder will be added
    :type parent_folder_id: int | long
    :returns: id of the remote folder that was created
    :rtype: int | long
    """
    new_folder = session.communicator.create_folder(
        session.token, os.path.basename(local_folder), parent_folder_id)
    return new_folder['folder_id']
def _upload_folder_recursive(local_folder,
                             parent_folder_id,
                             leaf_folders_as_items=False,
                             reuse_existing=False):
    """
    Recursively upload a folder and all of its descendants.

    :param local_folder: full path to local folder to be uploaded
    :type local_folder: string
    :param parent_folder_id: id of parent folder on the Midas Server instance,
        where the new folder will be added
    :type parent_folder_id: int | long
    :param leaf_folders_as_items: (optional) whether leaf folders should have
        all files uploaded as single items
    :type leaf_folders_as_items: bool
    :param reuse_existing: (optional) whether to accept an existing item of
        the same name in the same location, or create a new one instead
    :type reuse_existing: bool
    """
    if leaf_folders_as_items and _has_only_files(local_folder):
        print('Creating item from {0}'.format(local_folder))
        _upload_folder_as_item(local_folder, parent_folder_id, reuse_existing)
    else:
        # do not need to check if the folder exists; if it does, an attempt
        # to create it will just return the existing id
        print('Creating folder from {0}'.format(local_folder))
        new_folder_id = _create_or_reuse_folder(local_folder,
                                                parent_folder_id,
                                                reuse_existing)

        for entry in sorted(os.listdir(local_folder)):
            full_entry = os.path.join(local_folder, entry)
            if os.path.islink(full_entry):
                # os.walk skips symlinks by default
                continue
            elif os.path.isdir(full_entry):
                _upload_folder_recursive(full_entry,
                                         new_folder_id,
                                         leaf_folders_as_items,
                                         reuse_existing)
            else:
                print('Uploading item from {0}'.format(full_entry))
                _upload_as_item(entry,
                                new_folder_id,
                                full_entry,
                                reuse_existing)
def _has_only_files(local_folder): """ Return whether a folder contains only files. This will be False if the folder contains any subdirectories. :param local_folder: full path to the local folder :type local_folder: string :returns: True if the folder contains only files :rtype: bool """ return not any(os.path.isdir(os.path.join(local_folder, entry)) for entry in os.listdir(local_folder))
def _upload_folder_as_item(local_folder, parent_folder_id, reuse_existing=False): """ Upload a folder as a new item. Take a folder and use its base name as the name of a new item. Then, upload its containing files into the new item as bitstreams. :param local_folder: The path to the folder to be uploaded :type local_folder: string :param parent_folder_id: The id of the destination folder for the new item. :type parent_folder_id: int | long :param reuse_existing: (optional) whether to accept an existing item of the same name in the same location, or create a new one instead :type reuse_existing: bool """ item_id = _create_or_reuse_item(local_folder, parent_folder_id, reuse_existing) subdir_contents = sorted(os.listdir(local_folder)) # for each file in the subdir, add it to the item filecount = len(subdir_contents) for (ind, current_file) in enumerate(subdir_contents): file_path = os.path.join(local_folder, current_file) log_ind = '({0} of {1})'.format(ind + 1, filecount) _create_bitstream(file_path, current_file, item_id, log_ind) for callback in session.item_upload_callbacks: callback(session.communicator, session.token, item_id)
def upload(file_pattern, destination='Private', leaf_folders_as_items=False,
           reuse_existing=False):
    """
    Upload a pattern of files.

    This will recursively walk down every tree in the file pattern to create a
    hierarchy on the server. As of right now, this places the files under the
    currently logged-in user's home directory.

    :param file_pattern: a glob type pattern for files
    :type file_pattern: string
    :param destination: (optional) name of the Midas Server destination
        folder, defaults to Private
    :type destination: string
    :param leaf_folders_as_items: (optional) whether leaf folders should have
        all files uploaded as single items
    :type leaf_folders_as_items: bool
    :param reuse_existing: (optional) whether to accept an existing item of
        the same name in the same location, or create a new one instead
    :type reuse_existing: bool
    """
    session.token = verify_credentials()

    # Logic for finding the proper folder to place the files in.
    parent_folder_id = None
    user_folders = session.communicator.list_user_folders(session.token)
    if destination.startswith('/'):
        # _find_resource_id_from_path returns an (is_item, id) tuple; only a
        # successfully resolved folder id makes sense as a destination here
        is_item, resource_id = _find_resource_id_from_path(destination)
        if not is_item and resource_id != -1:
            parent_folder_id = resource_id
    else:
        for cur_folder in user_folders:
            if cur_folder['name'] == destination:
                parent_folder_id = cur_folder['folder_id']
    if parent_folder_id is None:
        print('Unable to locate specified destination. Defaulting to {0}.'
              .format(user_folders[0]['name']))
        parent_folder_id = user_folders[0]['folder_id']

    for current_file in glob.iglob(file_pattern):
        current_file = os.path.normpath(current_file)
        if os.path.isfile(current_file):
            print('Uploading item from {0}'.format(current_file))
            _upload_as_item(os.path.basename(current_file),
                            parent_folder_id,
                            current_file,
                            reuse_existing)
        else:
            _upload_folder_recursive(current_file,
                                     parent_folder_id,
                                     leaf_folders_as_items,
                                     reuse_existing)
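A hedged example of the user-level entry point above: mirror a local tree into the 'Private' folder, collapsing leaf directories into single multi-bitstream items. The glob pattern is a placeholder.

login()  # prompts for URL, email, and password if nothing is cached
upload('data/experiments/*',
       destination='Private',
       leaf_folders_as_items=True,
       reuse_existing=True)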
def _descend_folder_for_id(parsed_path, folder_id): """ Descend a path to return a folder id starting from the given folder id. :param parsed_path: a list of folders from top to bottom of a hierarchy :type parsed_path: list[string] :param folder_id: The id of the folder from which to start the descent :type folder_id: int | long :returns: The id of the found folder or -1 :rtype: int | long """ if len(parsed_path) == 0: return folder_id session.token = verify_credentials() base_folder = session.communicator.folder_get(session.token, folder_id) cur_folder_id = -1 for path_part in parsed_path: cur_folder_id = base_folder['folder_id'] cur_children = session.communicator.folder_children( session.token, cur_folder_id) for inner_folder in cur_children['folders']: if inner_folder['name'] == path_part: base_folder = session.communicator.folder_get( session.token, inner_folder['folder_id']) cur_folder_id = base_folder['folder_id'] break else: return -1 return cur_folder_id
def _search_folder_for_item_or_folder(name, folder_id):
    """
    Find an item or folder matching the name. A folder will be found first if
    both are present.

    :param name: The name of the resource
    :type name: string
    :param folder_id: The folder to search within
    :type folder_id: int | long
    :returns: A tuple indicating whether the resource is an item and the id of
        said resource, i.e. (True, item_id) or (False, folder_id). Note that
        if no result is found, (False, -1) is returned.
    :rtype: (bool, int | long)
    """
    session.token = verify_credentials()

    children = session.communicator.folder_children(session.token, folder_id)
    for folder in children['folders']:
        if folder['name'] == name:
            return False, folder['folder_id']  # Found a folder

    for item in children['items']:
        if item['name'] == name:
            return True, item['item_id']  # Found an item

    return False, -1
def _find_resource_id_from_path(path):
    """
    Get a resource id from a path on the server.

    Warning: This is NOT efficient at all.

    The schema for this path is:

    path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/}
    name := <firstname> , "_" , <lastname>

    :param path: The virtual path on the server.
    :type path: string
    :returns: a tuple indicating whether the resource is an item and the id
        of the resource, i.e. (True, item_id) or (False, folder_id)
    :rtype: (bool, int | long)
    """
    session.token = verify_credentials()
    parsed_path = path.split('/')
    if parsed_path[-1] == '':
        parsed_path.pop()
    if path.startswith('/users/'):
        parsed_path.pop(0)  # remove the empty string before the leading '/'
        parsed_path.pop(0)  # remove 'users'
        name = parsed_path.pop(0)  # remove '<firstname>_<lastname>'
        firstname, lastname = name.split('_')
        end = parsed_path.pop()
        user = session.communicator.get_user_by_name(firstname, lastname)
        leaf_folder_id = _descend_folder_for_id(parsed_path, user['folder_id'])
        return _search_folder_for_item_or_folder(end, leaf_folder_id)
    elif path.startswith('/communities/'):
        parsed_path.pop(0)  # remove the empty string before the leading '/'
        parsed_path.pop(0)  # remove 'communities'
        community_name = parsed_path.pop(0)  # remove '<community>'
        end = parsed_path.pop()
        community = session.communicator.get_community_by_name(community_name)
        leaf_folder_id = _descend_folder_for_id(parsed_path,
                                                community['folder_id'])
        return _search_folder_for_item_or_folder(end, leaf_folder_id)
    else:
        return False, -1
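A sketch of resolving a virtual server path with the helper above. The path is a placeholder that follows the /users/ schema from the docstring.

is_item, resource_id = _find_resource_id_from_path(
    '/users/jane_doe/Private/experiments')
if resource_id == -1:
    print('Resource not found')
elif is_item:
    print('Found item {0}'.format(resource_id))
else:
    print('Found folder {0}'.format(resource_id))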
def _download_folder_recursive(folder_id, path='.'): """ Download a folder to the specified path along with any children. :param folder_id: The id of the target folder :type folder_id: int | long :param path: (optional) the location to download the folder :type path: string """ session.token = verify_credentials() cur_folder = session.communicator.folder_get(session.token, folder_id) # Replace any '/' in the folder name. folder_path = os.path.join(path, cur_folder['name'].replace('/', '_')) print('Creating folder at {0}'.format(folder_path)) try: os.mkdir(folder_path) except OSError as e: if e.errno == errno.EEXIST and session.allow_existing_download_paths: pass else: raise cur_children = session.communicator.folder_children( session.token, folder_id) for item in cur_children['items']: _download_item(item['item_id'], folder_path, item=item) for folder in cur_children['folders']: _download_folder_recursive(folder['folder_id'], folder_path) for callback in session.folder_download_callbacks: callback(session.communicator, session.token, cur_folder, folder_path)
def _download_item(item_id, path='.', item=None):
    """
    Download the requested item to the specified path.

    :param item_id: The id of the item to be downloaded
    :type item_id: int | long
    :param path: (optional) the location to download the item
    :type path: string
    :param item: The dict of item info
    :type item: dict | None
    """
    session.token = verify_credentials()

    filename, content_iter = session.communicator.download_item(
        item_id, session.token)
    item_path = os.path.join(path, filename)
    print('Creating file at {0}'.format(item_path))
    # a context manager closes the file even if a download block fails
    with open(item_path, 'wb') as out_file:
        for block in content_iter:
            out_file.write(block)

    for callback in session.item_download_callbacks:
        if not item:
            item = session.communicator.item_get(session.token, item_id)
        callback(session.communicator, session.token, item, item_path)
def download(server_path, local_path='.'):
    """
    Recursively download a folder, or download a single item, from the Midas
    Server instance.

    :param server_path: The location on the server of the resource to
        download
    :type server_path: string
    :param local_path: The location on the client to store the downloaded
        data
    :type local_path: string
    """
    session.token = verify_credentials()

    is_item, resource_id = _find_resource_id_from_path(server_path)
    if resource_id == -1:
        print('Unable to locate {0}'.format(server_path))
    else:
        if is_item:
            _download_item(resource_id, local_path)
        else:
            _download_folder_recursive(resource_id, local_path)
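Hedged usage of the download entry point: pull a folder tree (or a single item) into the current directory. The server path is a placeholder.

# Permit downloading into folders that already exist locally; this flag is
# read by _download_folder_recursive above.
session.allow_existing_download_paths = True
download('/users/jane_doe/Private/experiments', local_path='.')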
def request(self, method, parameters=None, file_payload=None):
        """
        Do the generic processing of a request to the server.

        If file_payload is specified, it will be PUT to the server.

        :param method: Desired API method
        :type method: string
        :param parameters: (optional) Parameters to pass in the HTTP body
        :type parameters: None | dict[string, string]
        :param file_payload: (optional) File-like object to be sent with the
            HTTP request
        :type file_payload: None | file | FileIO
        :returns: Dictionary representing the JSON response to the request
        :rtype: dict
        :raises pydas.exceptions.PydasException: if the request failed
        """
        method_url = self.full_url + method
        response = None
        try:
            if file_payload:
                response = requests.put(method_url,
                                        data=file_payload.read(),
                                        params=parameters,
                                        allow_redirects=True,
                                        verify=self._verify_ssl_certificate,
                                        auth=self.auth)
            else:
                response = requests.post(
                    method_url,
                    params=parameters,
                    allow_redirects=True,
                    verify=self._verify_ssl_certificate,
                    auth=self.auth)
        except requests.exceptions.SSLError:
            exception = pydas.exceptions.SSLVerificationFailed(
                'Request failed with an SSL verification error')
            exception.method = method
            # the request never completed, so a response object may not exist
            if response is not None:
                exception.request = response.request
            raise exception
        except requests.exceptions.ConnectionError:
            exception = pydas.exceptions.RequestError(
                'Request failed with a connection error')
            exception.method = method
            if response is not None:
                exception.request = response.request
            raise exception
        status_code = response.status_code
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError:
            error_code = None
            message = 'Request failed with HTTP status code {0}'.format(
                status_code)
            try:
                content = response.json()
                if 'code' in content:
                    error_code = int(content['code'])
                    message = 'Request failed with HTTP status code {0}, ' \
                              'Midas Server error code {1}, and response ' \
                              'content {2}'.format(status_code, error_code,
                                                   response.content)
            except ValueError:
                pass
            exception = pydas.exceptions \
                .get_exception_from_status_and_error_codes(status_code,
                                                           error_code,
                                                           message)
            exception.code = error_code
            exception.method = method
            exception.response = response
            raise exception
        try:
            content = response.json()
        except ValueError:
            exception = pydas.exceptions.ParseError(
                'Request failed with HTTP status code {0} and response '
                'content {1}'.format(status_code, response.content))
            exception.method = method
            exception.response = response
            raise exception
        if 'stat' not in content:
            exception = pydas.exceptions.ParseError(
                'Request failed with HTTP status code {0} and response '
                'content {1}'.format(status_code, response.content))
            exception.method = method
            raise exception
        if content['stat'] != 'ok':
            if 'code' in content:
                error_code = int(content['code'])
                message = 'Request failed with HTTP status code {0}, Midas ' \
                          'Server error code {1}, and response content {2}' \
                    .format(status_code, error_code, response.content)
            else:
                error_code = None
                message = 'Request failed with HTTP status code {0} and ' \
                          'response content {1}'.format(status_code,
                                                        response.content)
            exception = pydas.exceptions \
                .get_exception_from_status_and_error_codes(status_code,
                                                           error_code,
                                                           message)
            exception.method = method
            exception.response = response
            raise exception
        if 'data' not in content:
            exception = pydas.exceptions.ParseError(
                'Request failed with HTTP status code {0} and response '
                'content {1}'.format(status_code, response.content))
            exception.method = method
            exception.response = response
            raise exception
        return content['data']
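A sketch of calling request() directly and handling the typed exceptions it raises; per the docstring above, pydas.exceptions.PydasException is their base class. Here `driver` stands in for an instance of the class defining request(), and 'midas.version' is assumed to be an available server route.

try:
    data = driver.request('midas.version')
except pydas.exceptions.PydasException as e:
    # every exception raised by request() carries the failing method name
    print('Call to {0} failed: {1}'.format(e.method, e))
else:
    print(data)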
def login_with_api_key(self, email, api_key, application='Default'):
        """
        Login and get a token. If you do not specify a specific application,
        'Default' will be used.

        :param email: Email address of the user
        :type email: string
        :param api_key: API key assigned to the user
        :type api_key: string
        :param application: (optional) Application designated for this API key
        :type application: string
        :returns: Token to be used for interaction with the API until
            expiration
        :rtype: string
        """
        parameters = dict()
        parameters['email'] = BaseDriver.email = email  # Cache email
        parameters['apikey'] = BaseDriver.apikey = api_key  # Cache API key
        parameters['appname'] = application
        response = self.request('midas.login', parameters)
        if 'token' in response:  # normal case
            return response['token']
        if 'mfa_token_id' in response:  # multi-factor authentication enabled
            return response['mfa_token_id']
def list_user_folders(self, token):
        """
        List the folders in the user's home area.

        :param token: A valid token for the user in question.
        :type token: string
        :returns: List of dictionaries containing folder information.
        :rtype: list[dict]
        """
        parameters = dict()
        parameters['token'] = token
        response = self.request('midas.user.folders', parameters)
        return response
def get_default_api_key(self, email, password):
        """
        Get the default API key for a user.

        :param email: The email of the user.
        :type email: string
        :param password: The user's password.
        :type password: string
        :returns: The user's default API key.
        :rtype: string
        """
        parameters = dict()
        parameters['email'] = email
        parameters['password'] = password
        response = self.request('midas.user.apikey.default', parameters)
        return response['apikey']
def list_users(self, limit=20): """ List the public users in the system. :param limit: (optional) The number of users to fetch. :type limit: int | long :returns: The list of users. :rtype: list[dict] """ parameters = dict() parameters['limit'] = limit response = self.request('midas.user.list', parameters) return response
def get_user_by_name(self, firstname, lastname): """ Get a user by the first and last name of that user. :param firstname: The first name of the user. :type firstname: string :param lastname: The last name of the user. :type lastname: string :returns: The user requested. :rtype: dict """ parameters = dict() parameters['firstname'] = firstname parameters['lastname'] = lastname response = self.request('midas.user.get', parameters) return response
def get_user_by_id(self, user_id):
        """
        Get a user by id.

        :param user_id: The id of the desired user.
        :type user_id: int | long
        :returns: The user requested.
        :rtype: dict
        """
        parameters = dict()
        parameters['user_id'] = user_id
        response = self.request('midas.user.get', parameters)
        return response
def get_user_by_email(self, email): """ Get a user by the email of that user. :param email: The email of the desired user. :type email: string :returns: The user requested. :rtype: dict """ parameters = dict() parameters['email'] = email response = self.request('midas.user.get', parameters) return response
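A hedged sketch tying the user lookups together: all three hit the same midas.user.get route with different parameters. Here `comm` stands in for a driver instance, and the 'firstname'/'lastname' keys on the returned dict are assumptions about the server's user schema.

user = comm.get_user_by_email('jane@example.org')
# the same record can be fetched again by id or by name
same_user = comm.get_user_by_id(user['user_id'])
also_same = comm.get_user_by_name(user['firstname'], user['lastname'])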