Dataset columns (type and value-length range):
repository_name: strings, 5 to 67 characters
func_path_in_repository: strings, 4 to 234 characters
func_name: strings, 0 to 314 characters
whole_func_string: strings, 52 to 3.87M characters
language: 6 classes
func_code_string: strings, 52 to 3.87M characters
func_documentation_string: strings, 1 to 47.2k characters
func_code_url: strings, 85 to 339 characters
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
license
def license(self, key, value): """Populate the ``license`` key.""" def _get_license(value): a_values = force_list(value.get('a')) oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access'] other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access'] if not other_licenses: return force_single_element(oa_licenses) return force_single_element(other_licenses) def _get_material(value): material = value.get('3', '').lower() if material == 'article': return 'publication' return material return { 'imposing': value.get('b'), 'license': _get_license(value), 'material': _get_material(value), 'url': value.get('u'), }
python
def license(self, key, value): """Populate the ``license`` key.""" def _get_license(value): a_values = force_list(value.get('a')) oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access'] other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access'] if not other_licenses: return force_single_element(oa_licenses) return force_single_element(other_licenses) def _get_material(value): material = value.get('3', '').lower() if material == 'article': return 'publication' return material return { 'imposing': value.get('b'), 'license': _get_license(value), 'material': _get_material(value), 'url': value.get('u'), }
Populate the ``license`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L221-L244
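The rule above prefers a concrete license statement over the generic 'OA'/'Open Access' markers in the repeated ``a`` subfield. A minimal, self-contained sketch of that selection logic (the helpers below are simplified stand-ins for the force_list and force_single_element utilities used in the record, not the real implementations):

def force_list(value):
    # Simplified stand-in: wrap a scalar in a list, pass lists through, drop None.
    if value is None:
        return []
    return list(value) if isinstance(value, (list, tuple)) else [value]

def force_single_element(value):
    # Simplified stand-in: return the first element of a list, or None if empty.
    values = force_list(value)
    return values[0] if values else None

def pick_license(a_values):
    # Prefer an explicit license over the generic open-access markers.
    a_values = force_list(a_values)
    oa = [el for el in a_values if el in ('OA', 'Open Access')]
    other = [el for el in a_values if el not in ('OA', 'Open Access')]
    return force_single_element(other) if other else force_single_element(oa)

print(pick_license(['OA', 'CC-BY-4.0']))  # CC-BY-4.0
print(pick_license(['Open Access']))      # Open Access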
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
license2marc
def license2marc(self, key, value): """Populate the ``540`` MARC field.""" return { 'a': value.get('license'), 'b': value.get('imposing'), 'u': value.get('url'), '3': value.get('material'), }
python
def license2marc(self, key, value): """Populate the ``540`` MARC field.""" return { 'a': value.get('license'), 'b': value.get('imposing'), 'u': value.get('url'), '3': value.get('material'), }
Populate the ``540`` MARC field.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L249-L256
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
copyright
def copyright(self, key, value): """Populate the ``copyright`` key.""" MATERIAL_MAP = { 'Article': 'publication', 'Published thesis as a book': 'publication', } material = value.get('e') or value.get('3') return { 'holder': value.get('d'), 'material': MATERIAL_MAP.get(material), 'statement': value.get('f'), 'url': value.get('u'), 'year': maybe_int(value.get('g')), }
python
def copyright(self, key, value): """Populate the ``copyright`` key.""" MATERIAL_MAP = { 'Article': 'publication', 'Published thesis as a book': 'publication', } material = value.get('e') or value.get('3') return { 'holder': value.get('d'), 'material': MATERIAL_MAP.get(material), 'statement': value.get('f'), 'url': value.get('u'), 'year': maybe_int(value.get('g')), }
Populate the ``copyright`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L261-L276
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
copyright2marc
def copyright2marc(self, key, value): """Populate the ``542`` MARC field.""" E_MAP = { 'publication': 'Article', } e_value = value.get('material') return { 'd': value.get('holder'), 'e': E_MAP.get(e_value), 'f': value.get('statement'), 'g': value.get('year'), 'u': value.get('url'), }
python
def copyright2marc(self, key, value): """Populate the ``542`` MARC field.""" E_MAP = { 'publication': 'Article', } e_value = value.get('material') return { 'd': value.get('holder'), 'e': E_MAP.get(e_value), 'f': value.get('statement'), 'g': value.get('year'), 'u': value.get('url'), }
Populate the ``542`` MARC field.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L281-L295
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
_private_notes
def _private_notes(self, key, value): """Populate the ``_private_notes`` key. Also populates the ``_export_to`` key through side effects. """ def _is_for_cds(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'CDS' in normalized_c_values def _is_for_hal(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'HAL' in normalized_c_values def _is_not_for_hal(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'NOT HAL' in normalized_c_values _private_notes = self.get('_private_notes', []) _export_to = self.get('_export_to', {}) for value in force_list(value): if _is_for_cds(value): _export_to['CDS'] = True if _is_for_hal(value): _export_to['HAL'] = True elif _is_not_for_hal(value): _export_to['HAL'] = False source = force_single_element(value.get('9')) for _private_note in force_list(value.get('a')): _private_notes.append({ 'source': source, 'value': _private_note, }) self['_export_to'] = _export_to return _private_notes
python
def _private_notes(self, key, value): """Populate the ``_private_notes`` key. Also populates the ``_export_to`` key through side effects. """ def _is_for_cds(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'CDS' in normalized_c_values def _is_for_hal(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'HAL' in normalized_c_values def _is_not_for_hal(value): normalized_c_values = [el.upper() for el in force_list(value.get('c'))] return 'NOT HAL' in normalized_c_values _private_notes = self.get('_private_notes', []) _export_to = self.get('_export_to', {}) for value in force_list(value): if _is_for_cds(value): _export_to['CDS'] = True if _is_for_hal(value): _export_to['HAL'] = True elif _is_not_for_hal(value): _export_to['HAL'] = False source = force_single_element(value.get('9')) for _private_note in force_list(value.get('a')): _private_notes.append({ 'source': source, 'value': _private_note, }) self['_export_to'] = _export_to return _private_notes
Populate the ``_private_notes`` key. Also populates the ``_export_to`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L299-L336
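The ``c`` subfields of the 595 field drive the ``_export_to`` side effect in the rule above. A small sketch of just that flag logic, using a plain dict in place of the dojson ``self``:

def update_export_flags(export_to, c_values):
    # Normalize the repeated ``c`` subfields and toggle the export flags.
    normalized = [el.upper() for el in c_values]
    if 'CDS' in normalized:
        export_to['CDS'] = True
    if 'HAL' in normalized:
        export_to['HAL'] = True
    elif 'NOT HAL' in normalized:
        export_to['HAL'] = False
    return export_to

print(update_export_flags({}, ['CDS', 'not HAL']))  # {'CDS': True, 'HAL': False}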
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
_private_notes2marc
def _private_notes2marc(self, key, value): """Populate the ``595`` MARC key. Also populates the `595_H` MARC key through side effects. """ def _is_from_hal(value): return value.get('source') == 'HAL' if not _is_from_hal(value): return { '9': value.get('source'), 'a': value.get('value'), } self.setdefault('595_H', []).append({'a': value.get('value')})
python
def _private_notes2marc(self, key, value): """Populate the ``595`` MARC key. Also populates the `595_H` MARC key through side effects. """ def _is_from_hal(value): return value.get('source') == 'HAL' if not _is_from_hal(value): return { '9': value.get('source'), 'a': value.get('value'), } self.setdefault('595_H', []).append({'a': value.get('value')})
Populate the ``595`` MARC key. Also populates the `595_H` MARC key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L341-L355
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
_export_to2marc
def _export_to2marc(self, key, value): """Populate the ``595`` MARC field.""" def _is_for_cds(value): return 'CDS' in value def _is_for_hal(value): return 'HAL' in value and value['HAL'] def _is_not_for_hal(value): return 'HAL' in value and not value['HAL'] result = [] if _is_for_cds(value): result.append({'c': 'CDS'}) if _is_for_hal(value): result.append({'c': 'HAL'}) elif _is_not_for_hal(value): result.append({'c': 'not HAL'}) return result
python
def _export_to2marc(self, key, value): """Populate the ``595`` MARC field.""" def _is_for_cds(value): return 'CDS' in value def _is_for_hal(value): return 'HAL' in value and value['HAL'] def _is_not_for_hal(value): return 'HAL' in value and not value['HAL'] result = [] if _is_for_cds(value): result.append({'c': 'CDS'}) if _is_for_hal(value): result.append({'c': 'HAL'}) elif _is_not_for_hal(value): result.append({'c': 'not HAL'}) return result
Populate the ``595`` MARC field.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L359-L380
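Going back to MARC, the same flags are serialized as repeated ``c`` subfields. A minimal sketch mirroring the predicates above:

def export_flags_to_marc(value):
    # value is a dict such as {'CDS': True, 'HAL': False}.
    result = []
    if 'CDS' in value:
        result.append({'c': 'CDS'})
    if 'HAL' in value and value['HAL']:
        result.append({'c': 'HAL'})
    elif 'HAL' in value and not value['HAL']:
        result.append({'c': 'not HAL'})
    return result

print(export_flags_to_marc({'CDS': True, 'HAL': False}))
# [{'c': 'CDS'}, {'c': 'not HAL'}]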
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
_desy_bookkeeping
def _desy_bookkeeping(self, key, value): """Populate the ``_desy_bookkeeping`` key.""" return { 'date': normalize_date(value.get('d')), 'expert': force_single_element(value.get('a')), 'status': value.get('s'), }
python
def _desy_bookkeeping(self, key, value): """Populate the ``_desy_bookkeeping`` key.""" return { 'date': normalize_date(value.get('d')), 'expert': force_single_element(value.get('a')), 'status': value.get('s'), }
Populate the ``_desy_bookkeeping`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L385-L391
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd5xx.py
_desy_bookkeeping2marc
def _desy_bookkeeping2marc(self, key, value): """Populate the ``595_D`` MARC field. Also populates the ``035`` MARC field through side effects. """ if 'identifier' not in value: return { 'a': value.get('expert'), 'd': value.get('date'), 's': value.get('status'), } self.setdefault('035', []).append({ '9': 'DESY', 'z': value['identifier'] })
python
def _desy_bookkeeping2marc(self, key, value): """Populate the ``595_D`` MARC field. Also populates the ``035`` MARC field through side effects. """ if 'identifier' not in value: return { 'a': value.get('expert'), 'd': value.get('date'), 's': value.get('status'), } self.setdefault('035', []).append({ '9': 'DESY', 'z': value['identifier'] })
Populate the ``595_D`` MARC field. Also populates the ``035`` MARC field through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd5xx.py#L396-L411
inspirehep/inspire-dojson
inspire_dojson/experiments/rules.py
_dates
def _dates(self, key, value): """Don't populate any key through the return value. On the other hand, populates the ``date_proposed``, ``date_approved``, ``date_started``, ``date_cancelled``, and the ``date_completed`` keys through side effects. """ if value.get('q'): self['date_proposed'] = normalize_date(value['q']) if value.get('r'): self['date_approved'] = normalize_date(value['r']) if value.get('s'): self['date_started'] = normalize_date(value['s']) if value.get('c'): self['date_cancelled'] = normalize_date(value['c']) if value.get('t'): self['date_completed'] = normalize_date(value['t']) raise IgnoreKey
python
def _dates(self, key, value): """Don't populate any key through the return value. On the other hand, populates the ``date_proposed``, ``date_approved``, ``date_started``, ``date_cancelled``, and the ``date_completed`` keys through side effects. """ if value.get('q'): self['date_proposed'] = normalize_date(value['q']) if value.get('r'): self['date_approved'] = normalize_date(value['r']) if value.get('s'): self['date_started'] = normalize_date(value['s']) if value.get('c'): self['date_cancelled'] = normalize_date(value['c']) if value.get('t'): self['date_completed'] = normalize_date(value['t']) raise IgnoreKey
Don't populate any key through the return value. On the other hand, populates the ``date_proposed``, ``date_approved``, ``date_started``, ``date_cancelled``, and the ``date_completed`` keys through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/experiments/rules.py#L42-L60
inspirehep/inspire-dojson
inspire_dojson/experiments/rules.py
experiment
def experiment(self, key, values): """Populate the ``experiment`` key. Also populates the ``legacy_name``, the ``accelerator``, and the ``institutions`` keys through side effects. """ experiment = self.get('experiment', {}) legacy_name = self.get('legacy_name', '') accelerator = self.get('accelerator', {}) institutions = self.get('institutions', []) for value in force_list(values): if value.get('c'): experiment['value'] = value.get('c') if value.get('d'): experiment['short_name'] = value.get('d') if value.get('a'): legacy_name = value.get('a') if value.get('b'): accelerator['value'] = value.get('b') institution = {} if value.get('u'): institution['value'] = value.get('u') if value.get('z'): record = get_record_ref(maybe_int(value.get('z')), 'institutions') if record: institution['curated_relation'] = True institution['record'] = record institutions.append(institution) self['legacy_name'] = legacy_name self['accelerator'] = accelerator self['institutions'] = institutions return experiment
python
def experiment(self, key, values): """Populate the ``experiment`` key. Also populates the ``legacy_name``, the ``accelerator``, and the ``institutions`` keys through side effects. """ experiment = self.get('experiment', {}) legacy_name = self.get('legacy_name', '') accelerator = self.get('accelerator', {}) institutions = self.get('institutions', []) for value in force_list(values): if value.get('c'): experiment['value'] = value.get('c') if value.get('d'): experiment['short_name'] = value.get('d') if value.get('a'): legacy_name = value.get('a') if value.get('b'): accelerator['value'] = value.get('b') institution = {} if value.get('u'): institution['value'] = value.get('u') if value.get('z'): record = get_record_ref(maybe_int(value.get('z')), 'institutions') if record: institution['curated_relation'] = True institution['record'] = record institutions.append(institution) self['legacy_name'] = legacy_name self['accelerator'] = accelerator self['institutions'] = institutions return experiment
Populate the ``experiment`` key. Also populates the ``legacy_name``, the ``accelerator``, and the ``institutions`` keys through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/experiments/rules.py#L64-L100
inspirehep/inspire-dojson
inspire_dojson/experiments/rules.py
core
def core(self, key, value): """Populate the ``core`` key. Also populates the ``deleted`` and ``project_type`` keys through side effects. """ core = self.get('core') deleted = self.get('deleted') project_type = self.get('project_type', []) if not core: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'CORE' in normalized_a_values: core = True if not deleted: normalized_c_values = [el.upper() for el in force_list(value.get('c'))] if 'DELETED' in normalized_c_values: deleted = True if not project_type: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'ACCELERATOR' in normalized_a_values: project_type.append('accelerator') self['project_type'] = project_type self['deleted'] = deleted return core
python
def core(self, key, value): """Populate the ``core`` key. Also populates the ``deleted`` and ``project_type`` keys through side effects. """ core = self.get('core') deleted = self.get('deleted') project_type = self.get('project_type', []) if not core: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'CORE' in normalized_a_values: core = True if not deleted: normalized_c_values = [el.upper() for el in force_list(value.get('c'))] if 'DELETED' in normalized_c_values: deleted = True if not project_type: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'ACCELERATOR' in normalized_a_values: project_type.append('accelerator') self['project_type'] = project_type self['deleted'] = deleted return core
Populate the ``core`` key. Also populates the ``deleted`` and ``project_type`` keys through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/experiments/rules.py#L170-L197
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
control_number
def control_number(endpoint): """Populate the ``control_number`` key. Also populates the ``self`` key through side effects. """ def _control_number(self, key, value): self['self'] = get_record_ref(int(value), endpoint) return int(value) return _control_number
python
def control_number(endpoint): """Populate the ``control_number`` key. Also populates the ``self`` key through side effects. """ def _control_number(self, key, value): self['self'] = get_record_ref(int(value), endpoint) return int(value) return _control_number
Populate the ``control_number`` key. Also populates the ``self`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L586-L595
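control_number is a rule factory: it closes over ``endpoint`` and returns the rule that dojson actually calls. The same pattern reduced to a runnable sketch; get_record_ref below is a hypothetical stand-in and the URL shape is purely illustrative:

def get_record_ref(recid, endpoint):
    # Hypothetical stand-in for inspire_dojson.utils.get_record_ref.
    return {'$ref': 'http://example.org/api/{}/{}'.format(endpoint, recid)}

def control_number(endpoint):
    # Return a rule bound to a specific endpoint ('literature', 'journals', ...).
    def _control_number(self, key, value):
        self['self'] = get_record_ref(int(value), endpoint)
        return int(value)
    return _control_number

record = {}
rule = control_number('literature')
print(rule(record, '001', '12345'))  # 12345
print(record['self'])  # {'$ref': 'http://example.org/api/literature/12345'}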
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
acquisition_source
def acquisition_source(self, key, value): """Populate the ``acquisition_source`` key.""" def _get_datetime(value): d_value = force_single_element(value.get('d', '')) if d_value: try: date = PartialDate.loads(d_value) except ValueError: return d_value else: datetime_ = datetime(year=date.year, month=date.month, day=date.day) return datetime_.isoformat() internal_uid, orcid, source = None, None, None a_values = force_list(value.get('a')) for a_value in a_values: if IS_INTERNAL_UID.match(a_value): if a_value.startswith('inspire:uid:'): internal_uid = int(a_value[12:]) else: internal_uid = int(a_value) elif IS_ORCID.match(a_value): if a_value.startswith('orcid:'): orcid = a_value[6:] else: orcid = a_value else: source = a_value c_value = force_single_element(value.get('c', '')) normalized_c_value = c_value.lower() if normalized_c_value == 'batchupload': method = 'batchuploader' elif normalized_c_value == 'submission': method = 'submitter' else: method = normalized_c_value return { 'datetime': _get_datetime(value), 'email': value.get('b'), 'internal_uid': internal_uid, 'method': method, 'orcid': orcid, 'source': source, 'submission_number': value.get('e'), }
python
def acquisition_source(self, key, value): """Populate the ``acquisition_source`` key.""" def _get_datetime(value): d_value = force_single_element(value.get('d', '')) if d_value: try: date = PartialDate.loads(d_value) except ValueError: return d_value else: datetime_ = datetime(year=date.year, month=date.month, day=date.day) return datetime_.isoformat() internal_uid, orcid, source = None, None, None a_values = force_list(value.get('a')) for a_value in a_values: if IS_INTERNAL_UID.match(a_value): if a_value.startswith('inspire:uid:'): internal_uid = int(a_value[12:]) else: internal_uid = int(a_value) elif IS_ORCID.match(a_value): if a_value.startswith('orcid:'): orcid = a_value[6:] else: orcid = a_value else: source = a_value c_value = force_single_element(value.get('c', '')) normalized_c_value = c_value.lower() if normalized_c_value == 'batchupload': method = 'batchuploader' elif normalized_c_value == 'submission': method = 'submitter' else: method = normalized_c_value return { 'datetime': _get_datetime(value), 'email': value.get('b'), 'internal_uid': internal_uid, 'method': method, 'orcid': orcid, 'source': source, 'submission_number': value.get('e'), }
Populate the ``acquisition_source`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L634-L682
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
public_notes_500
def public_notes_500(self, key, value): """Populate the ``public_notes`` key.""" return [ { 'source': value.get('9'), 'value': public_note, } for public_note in force_list(value.get('a')) ]
python
def public_notes_500(self, key, value): """Populate the ``public_notes`` key.""" return [ { 'source': value.get('9'), 'value': public_note, } for public_note in force_list(value.get('a')) ]
Populate the ``public_notes`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L719-L726
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
_private_notes_595
def _private_notes_595(self, key, value): """Populate the ``_private_notes`` key.""" return [ { 'source': value.get('9'), 'value': _private_note, } for _private_note in force_list(value.get('a')) ]
python
def _private_notes_595(self, key, value): """Populate the ``_private_notes`` key.""" return [ { 'source': value.get('9'), 'value': _private_note, } for _private_note in force_list(value.get('a')) ]
Populate the ``_private_notes`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L747-L754
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
external_system_identifiers
def external_system_identifiers(endpoint): """Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects. """ @utils.flatten @utils.for_each_value def _external_system_identifiers(self, key, value): new_recid = maybe_int(value.get('d')) if new_recid: self['new_record'] = get_record_ref(new_recid, endpoint) return [ { 'schema': 'SPIRES', 'value': ext_sys_id, } for ext_sys_id in force_list(value.get('a')) ] return _external_system_identifiers
python
def external_system_identifiers(endpoint): """Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects. """ @utils.flatten @utils.for_each_value def _external_system_identifiers(self, key, value): new_recid = maybe_int(value.get('d')) if new_recid: self['new_record'] = get_record_ref(new_recid, endpoint) return [ { 'schema': 'SPIRES', 'value': ext_sys_id, } for ext_sys_id in force_list(value.get('a')) ] return _external_system_identifiers
Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L901-L920
inspirehep/inspire-dojson
inspire_dojson/common/rules.py
deleted_records
def deleted_records(endpoint): """Populate the ``deleted_records`` key.""" @utils.for_each_value def _deleted_records(self, key, value): deleted_recid = maybe_int(value.get('a')) if deleted_recid: return get_record_ref(deleted_recid, endpoint) return _deleted_records
python
def deleted_records(endpoint): """Populate the ``deleted_records`` key.""" @utils.for_each_value def _deleted_records(self, key, value): deleted_recid = maybe_int(value.get('a')) if deleted_recid: return get_record_ref(deleted_recid, endpoint) return _deleted_records
Populate the ``deleted_records`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/common/rules.py#L944-L952
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd6xx.py
accelerator_experiments
def accelerator_experiments(self, key, value): """Populate the ``accelerator_experiments`` key.""" result = [] a_value = force_single_element(value.get('a')) e_values = [el for el in force_list(value.get('e')) if el != '-'] zero_values = force_list(value.get('0')) if a_value and not e_values: result.append({'accelerator': a_value}) # XXX: we zip only when they have the same length, otherwise # we might match a value with the wrong recid. if len(e_values) == len(zero_values): for e_value, zero_value in zip(e_values, zero_values): result.append({ 'legacy_name': e_value, 'record': get_record_ref(zero_value, 'experiments'), }) else: for e_value in e_values: result.append({'legacy_name': e_value}) return result
python
def accelerator_experiments(self, key, value): """Populate the ``accelerator_experiments`` key.""" result = [] a_value = force_single_element(value.get('a')) e_values = [el for el in force_list(value.get('e')) if el != '-'] zero_values = force_list(value.get('0')) if a_value and not e_values: result.append({'accelerator': a_value}) # XXX: we zip only when they have the same length, otherwise # we might match a value with the wrong recid. if len(e_values) == len(zero_values): for e_value, zero_value in zip(e_values, zero_values): result.append({ 'legacy_name': e_value, 'record': get_record_ref(zero_value, 'experiments'), }) else: for e_value in e_values: result.append({'legacy_name': e_value}) return result
Populate the ``accelerator_experiments`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd6xx.py#L53-L76
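The comment in the rule above explains why the ``e`` and ``0`` subfields are zipped only when their lengths match. A small illustration of that guard (the names and recid are made-up values):

def pair_experiments(e_values, zero_values):
    # Pair legacy names with recids only when the lists line up one-to-one;
    # otherwise drop the recids rather than risk a wrong match.
    if len(e_values) == len(zero_values):
        return [{'legacy_name': e, 'recid': z}
                for e, z in zip(e_values, zero_values)]
    return [{'legacy_name': e} for e in e_values]

print(pair_experiments(['CERN-LHC-CMS'], ['1108642']))
# [{'legacy_name': 'CERN-LHC-CMS', 'recid': '1108642'}]
print(pair_experiments(['CERN-LHC-CMS', 'CERN-LHC-ATLAS'], ['1108642']))
# [{'legacy_name': 'CERN-LHC-CMS'}, {'legacy_name': 'CERN-LHC-ATLAS'}]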
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd6xx.py
keywords
def keywords(self, key, values): """Populate the ``keywords`` key. Also populates the ``energy_ranges`` key through side effects. """ keywords = self.get('keywords', []) energy_ranges = self.get('energy_ranges', []) for value in force_list(values): if value.get('a'): schema = force_single_element(value.get('2', '')).upper() sources = force_list(value.get('9')) a_values = force_list(value.get('a')) if 'conference' not in sources: for a_value in a_values: keywords.append({ 'schema': schema, 'source': force_single_element(sources), 'value': a_value, }) if value.get('e'): energy_ranges.append(ENERGY_RANGES_MAP.get(value.get('e'))) self['energy_ranges'] = energy_ranges return keywords
python
def keywords(self, key, values): """Populate the ``keywords`` key. Also populates the ``energy_ranges`` key through side effects. """ keywords = self.get('keywords', []) energy_ranges = self.get('energy_ranges', []) for value in force_list(values): if value.get('a'): schema = force_single_element(value.get('2', '')).upper() sources = force_list(value.get('9')) a_values = force_list(value.get('a')) if 'conference' not in sources: for a_value in a_values: keywords.append({ 'schema': schema, 'source': force_single_element(sources), 'value': a_value, }) if value.get('e'): energy_ranges.append(ENERGY_RANGES_MAP.get(value.get('e'))) self['energy_ranges'] = energy_ranges return keywords
Populate the ``keywords`` key. Also populates the ``energy_ranges`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd6xx.py#L90-L117
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd6xx.py
keywords2marc
def keywords2marc(self, key, values): """Populate the ``695`` MARC field. Also populates the ``084`` and ``6531`` MARC fields through side effects. """ result_695 = self.get('695', []) result_084 = self.get('084', []) result_6531 = self.get('6531', []) for value in values: schema = value.get('schema') source = value.get('source') keyword = value.get('value') if schema == 'PACS' or schema == 'PDG': result_084.append({ '2': schema, '9': source, 'a': keyword, }) elif schema == 'JACOW': result_6531.append({ '2': 'JACoW', '9': source, 'a': keyword, }) elif schema == 'INSPIRE': result_695.append({ '2': 'INSPIRE', '9': source, 'a': keyword, }) elif schema == 'INIS': result_695.append({ '2': 'INIS', '9': source, 'a': keyword, }) elif source != 'magpie': result_6531.append({ '9': source, 'a': keyword, }) self['6531'] = result_6531 self['084'] = result_084 return result_695
python
def keywords2marc(self, key, values): """Populate the ``695`` MARC field. Also populates the ``084`` and ``6531`` MARC fields through side effects. """ result_695 = self.get('695', []) result_084 = self.get('084', []) result_6531 = self.get('6531', []) for value in values: schema = value.get('schema') source = value.get('source') keyword = value.get('value') if schema == 'PACS' or schema == 'PDG': result_084.append({ '2': schema, '9': source, 'a': keyword, }) elif schema == 'JACOW': result_6531.append({ '2': 'JACoW', '9': source, 'a': keyword, }) elif schema == 'INSPIRE': result_695.append({ '2': 'INSPIRE', '9': source, 'a': keyword, }) elif schema == 'INIS': result_695.append({ '2': 'INIS', '9': source, 'a': keyword, }) elif source != 'magpie': result_6531.append({ '9': source, 'a': keyword, }) self['6531'] = result_6531 self['084'] = result_084 return result_695
Populate the ``695`` MARC field. Also populates the ``084`` and ``6531`` MARC fields through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd6xx.py#L132-L178
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
collaborations
def collaborations(self, key, value): """Populate the ``collaborations`` key.""" result = [] for g_value in force_list(value.get('g')): collaborations = normalize_collaboration(g_value) if len(collaborations) == 1: result.append({ 'record': get_record_ref(maybe_int(value.get('0')), 'experiments'), 'value': collaborations[0], }) else: result.extend({'value': collaboration} for collaboration in collaborations) return result
python
def collaborations(self, key, value): """Populate the ``collaborations`` key.""" result = [] for g_value in force_list(value.get('g')): collaborations = normalize_collaboration(g_value) if len(collaborations) == 1: result.append({ 'record': get_record_ref(maybe_int(value.get('0')), 'experiments'), 'value': collaborations[0], }) else: result.extend({'value': collaboration} for collaboration in collaborations) return result
Populate the ``collaborations`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L49-L63
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
publication_info
def publication_info(self, key, value): """Populate the ``publication_info`` key.""" def _get_cnum(value): w_value = force_single_element(value.get('w', '')) normalized_w_value = w_value.replace('/', '-').upper() return normalized_w_value def _get_material(value): schema = load_schema('elements/material') valid_materials = schema['enum'] m_value = force_single_element(value.get('m', '')) normalized_m_value = m_value.lower() if normalized_m_value in valid_materials: return normalized_m_value def _get_parent_isbn(value): z_value = force_single_element(value.get('z', '')) if z_value: return normalize_isbn(z_value) def _get_pubinfo_freetext(value): x_value = force_single_element(value.get('x', '')) if not x_value.startswith('#DONE'): return x_value page_start, page_end, artid = split_page_artid(value.get('c')) parent_recid = maybe_int(force_single_element(value.get('0'))) parent_record = get_record_ref(parent_recid, 'literature') journal_recid = maybe_int(force_single_element(value.get('1'))) journal_record = get_record_ref(journal_recid, 'journals') conference_recid = maybe_int(force_single_element(value.get('2'))) conference_record = get_record_ref(conference_recid, 'conferences') return { 'artid': artid, 'cnum': _get_cnum(value), 'conf_acronym': force_single_element(value.get('q')), 'conference_record': conference_record, 'hidden': key.startswith('7731') or None, 'journal_issue': force_single_element(value.get('n')), 'journal_record': journal_record, 'journal_title': force_single_element(value.get('p')), 'journal_volume': force_single_element(value.get('v')), 'material': _get_material(value), 'page_end': page_end, 'page_start': page_start, 'parent_isbn': _get_parent_isbn(value), 'parent_record': parent_record, 'parent_report_number': force_single_element(value.get('r')), 'pubinfo_freetext': _get_pubinfo_freetext(value), 'year': maybe_int(force_single_element(value.get('y'))), }
python
def publication_info(self, key, value): """Populate the ``publication_info`` key.""" def _get_cnum(value): w_value = force_single_element(value.get('w', '')) normalized_w_value = w_value.replace('/', '-').upper() return normalized_w_value def _get_material(value): schema = load_schema('elements/material') valid_materials = schema['enum'] m_value = force_single_element(value.get('m', '')) normalized_m_value = m_value.lower() if normalized_m_value in valid_materials: return normalized_m_value def _get_parent_isbn(value): z_value = force_single_element(value.get('z', '')) if z_value: return normalize_isbn(z_value) def _get_pubinfo_freetext(value): x_value = force_single_element(value.get('x', '')) if not x_value.startswith('#DONE'): return x_value page_start, page_end, artid = split_page_artid(value.get('c')) parent_recid = maybe_int(force_single_element(value.get('0'))) parent_record = get_record_ref(parent_recid, 'literature') journal_recid = maybe_int(force_single_element(value.get('1'))) journal_record = get_record_ref(journal_recid, 'journals') conference_recid = maybe_int(force_single_element(value.get('2'))) conference_record = get_record_ref(conference_recid, 'conferences') return { 'artid': artid, 'cnum': _get_cnum(value), 'conf_acronym': force_single_element(value.get('q')), 'conference_record': conference_record, 'hidden': key.startswith('7731') or None, 'journal_issue': force_single_element(value.get('n')), 'journal_record': journal_record, 'journal_title': force_single_element(value.get('p')), 'journal_volume': force_single_element(value.get('v')), 'material': _get_material(value), 'page_end': page_end, 'page_start': page_start, 'parent_isbn': _get_parent_isbn(value), 'parent_record': parent_record, 'parent_report_number': force_single_element(value.get('r')), 'pubinfo_freetext': _get_pubinfo_freetext(value), 'year': maybe_int(force_single_element(value.get('y'))), }
Populate the ``publication_info`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L75-L132
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
publication_info2marc
def publication_info2marc(self, key, values): """Populate the ``773`` MARC field. Also populates the ``7731`` MARC field through side effects. """ result_773 = self.get('773', []) result_7731 = self.get('7731', []) for value in force_list(convert_new_publication_info_to_old(values)): page_artid = [] if value.get('page_start') and value.get('page_end'): page_artid.append(u'{page_start}-{page_end}'.format(**value)) elif value.get('page_start'): page_artid.append(u'{page_start}'.format(**value)) elif value.get('artid'): page_artid.append(u'{artid}'.format(**value)) result = { '0': get_recid_from_ref(value.get('parent_record')), 'c': page_artid, 'm': value.get('material'), 'n': value.get('journal_issue'), 'p': value.get('journal_title'), 'q': value.get('conf_acronym'), 'r': value.get('parent_report_number'), 'v': value.get('journal_volume'), 'w': value.get('cnum'), 'x': value.get('pubinfo_freetext'), 'y': value.get('year'), 'z': value.get('parent_isbn'), } if value.get('hidden'): result_7731.append(result) else: result_773.append(result) self['7731'] = result_7731 return result_773
python
def publication_info2marc(self, key, values): """Populate the ``773`` MARC field. Also populates the ``7731`` MARC field through side effects. """ result_773 = self.get('773', []) result_7731 = self.get('7731', []) for value in force_list(convert_new_publication_info_to_old(values)): page_artid = [] if value.get('page_start') and value.get('page_end'): page_artid.append(u'{page_start}-{page_end}'.format(**value)) elif value.get('page_start'): page_artid.append(u'{page_start}'.format(**value)) elif value.get('artid'): page_artid.append(u'{artid}'.format(**value)) result = { '0': get_recid_from_ref(value.get('parent_record')), 'c': page_artid, 'm': value.get('material'), 'n': value.get('journal_issue'), 'p': value.get('journal_title'), 'q': value.get('conf_acronym'), 'r': value.get('parent_report_number'), 'v': value.get('journal_volume'), 'w': value.get('cnum'), 'x': value.get('pubinfo_freetext'), 'y': value.get('year'), 'z': value.get('parent_isbn'), } if value.get('hidden'): result_7731.append(result) else: result_773.append(result) self['7731'] = result_7731 return result_773
Populate the ``773`` MARC field. Also populates the ``7731`` MARC field through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L136-L174
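The ``c`` subfield of 773 is built from either a page range or an article id. A sketch of just that selection, matching the branches above:

def page_artid(value):
    # Prefer a full page range, then a lone start page, then the article id.
    if value.get('page_start') and value.get('page_end'):
        return u'{page_start}-{page_end}'.format(**value)
    if value.get('page_start'):
        return u'{page_start}'.format(**value)
    if value.get('artid'):
        return u'{artid}'.format(**value)
    return None

print(page_artid({'page_start': '123', 'page_end': '145'}))  # 123-145
print(page_artid({'artid': '032004'}))                       # 032004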
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
related_records_78002
def related_records_78002(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation': 'predecessor', }
python
def related_records_78002(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation': 'predecessor', }
Populate the ``related_records`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L179-L187
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
related_records_78502
def related_records_78502(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation': 'successor', }
python
def related_records_78502(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation': 'successor', }
Populate the ``related_records`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L192-L200
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
related_records_78708
def related_records_78708(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation_freetext': value.get('i'), }
python
def related_records_78708(self, key, value): """Populate the ``related_records`` key.""" record = get_record_ref(maybe_int(value.get('w')), 'literature') if record: return { 'curated_relation': record is not None, 'record': record, 'relation_freetext': value.get('i'), }
Populate the ``related_records`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L205-L213
inspirehep/inspire-dojson
inspire_dojson/hep/rules/bd7xx.py
related_records2marc
def related_records2marc(self, key, value): """Populate the ``78708`` MARC field. Also populates the ``78002``, ``78502`` MARC fields through side effects. """ if value.get('relation_freetext'): return { 'i': value.get('relation_freetext'), 'w': get_recid_from_ref(value.get('record')), } elif value.get('relation') == 'successor': self.setdefault('78502', []).append({ 'i': 'superseded by', 'w': get_recid_from_ref(value.get('record')), }) elif value.get('relation') == 'predecessor': self.setdefault('78002', []).append({ 'i': 'supersedes', 'w': get_recid_from_ref(value.get('record')), }) else: raise NotImplementedError(u"Unhandled relation in related_records: {}".format(value.get('relation')))
python
def related_records2marc(self, key, value): """Populate the ``78708`` MARC field. Also populates the ``78002``, ``78502`` MARC fields through side effects. """ if value.get('relation_freetext'): return { 'i': value.get('relation_freetext'), 'w': get_recid_from_ref(value.get('record')), } elif value.get('relation') == 'successor': self.setdefault('78502', []).append({ 'i': 'superseded by', 'w': get_recid_from_ref(value.get('record')), }) elif value.get('relation') == 'predecessor': self.setdefault('78002', []).append({ 'i': 'supersedes', 'w': get_recid_from_ref(value.get('record')), }) else: raise NotImplementedError(u"Unhandled relation in related_records: {}".format(value.get('relation')))
Populate the ``78708`` MARC field. Also populates the ``78002``, ``78502`` MARC fields through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/hep/rules/bd7xx.py#L218-L239
inspirehep/inspire-dojson
inspire_dojson/journals/rules.py
_private_notes
def _private_notes(self, key, value): """Populate the ``_private_notes`` key.""" return [ { 'source': value.get('9'), 'value': _private_note, } for _private_note in force_list(value.get('x')) ]
python
def _private_notes(self, key, value): """Populate the ``_private_notes`` key.""" return [ { 'source': value.get('9'), 'value': _private_note, } for _private_note in force_list(value.get('x')) ]
Populate the ``_private_notes`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/journals/rules.py#L149-L156
inspirehep/inspire-dojson
inspire_dojson/journals/rules.py
proceedings
def proceedings(self, key, value): """Populate the ``proceedings`` key. Also populates the ``refereed`` key through side effects. """ proceedings = self.get('proceedings') refereed = self.get('refereed') if not proceedings: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'PROCEEDINGS' in normalized_a_values: proceedings = True if not refereed: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'PEER REVIEW' in normalized_a_values: refereed = True elif 'NON-PUBLISHED' in normalized_a_values: refereed = False self['refereed'] = refereed return proceedings
python
def proceedings(self, key, value): """Populate the ``proceedings`` key. Also populates the ``refereed`` key through side effects. """ proceedings = self.get('proceedings') refereed = self.get('refereed') if not proceedings: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'PROCEEDINGS' in normalized_a_values: proceedings = True if not refereed: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'PEER REVIEW' in normalized_a_values: refereed = True elif 'NON-PUBLISHED' in normalized_a_values: refereed = False self['refereed'] = refereed return proceedings
Populate the ``proceedings`` key. Also populates the ``refereed`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/journals/rules.py#L178-L199
inspirehep/inspire-dojson
inspire_dojson/journals/rules.py
short_title
def short_title(self, key, value): """Populate the ``short_title`` key. Also populates the ``title_variants`` key through side effects. """ short_title = value.get('a') title_variants = self.get('title_variants', []) if value.get('u'): short_title = value.get('u') title_variants.append(value.get('a')) self['title_variants'] = title_variants return short_title
python
def short_title(self, key, value): """Populate the ``short_title`` key. Also populates the ``title_variants`` key through side effects. """ short_title = value.get('a') title_variants = self.get('title_variants', []) if value.get('u'): short_title = value.get('u') title_variants.append(value.get('a')) self['title_variants'] = title_variants return short_title
Populate the ``short_title`` key. Also populates the ``title_variants`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/journals/rules.py#L203-L216
inspirehep/inspire-dojson
inspire_dojson/journals/rules.py
deleted
def deleted(self, key, value): """Populate the ``deleted`` key. Also populates the ``book_series`` key through side effects. """ deleted = self.get('deleted') book_series = self.get('book_series') if not deleted: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] normalized_c_values = [el.upper() for el in force_list(value.get('c'))] if 'DELETED' in normalized_a_values or 'DELETED' in normalized_c_values: deleted = True if not book_series: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'BOOKSERIES' in normalized_a_values: book_series = True self['book_series'] = book_series return deleted
python
def deleted(self, key, value): """Populate the ``deleted`` key. Also populates the ``book_series`` key through side effects. """ deleted = self.get('deleted') book_series = self.get('book_series') if not deleted: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] normalized_c_values = [el.upper() for el in force_list(value.get('c'))] if 'DELETED' in normalized_a_values or 'DELETED' in normalized_c_values: deleted = True if not book_series: normalized_a_values = [el.upper() for el in force_list(value.get('a'))] if 'BOOKSERIES' in normalized_a_values: book_series = True self['book_series'] = book_series return deleted
Populate the ``deleted`` key. Also populates the ``book_series`` key through side effects.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/journals/rules.py#L229-L249
inspirehep/inspire-dojson
inspire_dojson/jobs/rules.py
ranks
def ranks(self, key, value): """Populate the ``ranks`` key.""" return [normalize_rank(el) for el in force_list(value.get('a'))]
python
def ranks(self, key, value): """Populate the ``ranks`` key.""" return [normalize_rank(el) for el in force_list(value.get('a'))]
Populate the ``ranks`` key.
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/jobs/rules.py#L177-L179
walkr/oi
oi/core.py
BaseProgram.new_parser
def new_parser(self): """ Create a command line argument parser Add a few default flags, such as --version for displaying the program version when invoked """ parser = argparse.ArgumentParser(description=self.description) parser.add_argument( '--version', help='show version and exit', default=False, action='store_true') parser.add_argument( '--debug', help='enable debugging', default=False, action='store_true') return parser
python
def new_parser(self): """ Create a command line argument parser Add a few default flags, such as --version for displaying the program version when invoked """ parser = argparse.ArgumentParser(description=self.description) parser.add_argument( '--version', help='show version and exit', default=False, action='store_true') parser.add_argument( '--debug', help='enable debugging', default=False, action='store_true') return parser
Create a command line argument parser Add a few default flags, such as --version for displaying the program version when invoked
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L48-L61
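A quick, standalone check of the parser that new_parser builds, using the same two argparse flags:

import argparse

parser = argparse.ArgumentParser(description='example program')
parser.add_argument('--version', help='show version and exit',
                    default=False, action='store_true')
parser.add_argument('--debug', help='enable debugging',
                    default=False, action='store_true')

# Parse an explicit argument list instead of sys.argv for the demo.
args = parser.parse_args(['--debug'])
print(args.version, args.debug)  # False True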
walkr/oi
oi/core.py
BaseProgram.add_command
def add_command(self, command, function, description=None): """ Register a new function with the name `command` and `description` (which will be shown when help is invoked). """ self.registered[command] = { 'function': function, 'description': description }
python
def add_command(self, command, function, description=None): """ Register a new function with the name `command` and `description` (which will be shown when help is invoked). """ self.registered[command] = { 'function': function, 'description': description }
Register a new function with the name `command` and `description` (which will be shown when help is invoked).
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L63-L69
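The ``registered`` attribute is just a dict keyed by command name. A minimal register-and-dispatch sketch with the same structure ('ping' and its handler are made up for illustration):

registered = {}

def add_command(command, function, description=None):
    # Mirror of BaseProgram.add_command: store the callable plus its help text.
    registered[command] = {'function': function, 'description': description}

add_command('ping', lambda: 'pong', 'check that the service is alive')
print(registered['ping']['function']())   # pong
print(registered['ping']['description'])  # check that the service is alive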
walkr/oi
oi/core.py
BaseProgram.run
def run(self, args=None): """ Parse command line arguments if necessary, then run the program. By default this method will just take care of the --version flag. The logic for other flags should be handled by your subclass. """ args = args or self.parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) if args.version: print(version.VERSION) sys.exit(0)
python
def run(self, args=None): """ Parse command line arguments if necessary, then run the program. By default this method will just take care of the --version flag. The logic for other flags should be handled by your subclass. """ args = args or self.parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) if args.version: print(version.VERSION) sys.exit(0)
Parse command line arguments if necessary, then run the program. By default this method will just take care of the --version flag. The logic for other flags should be handled by your subclass.
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L71-L84
walkr/oi
oi/core.py
Program.help_function
def help_function(self, command=None): """ Show help for all available commands or just a single one """ if command: return self.registered[command].get( 'description', 'No help available' ) return ', '.join(sorted(self.registered))
python
def help_function(self, command=None): """ Show help for all available commands or just a single one """ if command: return self.registered[command].get( 'description', 'No help available' ) return ', '.join(sorted(self.registered))
Show help for all available commands or just a single one
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L115-L121
walkr/oi
oi/core.py
Program.add_command
def add_command(self, command, function, description=None): """ Register a new function for command """ super(Program, self).add_command(command, function, description) self.service.register(command, function)
python
def add_command(self, command, function, description=None): """ Register a new function for command """ super(Program, self).add_command(command, function, description) self.service.register(command, function)
Register a new function for command
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L123-L126
walkr/oi
oi/core.py
Program.run
def run(self, args=None): """ Parse command line arguments/flags and run the program """ args = args or self.parser.parse_args() super(Program, self).run(args) # Read configuration file if any if args.config is not None: filepath = args.config self.config.read(filepath) # Start workers then wait until they finish work [w.start() for w in self.workers] [w.join() for w in self.workers]
python
def run(self, args=None): """ Parse command line arguments/flags and run the program """ args = args or self.parser.parse_args() super(Program, self).run(args) # Read configuration file if any if args.config is not None: filepath = args.config self.config.read(filepath) # Start workers then wait until they finish work [w.start() for w in self.workers] [w.join() for w in self.workers]
Parse command line arguments/flags and run the program
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L128-L141
walkr/oi
oi/core.py
ClientWrapper.create_client
def create_client(self, addr, timeout): """ Create client(s) based on addr """ def make(addr): c = Client(addr) c.socket._set_recv_timeout(timeout) return c if ',' in addr: addrs = addr.split(',') addrs = [a.strip() for a in addrs] return {a: make(a) for a in addrs} return make(addr)
python
def create_client(self, addr, timeout): """ Create client(s) based on addr """ def make(addr): c = Client(addr) c.socket._set_recv_timeout(timeout) return c if ',' in addr: addrs = addr.split(',') addrs = [a.strip() for a in addrs] return {a: make(a) for a in addrs} return make(addr)
Create client(s) based on addr
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L151-L163
walkr/oi
oi/core.py
ClientWrapper._call_single
def _call_single(self, client, command, *args): """ Call single """ try: return client.call(command, *args) except Exception as e: return None, str(e)
python
def _call_single(self, client, command, *args): """ Call single """ try: return client.call(command, *args) except Exception as e: return None, str(e)
Call single
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L165-L170
walkr/oi
oi/core.py
ClientWrapper._call_multi
def _call_multi(self, clients, command, *args): """ Call multi """ responses, errors = {}, {} for addr, client in clients.items(): res, err = self._call_single(client, command, *args) responses[addr] = res errors[addr] = err return responses, errors
python
def _call_multi(self, clients, command, *args): """ Call multi """ responses, errors = {}, {} for addr, client in clients.items(): res, err = self._call_single(client, command, *args) responses[addr] = res errors[addr] = err return responses, errors
Call multi
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L172-L179
walkr/oi
oi/core.py
ClientWrapper.call
def call(self, command, *args): """ Call remote service(s) """ if isinstance(self.c, dict): return self._call_multi(self.c, command, *args) return self._call_single(self.c, command, *args)
python
def call(self, command, *args): """ Call remote service(s) """ if isinstance(self.c, dict): return self._call_multi(self.c, command, *args) return self._call_single(self.c, command, *args)
Call remote service(s)
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L181-L185
walkr/oi
oi/core.py
ClientWrapper.close
def close(self): """ Close socket(s) """ if isinstance(self.c, dict): for client in self.c.values(): client.sock.close() return self.c.socket.close()
python
def close(self): """ Close socket(s) """ if isinstance(self.c, dict): for client in self.c.values(): client.sock.close() return self.c.socket.close()
Close socket(s)
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L191-L197
walkr/oi
oi/core.py
Response._show
def _show(self, res, err, prefix='', colored=False): """ Show result or error """ if self.kind == 'local': what = res if not err else err print(what) return if self.kind == 'remote': if colored: red, green, reset = Fore.RED, Fore.GREEN, Fore.RESET else: red = green = reset = '' if err: what = prefix + red + 'remote err: {}'.format(err) + reset else: what = prefix + green + str(res) + reset print(what)
python
def _show(self, res, err, prefix='', colored=False): """ Show result or error """ if self.kind == 'local': what = res if not err else err print(what) return if self.kind == 'remote': if colored: red, green, reset = Fore.RED, Fore.GREEN, Fore.RESET else: red = green = reset = '' if err: what = prefix + red + 'remote err: {}'.format(err) + reset else: what = prefix + green + str(res) + reset print(what)
Show result or error
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L210-L227
walkr/oi
oi/core.py
CtlProgram.call
def call(self, command, *args): """ Execute local OR remote command and show response """ if not command: return # Look for local methods first try: res = self.registered[command]['function'](self, *args) return Response('local', res, None) # Method not found, try remote except KeyError: # Execute remote command res, err = self.client.call(command, *args) return Response('remote', res, err, self.client.is_multi()) # Local exception except Exception as e: return Response('local', res, str(e))
python
def call(self, command, *args): """ Execute local OR remote command and show response """ if not command: return # Look for local methods first try: res = self.registered[command]['function'](self, *args) return Response('local', res, None) # Method not found, try remote except KeyError: # Execute remote command res, err = self.client.call(command, *args) return Response('remote', res, err, self.client.is_multi()) # Local exception except Exception as e: return Response('local', res, str(e))
Execute local OR remote command and show response
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L267-L287
walkr/oi
oi/core.py
CtlProgram.parse_input
def parse_input(self, text): """ Parse ctl user input. Double quotes are used to group together multi-word arguments. """ parts = util.split(text) command = parts[0] if text and parts else None command = command.lower() if command else None args = parts[1:] if len(parts) > 1 else [] return (command, args)
python
def parse_input(self, text): """ Parse ctl user input. Double quotes are used to group together multi-word arguments. """ parts = util.split(text) command = parts[0] if text and parts else None command = command.lower() if command else None args = parts[1:] if len(parts) > 1 else [] return (command, args)
Parse ctl user input. Double quotes are used to group together multi-word arguments.
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L289-L298
walkr/oi
oi/core.py
CtlProgram.loop
def loop(self): """ Enter loop, read user input then run command. Repeat """ while True: text = compat.input('ctl > ') command, args = self.parse_input(text) if not command: continue response = self.call(command, *args) response.show()
python
def loop(self): """ Enter loop, read user input then run command. Repeat """ while True: text = compat.input('ctl > ') command, args = self.parse_input(text) if not command: continue response = self.call(command, *args) response.show()
Enter loop, read user input then run command. Repeat
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/core.py#L300-L309
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/namespaces.py
namespace_for
def namespace_for(uri: Union[URIRef, Namespace, str]) -> str: """ Reverse namespace lookup. Note that returned namespace may not be unique :param uri: namespace URI :return: namespace """ uri = str(uri) if uri not in namespaces.values(): namespaces[AnonNS().ns] = uri return [k for k, v in namespaces.items() if uri == v][0]
python
def namespace_for(uri: Union[URIRef, Namespace, str]) -> str: """ Reverse namespace lookup. Note that returned namespace may not be unique :param uri: namespace URI :return: namespace """ uri = str(uri) if uri not in namespaces.values(): namespaces[AnonNS().ns] = uri return [k for k, v in namespaces.items() if uri == v][0]
Reverse namespace lookup. Note that returned namespace may not be unique :param uri: namespace URI :return: namespace
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/namespaces.py#L66-L75
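The reverse lookup above mints an anonymous prefix when the URI is not already registered. The following compact sketch shows the same idea with an explicit registry dict; ``registry``, ``prefix_for`` and the seeded prefix are illustrative and stand in for the module's ``namespaces`` table and ``AnonNS`` helper.

```python
from rdflib import Namespace, URIRef

registry = {"fhir": "http://hl7.org/fhir/"}   # prefix -> namespace URI (illustrative seed)
_anon = 0

def prefix_for(uri) -> str:
    """Reverse lookup: return a prefix for the namespace URI, minting 'nsN' if unknown."""
    global _anon
    uri = str(uri)
    if uri not in registry.values():
        _anon += 1
        registry["ns{}".format(_anon)] = uri
    return next(k for k, v in registry.items() if v == uri)

print(prefix_for(Namespace("http://hl7.org/fhir/")))     # fhir
print(prefix_for(URIRef("http://example.org/vocab#")))   # ns1
print(prefix_for("http://example.org/vocab#"))           # ns1 (already registered)
```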
walkr/oi
oi/util.py
split
def split(text): """ Split text into arguments accounting for muti-word arguments which are double quoted """ # Cleanup text text = text.strip() text = re.sub('\s+', ' ', text) # collpse multiple spaces space, quote, parts = ' ', '"', [] part, quoted = '', False for char in text: # Encoutered beginning double quote if char is quote and quoted is False: quoted = True continue # Encountered the ending double quote if char is quote and quoted is True: quoted = False parts.append(part.strip()) part = '' continue # Found space in quoted if char is space and quoted is True: part += char continue # Found space but not quoted if char is space: if part: parts.append(part) part = '' continue # Found other character if char is not space: part += char continue if part: parts.append(part.strip()) return parts
python
def split(text): """ Split text into arguments accounting for muti-word arguments which are double quoted """ # Cleanup text text = text.strip() text = re.sub('\s+', ' ', text) # collpse multiple spaces space, quote, parts = ' ', '"', [] part, quoted = '', False for char in text: # Encoutered beginning double quote if char is quote and quoted is False: quoted = True continue # Encountered the ending double quote if char is quote and quoted is True: quoted = False parts.append(part.strip()) part = '' continue # Found space in quoted if char is space and quoted is True: part += char continue # Found space but not quoted if char is space: if part: parts.append(part) part = '' continue # Found other character if char is not space: part += char continue if part: parts.append(part.strip()) return parts
Split text into arguments, accounting for multi-word arguments which are double quoted
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/oi/util.py#L6-L51
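The hand-rolled scanner above honours double-quoted groups. For comparison, ``shlex.split`` from the standard library gives very similar behaviour for this kind of input; it additionally understands single quotes and backslash escapes, so it is not a byte-for-byte replacement, just a quick way to see the intended result.

```python
import shlex

text = 'set-name   "John Smith" active'
print(shlex.split(text))   # ['set-name', 'John Smith', 'active']
```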
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdflibformats.py
known_formats
def known_formats(use: Union[Serializer, Parser]=Serializer, include_mime_types: bool = False) -> List[str]: """ Return a list of available formats in rdflib for the required task :param use: task (typically Serializer or Parser) :param include_mime_types: whether mime types are included in the return list :return: list of formats """ return sorted([name for name, kind in plugin._plugins.keys() if kind == use and (include_mime_types or '/' not in name)])
python
def known_formats(use: Union[Serializer, Parser]=Serializer, include_mime_types: bool = False) -> List[str]: """ Return a list of available formats in rdflib for the required task :param use: task (typically Serializer or Parser) :param include_mime_types: whether mime types are included in the return list :return: list of formats """ return sorted([name for name, kind in plugin._plugins.keys() if kind == use and (include_mime_types or '/' not in name)])
Return a list of available formats in rdflib for the required task :param use: task (typically Serializer or Parser) :param include_mime_types: whether mime types are included in the return list :return: list of formats
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdflibformats.py#L47-L54
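The record above reaches into rdflib's private ``plugin._plugins`` registry. The same listing can be obtained through the public ``plugin.plugins()`` generator, as in this sketch; the printed names depend on the installed rdflib version.

```python
from rdflib import plugin
from rdflib.parser import Parser
from rdflib.serializer import Serializer

def available(kind=Serializer, include_mime_types=False):
    """Sorted plugin names for the given task, via the public plugins() generator."""
    return sorted({p.name for p in plugin.plugins(kind=kind)
                   if include_mime_types or '/' not in p.name})

print(available(Serializer))                        # e.g. ['json-ld', 'n3', 'nt', 'turtle', ...]
print(available(Parser, include_mime_types=True))   # parser names plus MIME-type aliases
```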
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/signature.py
file_signature
def file_signature(file_name: str) -> Optional[Tuple]: """ Return an identity signature for file name :param file_name: name of file :return: mode, size, last modified time if file exists, otherwise none """ try: st = os.stat(file_name) except FileNotFoundError: return None return stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime
python
def file_signature(file_name: str) -> Optional[Tuple]: """ Return an identity signature for file name :param file_name: name of file :return: mode, size, last modified time if file exists, otherwise none """ try: st = os.stat(file_name) except FileNotFoundError: return None return stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime
Return an identity signature for file name :param file_name: name of file :return: mode, size, last modified time if file exists, otherwise none
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/signature.py#L53-L63
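A quick, self-contained check of the signature contract: the tuple changes when the file changes, and a missing file yields ``None``. The helper is copied from the record above; the temporary file exists only for the demonstration.

```python
import os
import stat
import tempfile

def file_signature(file_name):
    """(mode, size, mtime) for an existing file, None otherwise (as in the record above)."""
    try:
        st = os.stat(file_name)
    except FileNotFoundError:
        return None
    return stat.S_IFMT(st.st_mode), st.st_size, st.st_mtime

with tempfile.NamedTemporaryFile("w", suffix=".ttl", delete=False) as f:
    f.write("v1")
    name = f.name

before = file_signature(name)
with open(name, "w") as f:
    f.write("version 2")                       # size (and usually mtime) changes

assert file_signature(name) != before          # signature no longer matches
assert file_signature(name + ".missing") is None
os.remove(name)
```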
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/signature.py
url_signature
def url_signature(url: str) -> Optional[Tuple]: """ Return an identify signature for url :param url: item to get signature for :return: tuple containing last modified, length and, if present, etag """ request = urllib.request.Request(url) request.get_method = lambda: 'HEAD' response = None try: response = urllib.request.urlopen(request) except urllib.error.HTTPError: return None return response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag')
python
def url_signature(url: str) -> Optional[Tuple]: """ Return an identify signature for url :param url: item to get signature for :return: tuple containing last modified, length and, if present, etag """ request = urllib.request.Request(url) request.get_method = lambda: 'HEAD' response = None try: response = urllib.request.urlopen(request) except urllib.error.HTTPError: return None return response.info()['Last-Modified'], response.info()['Content-Length'], response.info().get('ETag')
Return an identity signature for a URL :param url: item to get signature for :return: tuple containing last modified, length and, if present, etag
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/signature.py#L66-L79
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/signature.py
signature
def signature(name: str) -> Optional[Tuple]: """ Return the file or URL signature for name :param name: :return: """ return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
python
def signature(name: str) -> Optional[Tuple]: """ Return the file or URL signature for name :param name: :return: """ return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
Return the file or URL signature for name :param name: :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/signature.py#L82-L88
walkr/oi
setup.py
read_long_description
def read_long_description(readme_file): """ Read package long description from README file """ try: import pypandoc except (ImportError, OSError) as e: print('No pypandoc or pandoc: %s' % (e,)) if is_py3: fh = open(readme_file, encoding='utf-8') else: fh = open(readme_file) long_description = fh.read() fh.close() return long_description else: return pypandoc.convert(readme_file, 'rst')
python
def read_long_description(readme_file): """ Read package long description from README file """ try: import pypandoc except (ImportError, OSError) as e: print('No pypandoc or pandoc: %s' % (e,)) if is_py3: fh = open(readme_file, encoding='utf-8') else: fh = open(readme_file) long_description = fh.read() fh.close() return long_description else: return pypandoc.convert(readme_file, 'rst')
Read package long description from README file
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/setup.py#L17-L31
walkr/oi
setup.py
read_version
def read_version(): """ Read package version """ with open('./oi/version.py') as fh: for line in fh: if line.startswith('VERSION'): return line.split('=')[1].strip().strip("'")
python
def read_version(): """ Read package version """ with open('./oi/version.py') as fh: for line in fh: if line.startswith('VERSION'): return line.split('=')[1].strip().strip("'")
Read package version
https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/setup.py#L34-L39
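The version scan is a plain line match against the ``VERSION = '...'`` assignment. A self-contained run against an in-memory sample instead of ``./oi/version.py``:

```python
import io

sample = io.StringIO("# generated file\nVERSION = '1.2.3'\n")

def read_version(fh):
    for line in fh:
        if line.startswith('VERSION'):
            return line.split('=')[1].strip().strip("'")

print(read_version(sample))   # 1.2.3
```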
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/prettygraph.py
PrettyGraph.strip_prefixes
def strip_prefixes(g: Graph): """ Remove the prefixes from the graph for aesthetics """ return re.sub(r'^@prefix .* .\n', '', g.serialize(format="turtle").decode(), flags=re.MULTILINE).strip()
python
def strip_prefixes(g: Graph): """ Remove the prefixes from the graph for aesthetics """ return re.sub(r'^@prefix .* .\n', '', g.serialize(format="turtle").decode(), flags=re.MULTILINE).strip()
Remove the prefixes from the graph for aesthetics
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/prettygraph.py#L98-L102
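The stripping is a single multiline regex. Here it is exercised on a literal Turtle string so the effect is visible without building a graph; the triples are made up for the illustration.

```python
import re

ttl = (
    '@prefix fhir: <http://hl7.org/fhir/> .\n'
    '@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n'
    '\n'
    'fhir:Patient.gender [ fhir:value "male"^^xsd:string ] .\n'
)
print(re.sub(r'^@prefix .* .\n', '', ttl, flags=re.MULTILINE).strip())
# fhir:Patient.gender [ fhir:value "male"^^xsd:string ] .
```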
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add_prefixes
def add_prefixes(self, nsmap: Dict[str, Namespace]) -> None: """ Add the required prefix definitions :return: """ [self._g.bind(e[0], e[1]) for e in nsmap.items()]
python
def add_prefixes(self, nsmap: Dict[str, Namespace]) -> None: """ Add the required prefix definitions :return: """ [self._g.bind(e[0], e[1]) for e in nsmap.items()]
Add the required prefix definitions :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L132-L137
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add
def add(self, subj: Node, pred: URIRef, obj: Node) -> "FHIRResource": """ Shortcut to rdflib add function :param subj: :param pred: :param obj: :return: self for chaining """ self._g.add((subj, pred, obj)) return self
python
def add(self, subj: Node, pred: URIRef, obj: Node) -> "FHIRResource": """ Shortcut to rdflib add function :param subj: :param pred: :param obj: :return: self for chaining """ self._g.add((subj, pred, obj)) return self
Shortcut to rdflib add function :param subj: :param pred: :param obj: :return: self for chaining
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L146-L155
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add_value_node
def add_value_node(self, subj: Node, pred: URIRef, val: Union[JsonObj, str, List], valuetype: Optional[URIRef]= None) -> None: """ Expand val according to the range of pred and add it to the graph :param subj: graph subject :param pred: graph predicate :param val: JSON representation of target object :param valuetype: predicate type if it can't be directly determined """ pred_type = self._meta.predicate_type(pred) if not valuetype else valuetype # Transform generic resources into specific types if pred_type == FHIR.Resource: pred_type = FHIR[val.resourceType] val_meta = FHIRMetaVocEntry(self._vocabulary, pred_type) for k, p in val_meta.predicates().items(): if k in val: self.add_val(subj, p, val, k) if pred == FHIR.CodeableConcept.coding: self.add_type_arc(subj, val) elif k == "value" and val_meta.predicate_type(p) == FHIR.Element: # value / Element is the wild card combination -- if there is a "value[x]" in val, emit it where the # type comes from 'x' for vk in val._as_dict.keys(): if vk.startswith(k): self.add_val(subj, FHIR['Extension.' + vk], val, vk, self._meta.value_predicate_to_type(vk)) else: # Can have an extension only without a primary value self.add_extension_val(subj, val, k, p)
python
def add_value_node(self, subj: Node, pred: URIRef, val: Union[JsonObj, str, List], valuetype: Optional[URIRef]= None) -> None: """ Expand val according to the range of pred and add it to the graph :param subj: graph subject :param pred: graph predicate :param val: JSON representation of target object :param valuetype: predicate type if it can't be directly determined """ pred_type = self._meta.predicate_type(pred) if not valuetype else valuetype # Transform generic resources into specific types if pred_type == FHIR.Resource: pred_type = FHIR[val.resourceType] val_meta = FHIRMetaVocEntry(self._vocabulary, pred_type) for k, p in val_meta.predicates().items(): if k in val: self.add_val(subj, p, val, k) if pred == FHIR.CodeableConcept.coding: self.add_type_arc(subj, val) elif k == "value" and val_meta.predicate_type(p) == FHIR.Element: # value / Element is the wild card combination -- if there is a "value[x]" in val, emit it where the # type comes from 'x' for vk in val._as_dict.keys(): if vk.startswith(k): self.add_val(subj, FHIR['Extension.' + vk], val, vk, self._meta.value_predicate_to_type(vk)) else: # Can have an extension only without a primary value self.add_extension_val(subj, val, k, p)
Expand val according to the range of pred and add it to the graph :param subj: graph subject :param pred: graph predicate :param val: JSON representation of target object :param valuetype: predicate type if it can't be directly determined
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L157-L185
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add_reference
def add_reference(self, subj: Node, val: str) -> None: """ Add a fhir:link and RDF type arc if it can be determined :param subj: reference subject :param val: reference value """ match = FHIR_RESOURCE_RE.match(val) ref_uri_str = res_type = None if match: ref_uri_str = val if match.group(FHIR_RE_BASE) else (self._base_uri + urllib.parse.quote(val)) res_type = match.group(FHIR_RE_RESOURCE) elif '://' in val: ref_uri_str = val res_type = "Resource" elif self._base_uri and not val.startswith('#') and not val.startswith('/'): ref_uri_str = self._base_uri + urllib.parse.quote(val) res_type = val.split('/', 1)[0] if '/' in val else "Resource" if ref_uri_str: ref_uri = URIRef(ref_uri_str) self.add(subj, FHIR.link, ref_uri) self.add(ref_uri, RDF.type, FHIR[res_type])
python
def add_reference(self, subj: Node, val: str) -> None: """ Add a fhir:link and RDF type arc if it can be determined :param subj: reference subject :param val: reference value """ match = FHIR_RESOURCE_RE.match(val) ref_uri_str = res_type = None if match: ref_uri_str = val if match.group(FHIR_RE_BASE) else (self._base_uri + urllib.parse.quote(val)) res_type = match.group(FHIR_RE_RESOURCE) elif '://' in val: ref_uri_str = val res_type = "Resource" elif self._base_uri and not val.startswith('#') and not val.startswith('/'): ref_uri_str = self._base_uri + urllib.parse.quote(val) res_type = val.split('/', 1)[0] if '/' in val else "Resource" if ref_uri_str: ref_uri = URIRef(ref_uri_str) self.add(subj, FHIR.link, ref_uri) self.add(ref_uri, RDF.type, FHIR[res_type])
Add a fhir:link and RDF type arc if it can be determined :param subj: reference subject :param val: reference value
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L187-L207
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add_val
def add_val(self, subj: Node, pred: URIRef, json_obj: JsonObj, json_key: str, valuetype: Optional[URIRef] = None) -> Optional[BNode]: """ Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are represented as a list of BNODE objects with a fhir:index discrimanant :param subj: graph subject :param pred: predicate :param json_obj: object containing json_key :param json_key: name of the value in the JSON resource :param valuetype: value type if NOT determinable by predicate :return: value node if target is a BNode else None """ if json_key not in json_obj: print("Expecting to find object named '{}' in JSON:".format(json_key)) print(json_obj._as_json_dumps()) print("entry skipped") return None val = json_obj[json_key] if isinstance(val, List): list_idx = 0 for lv in val: entry_bnode = BNode() # TODO: this is getting messy. Refactor and clean this up if pred == FHIR.Bundle.entry: entry_subj = URIRef(lv.fullUrl) self.add(entry_bnode, FHIR.index, Literal(list_idx)) self.add_val(entry_bnode, FHIR.Bundle.entry.fullUrl, lv, 'fullUrl') self.add(entry_bnode, FHIR.Bundle.entry.resource, entry_subj) self.add(subj, pred, entry_bnode) entry_mv = FHIRMetaVocEntry(self._vocabulary, FHIR.BundleEntryComponent) for k, p in entry_mv.predicates().items(): if k not in ['resource', 'fullUrl'] and k in lv: print("---> adding {}".format(k)) self.add_val(subj, p, lv, k) FHIRResource(self._vocabulary, None, self._base_uri, lv.resource, self._g, False, self._replace_narrative_text, False, resource_uri=entry_subj) else: self.add(entry_bnode, FHIR.index, Literal(list_idx)) if isinstance(lv, JsonObj): self.add_value_node(entry_bnode, pred, lv, valuetype) else: vt = self._meta.predicate_type(pred) atom_type = self._meta.primitive_datatype_nostring(vt) if vt else None self.add(entry_bnode, FHIR.value, Literal(lv, datatype=atom_type)) self.add(subj, pred, entry_bnode) list_idx += 1 else: vt = self._meta.predicate_type(pred) if not valuetype else valuetype if self._meta.is_atom(pred): if self._replace_narrative_text and pred == FHIR.Narrative.div and len(val) > 120: val = REPLACED_NARRATIVE_TEXT self.add(subj, pred, Literal(val)) else: v = BNode() if self._meta.is_primitive(vt): self.add(v, FHIR.value, Literal(str(val), datatype=self._meta.primitive_datatype_nostring(vt, val))) else: self.add_value_node(v, pred, val, valuetype) self.add(subj, pred, v) if pred == FHIR.Reference.reference: self.add_reference(subj, val) self.add_extension_val(v, json_obj, json_key) return v return None
python
def add_val(self, subj: Node, pred: URIRef, json_obj: JsonObj, json_key: str, valuetype: Optional[URIRef] = None) -> Optional[BNode]: """ Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are represented as a list of BNODE objects with a fhir:index discrimanant :param subj: graph subject :param pred: predicate :param json_obj: object containing json_key :param json_key: name of the value in the JSON resource :param valuetype: value type if NOT determinable by predicate :return: value node if target is a BNode else None """ if json_key not in json_obj: print("Expecting to find object named '{}' in JSON:".format(json_key)) print(json_obj._as_json_dumps()) print("entry skipped") return None val = json_obj[json_key] if isinstance(val, List): list_idx = 0 for lv in val: entry_bnode = BNode() # TODO: this is getting messy. Refactor and clean this up if pred == FHIR.Bundle.entry: entry_subj = URIRef(lv.fullUrl) self.add(entry_bnode, FHIR.index, Literal(list_idx)) self.add_val(entry_bnode, FHIR.Bundle.entry.fullUrl, lv, 'fullUrl') self.add(entry_bnode, FHIR.Bundle.entry.resource, entry_subj) self.add(subj, pred, entry_bnode) entry_mv = FHIRMetaVocEntry(self._vocabulary, FHIR.BundleEntryComponent) for k, p in entry_mv.predicates().items(): if k not in ['resource', 'fullUrl'] and k in lv: print("---> adding {}".format(k)) self.add_val(subj, p, lv, k) FHIRResource(self._vocabulary, None, self._base_uri, lv.resource, self._g, False, self._replace_narrative_text, False, resource_uri=entry_subj) else: self.add(entry_bnode, FHIR.index, Literal(list_idx)) if isinstance(lv, JsonObj): self.add_value_node(entry_bnode, pred, lv, valuetype) else: vt = self._meta.predicate_type(pred) atom_type = self._meta.primitive_datatype_nostring(vt) if vt else None self.add(entry_bnode, FHIR.value, Literal(lv, datatype=atom_type)) self.add(subj, pred, entry_bnode) list_idx += 1 else: vt = self._meta.predicate_type(pred) if not valuetype else valuetype if self._meta.is_atom(pred): if self._replace_narrative_text and pred == FHIR.Narrative.div and len(val) > 120: val = REPLACED_NARRATIVE_TEXT self.add(subj, pred, Literal(val)) else: v = BNode() if self._meta.is_primitive(vt): self.add(v, FHIR.value, Literal(str(val), datatype=self._meta.primitive_datatype_nostring(vt, val))) else: self.add_value_node(v, pred, val, valuetype) self.add(subj, pred, v) if pred == FHIR.Reference.reference: self.add_reference(subj, val) self.add_extension_val(v, json_obj, json_key) return v return None
Add the RDF representation of val to the graph as a target of subj, pred. Note that FHIR lists are represented as a list of BNODE objects with a fhir:index discriminant :param subj: graph subject :param pred: predicate :param json_obj: object containing json_key :param json_key: name of the value in the JSON resource :param valuetype: value type if NOT determinable by predicate :return: value node if target is a BNode else None
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L229-L292
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
FHIRResource.add_extension_val
def add_extension_val(self, subj: Node, json_obj: Union[JsonObj, List[JsonObjTypes]], key: str, pred: Optional[URIRef] = None) -> None: """ Add any extensions for the supplied object. This can be called in following situations: 1) Single extended value "key" : (value), "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 2) Single extension only "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 3) Multiple extended values: (TBD) 4) Multiple extensions only "_key" : [ { "extension": [ { "url": "http://...", "value[x]": "......" } ] } ] :param subj: Node containing subject :param json_obj: Object (potentially) containing "_key" :param key: name of element that is possibly extended (as indicated by "_" prefix) :param pred: predicate for the contained elements. Only used in situations 3) (?) and 4 """ extendee_name = "_" + key if extendee_name in json_obj: if not isinstance(subj, BNode): raise NotImplementedError("Extension to something other than a simple BNode") if isinstance(json_obj[extendee_name], list): if not pred: raise NotImplemented("Case 3 not implemented") entry_idx = 0 for extension in json_obj[extendee_name]: entry = BNode() self.add(entry, FHIR.index, Literal(entry_idx)) self.add_val(entry, FHIR.Element.extension, extension, 'extension') self.add(subj, pred, entry) entry_idx += 1 elif 'fhir_comments' in json_obj[extendee_name] and len(json_obj[extendee_name]) == 1: # TODO: determine whether and how fhir comments should be represented in RDF. # for the moment we just drop them print("fhir_comment ignored") print(json_obj[extendee_name]._as_json_dumps()) pass else: self.add_val(subj, FHIR.Element.extension, json_obj[extendee_name], 'extension')
python
def add_extension_val(self, subj: Node, json_obj: Union[JsonObj, List[JsonObjTypes]], key: str, pred: Optional[URIRef] = None) -> None: """ Add any extensions for the supplied object. This can be called in following situations: 1) Single extended value "key" : (value), "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 2) Single extension only "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 3) Multiple extended values: (TBD) 4) Multiple extensions only "_key" : [ { "extension": [ { "url": "http://...", "value[x]": "......" } ] } ] :param subj: Node containing subject :param json_obj: Object (potentially) containing "_key" :param key: name of element that is possibly extended (as indicated by "_" prefix) :param pred: predicate for the contained elements. Only used in situations 3) (?) and 4 """ extendee_name = "_" + key if extendee_name in json_obj: if not isinstance(subj, BNode): raise NotImplementedError("Extension to something other than a simple BNode") if isinstance(json_obj[extendee_name], list): if not pred: raise NotImplemented("Case 3 not implemented") entry_idx = 0 for extension in json_obj[extendee_name]: entry = BNode() self.add(entry, FHIR.index, Literal(entry_idx)) self.add_val(entry, FHIR.Element.extension, extension, 'extension') self.add(subj, pred, entry) entry_idx += 1 elif 'fhir_comments' in json_obj[extendee_name] and len(json_obj[extendee_name]) == 1: # TODO: determine whether and how fhir comments should be represented in RDF. # for the moment we just drop them print("fhir_comment ignored") print(json_obj[extendee_name]._as_json_dumps()) pass else: self.add_val(subj, FHIR.Element.extension, json_obj[extendee_name], 'extension')
Add any extensions for the supplied object. This can be called in following situations: 1) Single extended value "key" : (value), "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 2) Single extension only "_key" : { "extension": [ { "url": "http://...", "value[x]": "......" } ] } 3) Multiple extended values: (TBD) 4) Multiple extensions only "_key" : [ { "extension": [ { "url": "http://...", "value[x]": "......" } ] } ] :param subj: Node containing subject :param json_obj: Object (potentially) containing "_key" :param key: name of element that is possibly extended (as indicated by "_" prefix) :param pred: predicate for the contained elements. Only used in situations 3) (?) and 4
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L294-L362
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/fhirgraphutils.py
link
def link(g: Graph, subject: Node, predicate: URIRef) -> Tuple[Optional[URIRef], Optional[URIRef]]: """ Return the link URI and link type for subject and predicate :param g: graph context :param subject: subject of linke :param predicate: link predicate :return: URI and optional type URI. URI is None if not a link """ link_node = g.value(subject, predicate) if link_node: l = g.value(link_node, FHIR.link) if l: typ = g.value(l, RDF.type) return l, typ return None, None
python
def link(g: Graph, subject: Node, predicate: URIRef) -> Tuple[Optional[URIRef], Optional[URIRef]]: """ Return the link URI and link type for subject and predicate :param g: graph context :param subject: subject of linke :param predicate: link predicate :return: URI and optional type URI. URI is None if not a link """ link_node = g.value(subject, predicate) if link_node: l = g.value(link_node, FHIR.link) if l: typ = g.value(l, RDF.type) return l, typ return None, None
Return the link URI and link type for subject and predicate :param g: graph context :param subject: subject of the link :param predicate: link predicate :return: URI and optional type URI. URI is None if not a link
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/fhirgraphutils.py#L90-L104
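The helper walks the ``fhir:link`` arc that the loader emits for references. The small hand-built graph below shows the shape it expects; the ``EX`` namespace and resource names are invented for the illustration.

```python
from rdflib import Graph, Namespace, BNode
from rdflib.namespace import RDF

FHIR = Namespace("http://hl7.org/fhir/")
EX = Namespace("http://example.org/fhir/")

g = Graph()
subj, ref = EX["Observation/o1"], BNode()          # ref plays the reference BNode
g.add((subj, FHIR["Observation.subject"], ref))
g.add((ref, FHIR.link, EX["Patient/p1"]))
g.add((EX["Patient/p1"], RDF.type, FHIR.Patient))

link_node = g.value(subj, FHIR["Observation.subject"])
target = g.value(link_node, FHIR.link)
print(target, g.value(target, RDF.type))
# http://example.org/fhir/Patient/p1 http://hl7.org/fhir/Patient
```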
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/fhirgraphutils.py
codeable_concept_code
def codeable_concept_code(g: Graph, subject: Node, predicate: URIRef, system: Optional[str]=None) \ -> List[CodeableConcept]: """ Return a list of CodeableConcept entries for the supplied subject and predicate in graph g :param g: graph containing the data :param subject: subject :param predicate: predicate :param system: coding system. If present, only concepts in this system will be returned :return: system, code and optional URI of matching concept(s) """ # EXAMPLE: # fhir:Patient.maritalStatus [ # fhir:CodeableConcept.coding [ # fhir:index 0; # a sct:36629006; # fhir:Coding.system [ fhir:value "http://snomed.info/sct" ]; # fhir:Coding.code [ fhir:value "36629006" ]; # fhir:Coding.display [ fhir:value "Legally married" ] # ], [ # fhir:index 1; # fhir:Coding.system [ fhir:value "http://hl7.org/fhir/v3/MaritalStatus" ]; # fhir:Coding.code [ fhir:value "M" ] # ] # ]; rval = [] coded_entry = g.value(subject, predicate, any=False) if coded_entry: for codeable_concept in list(g.objects(coded_entry, FHIR.CodeableConcept.coding)): coding_system = value(g, codeable_concept, FHIR.Coding.system) coding_code = value(g, codeable_concept, FHIR.Coding.code) if coding_system and coding_code and (system is None or system == coding_system): rval.append(CodeableConcept(coding_system, coding_code, g.value(codeable_concept, RDF.type, any=False))) return rval
python
def codeable_concept_code(g: Graph, subject: Node, predicate: URIRef, system: Optional[str]=None) \ -> List[CodeableConcept]: """ Return a list of CodeableConcept entries for the supplied subject and predicate in graph g :param g: graph containing the data :param subject: subject :param predicate: predicate :param system: coding system. If present, only concepts in this system will be returned :return: system, code and optional URI of matching concept(s) """ # EXAMPLE: # fhir:Patient.maritalStatus [ # fhir:CodeableConcept.coding [ # fhir:index 0; # a sct:36629006; # fhir:Coding.system [ fhir:value "http://snomed.info/sct" ]; # fhir:Coding.code [ fhir:value "36629006" ]; # fhir:Coding.display [ fhir:value "Legally married" ] # ], [ # fhir:index 1; # fhir:Coding.system [ fhir:value "http://hl7.org/fhir/v3/MaritalStatus" ]; # fhir:Coding.code [ fhir:value "M" ] # ] # ]; rval = [] coded_entry = g.value(subject, predicate, any=False) if coded_entry: for codeable_concept in list(g.objects(coded_entry, FHIR.CodeableConcept.coding)): coding_system = value(g, codeable_concept, FHIR.Coding.system) coding_code = value(g, codeable_concept, FHIR.Coding.code) if coding_system and coding_code and (system is None or system == coding_system): rval.append(CodeableConcept(coding_system, coding_code, g.value(codeable_concept, RDF.type, any=False))) return rval
Return a list of CodeableConcept entries for the supplied subject and predicate in graph g :param g: graph containing the data :param subject: subject :param predicate: predicate :param system: coding system. If present, only concepts in this system will be returned :return: system, code and optional URI of matching concept(s)
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/fhirgraphutils.py#L127-L159
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry._to_str
def _to_str(uri: URIRef) -> str: """ Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name """ local_name = str(uri).replace(str(FHIR), '') return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name
python
def _to_str(uri: URIRef) -> str: """ Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name """ local_name = str(uri).replace(str(FHIR), '') return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name
Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference :param uri: URI to convert :return: tag name
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L60-L68
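The mapping is pure string surgery on the URI. A quick check with plain strings; the ``to_tag`` name is only for this sketch.

```python
FHIR_BASE = "http://hl7.org/fhir/"

def to_tag(uri: str) -> str:
    local = uri.replace(FHIR_BASE, '')
    return local.rsplit('.', 1)[1] if '.' in local else local

print(to_tag("http://hl7.org/fhir/Provenance.agent.whoReference"))   # whoReference
print(to_tag("http://hl7.org/fhir/Patient"))                         # Patient
```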
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.predicates
def predicates(self) -> Dict[str, URIRef]: """ Return the tag names and corresponding URI's for all properties that can be associated with subject :return: Map from tag name (JSON object identifier) to corresponding URI """ rval = dict() for parent in self._o.objects(self._subj, RDFS.subClassOf): if isinstance(parent, URIRef) and not str(parent).startswith(str(W5)): rval.update(**FHIRMetaVocEntry(self._o, parent).predicates()) for s in self._o.subjects(RDFS.domain, self._subj): rval[self._to_str(s)] = s return rval
python
def predicates(self) -> Dict[str, URIRef]: """ Return the tag names and corresponding URI's for all properties that can be associated with subject :return: Map from tag name (JSON object identifier) to corresponding URI """ rval = dict() for parent in self._o.objects(self._subj, RDFS.subClassOf): if isinstance(parent, URIRef) and not str(parent).startswith(str(W5)): rval.update(**FHIRMetaVocEntry(self._o, parent).predicates()) for s in self._o.subjects(RDFS.domain, self._subj): rval[self._to_str(s)] = s return rval
Return the tag names and corresponding URIs for all properties that can be associated with subject :return: Map from tag name (JSON object identifier) to corresponding URI
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L70-L81
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.predicate_type
def predicate_type(self, pred: URIRef) -> URIRef: """ Return the type of pred :param pred: predicate to map :return: """ return self._o.value(pred, RDFS.range)
python
def predicate_type(self, pred: URIRef) -> URIRef: """ Return the type of pred :param pred: predicate to map :return: """ return self._o.value(pred, RDFS.range)
Return the type of pred :param pred: predicate to map :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L83-L89
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.is_valid
def is_valid(self, t: URIRef) -> bool: """ Raise an exception if 't' is unrecognized :param t: metadata URI """ if not self.has_type(t): raise TypeError("Unrecognized FHIR type: {}".format(t)) return True
python
def is_valid(self, t: URIRef) -> bool: """ Raise an exception if 't' is unrecognized :param t: metadata URI """ if not self.has_type(t): raise TypeError("Unrecognized FHIR type: {}".format(t)) return True
Raise an exception if 't' is unrecognized :param t: metadata URI
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L94-L101
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.is_primitive
def is_primitive(self, t: URIRef) -> bool: """ Determine whether type "t" is a FHIR primitive type :param t: type to test :return: """ return FHIR.Primitive in self._o.objects(t, RDFS.subClassOf)
python
def is_primitive(self, t: URIRef) -> bool: """ Determine whether type "t" is a FHIR primitive type :param t: type to test :return: """ return FHIR.Primitive in self._o.objects(t, RDFS.subClassOf)
Determine whether type "t" is a FHIR primitive type :param t: type to test :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L103-L109
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.value_predicate_to_type
def value_predicate_to_type(self, value_pred: str) -> URIRef: """ Convert a predicate in the form of "fhir:[...].value[type] to fhir:type, covering the downshift on the first character if necessary :param value_pred: Predicate associated with the value :return: corresponding type or None if not found """ if value_pred.startswith('value'): vp_datatype = value_pred.replace('value', '') if vp_datatype: if self.has_type(FHIR[vp_datatype]): return FHIR[vp_datatype] else: vp_datatype = vp_datatype[0].lower() + vp_datatype[1:] if self.has_type(FHIR[vp_datatype]): return FHIR[vp_datatype] if self.is_valid(FHIR[value_pred]): return FHIR[value_pred]
python
def value_predicate_to_type(self, value_pred: str) -> URIRef: """ Convert a predicate in the form of "fhir:[...].value[type] to fhir:type, covering the downshift on the first character if necessary :param value_pred: Predicate associated with the value :return: corresponding type or None if not found """ if value_pred.startswith('value'): vp_datatype = value_pred.replace('value', '') if vp_datatype: if self.has_type(FHIR[vp_datatype]): return FHIR[vp_datatype] else: vp_datatype = vp_datatype[0].lower() + vp_datatype[1:] if self.has_type(FHIR[vp_datatype]): return FHIR[vp_datatype] if self.is_valid(FHIR[value_pred]): return FHIR[value_pred]
Convert a predicate in the form of "fhir:[...].value[type]" to fhir:type, covering the downshift on the first character if necessary :param value_pred: Predicate associated with the value :return: corresponding type or None if not found
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L111-L128
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.is_atom
def is_atom(self, pred: URIRef) -> bool: """ Determine whether predicate is an 'atomic' type -- i.e it doesn't use a FHIR value representation :param pred: type to test :return: """ if not self.has_type(pred): if '.value' in str(pred): # synthetic values (valueString, valueDate, ...) return False else: raise TypeError("Unrecognized FHIR predicate: {}".format(pred)) return pred == FHIR.nodeRole or OWL.DatatypeProperty in set(self._o.objects(pred, RDF.type))
python
def is_atom(self, pred: URIRef) -> bool: """ Determine whether predicate is an 'atomic' type -- i.e it doesn't use a FHIR value representation :param pred: type to test :return: """ if not self.has_type(pred): if '.value' in str(pred): # synthetic values (valueString, valueDate, ...) return False else: raise TypeError("Unrecognized FHIR predicate: {}".format(pred)) return pred == FHIR.nodeRole or OWL.DatatypeProperty in set(self._o.objects(pred, RDF.type))
Determine whether predicate is an 'atomic' type -- i.e. it doesn't use a FHIR value representation :param pred: type to test :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L130-L141
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.primitive_datatype
def primitive_datatype(self, t: URIRef) -> Optional[URIRef]: """ Return the data type for primitive type t, if any :param t: type :return: corresponding data type """ for sco in self._o.objects(t, RDFS.subClassOf): sco_type = self._o.value(sco, RDF.type) sco_prop = self._o.value(sco, OWL.onProperty) if sco_type == OWL.Restriction and sco_prop == FHIR.value: # The older versions of fhir.ttl (incorrectly) referenced the datatype directly restriction_type = self._o.value(sco, OWL.allValuesFrom) if not restriction_type: restriction_dt_entry = self._o.value(sco, OWL.someValuesFrom) restriction_type = self._o.value(restriction_dt_entry, OWL.onDatatype) return restriction_type return None
python
def primitive_datatype(self, t: URIRef) -> Optional[URIRef]: """ Return the data type for primitive type t, if any :param t: type :return: corresponding data type """ for sco in self._o.objects(t, RDFS.subClassOf): sco_type = self._o.value(sco, RDF.type) sco_prop = self._o.value(sco, OWL.onProperty) if sco_type == OWL.Restriction and sco_prop == FHIR.value: # The older versions of fhir.ttl (incorrectly) referenced the datatype directly restriction_type = self._o.value(sco, OWL.allValuesFrom) if not restriction_type: restriction_dt_entry = self._o.value(sco, OWL.someValuesFrom) restriction_type = self._o.value(restriction_dt_entry, OWL.onDatatype) return restriction_type return None
Return the data type for primitive type t, if any :param t: type :return: corresponding data type
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L143-L159
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/fhirmetavoc.py
FHIRMetaVocEntry.primitive_datatype_nostring
def primitive_datatype_nostring(self, t: URIRef, v: Optional[str] = None) -> Optional[URIRef]: """ Return the data type for primitive type t, if any, defaulting string to no type :param t: type :param v: value - for munging dates if we're doing FHIR official output :return: corresponding data type """ vt = self.primitive_datatype(t) if self.fhir_dates and vt == XSD.dateTime and v: return XSD.gYear if len(v) == 4 else XSD.gYearMonth if len(v) == 7 \ else XSD.date if (len(v) == 10 or (len(v) > 10 and v[10] in '+-')) else XSD.dateTime # For some reason the oid datatype is represented as a string as well if self.fhir_oids and vt == XSD.anyURI: vt = None return None if vt == XSD.string else vt
python
def primitive_datatype_nostring(self, t: URIRef, v: Optional[str] = None) -> Optional[URIRef]: """ Return the data type for primitive type t, if any, defaulting string to no type :param t: type :param v: value - for munging dates if we're doing FHIR official output :return: corresponding data type """ vt = self.primitive_datatype(t) if self.fhir_dates and vt == XSD.dateTime and v: return XSD.gYear if len(v) == 4 else XSD.gYearMonth if len(v) == 7 \ else XSD.date if (len(v) == 10 or (len(v) > 10 and v[10] in '+-')) else XSD.dateTime # For some reason the oid datatype is represented as a string as well if self.fhir_oids and vt == XSD.anyURI: vt = None return None if vt == XSD.string else vt
Return the data type for primitive type t, if any, defaulting string to no type :param t: type :param v: value - for munging dates if we're doing FHIR official output :return: corresponding data type
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L161-L175
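The date munging keys entirely off the lexical form of the value. The standalone sketch below tabulates what the length rule yields for typical FHIR date strings; it mirrors the conditional in the record but is not the class method itself.

```python
from rdflib import XSD

def date_type(v: str):
    return (XSD.gYear if len(v) == 4 else
            XSD.gYearMonth if len(v) == 7 else
            XSD.date if (len(v) == 10 or (len(v) > 10 and v[10] in '+-')) else
            XSD.dateTime)

for v in ("2012", "2012-06", "2012-06-03", "2012-06-03+02:00", "2012-06-03T11:00:00Z"):
    print(v, "->", date_type(v))
# 2012 -> ...#gYear, 2012-06 -> ...#gYearMonth, 2012-06-03 -> ...#date,
# 2012-06-03+02:00 -> ...#date, 2012-06-03T11:00:00Z -> ...#dateTime
```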
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/picklejar.py
_PickleJar.add
def add(self, name: str, sig: Tuple, obj: object) -> None: """ Add a file to the cache :param name: name of the object to be pickled :param sig: signature for object :param obj: object to pickle """ if self._cache_directory is not None: if name in self._cache: os.remove(os.path.join(self._cache_directory, self._cache[name].loc)) fname = os.path.join(self._cache_directory, str(uuid.uuid4())) with open(fname, 'wb') as f: pickle.dump(obj, f) self._cache[name] = _PickleJar.CacheEntry(sig, fname) self._update()
python
def add(self, name: str, sig: Tuple, obj: object) -> None: """ Add a file to the cache :param name: name of the object to be pickled :param sig: signature for object :param obj: object to pickle """ if self._cache_directory is not None: if name in self._cache: os.remove(os.path.join(self._cache_directory, self._cache[name].loc)) fname = os.path.join(self._cache_directory, str(uuid.uuid4())) with open(fname, 'wb') as f: pickle.dump(obj, f) self._cache[name] = _PickleJar.CacheEntry(sig, fname) self._update()
Add a file to the cache :param name: name of the object to be pickled :param sig: signature for object :param obj: object to pickle
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/picklejar.py#L91-L105
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/picklejar.py
_PickleJar.get
def get(self, name: str, sig: Tuple) -> Optional[object]: """ Return the object representing name if it is cached :param name: name of object :param sig: unique signature of object :return: object if exists and signature matches """ if name not in self._cache: return None if self._cache[name].sig != sig: del self._cache[name] self._update() return None with open(self._cache[name].loc, 'rb') as f: return pickle.load(f)
python
def get(self, name: str, sig: Tuple) -> Optional[object]: """ Return the object representing name if it is cached :param name: name of object :param sig: unique signature of object :return: object if exists and signature matches """ if name not in self._cache: return None if self._cache[name].sig != sig: del self._cache[name] self._update() return None with open(self._cache[name].loc, 'rb') as f: return pickle.load(f)
Return the object representing name if it is cached :param name: name of object :param sig: unique signature of object :return: object if exists and signature matches
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/picklejar.py#L107-L121
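The cache contract is: same name and same signature returns the cached object; a changed signature drops the entry and reports a miss. An in-memory sketch of that contract, with no pickling or directory handling and illustrative names throughout:

```python
cache = {}   # name -> (signature, object)

def put(name, sig, obj):
    cache[name] = (sig, obj)

def get(name, sig):
    entry = cache.get(name)
    if entry is None or entry[0] != sig:
        cache.pop(name, None)         # stale entry is discarded on mismatch
        return None
    return entry[1]

put("fhir.ttl", ("Mon, 01 Jan 2019", "12345"), {"parsed": True})
assert get("fhir.ttl", ("Mon, 01 Jan 2019", "12345")) == {"parsed": True}
assert get("fhir.ttl", ("Tue, 02 Jul 2019", "99999")) is None   # signature changed
assert get("other.ttl", ("x", "y")) is None                     # never cached
```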
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/picklejar.py
_PickleJar.clear
def clear(self) -> None: """ Clear all cache entries for directory and, if it is a 'pure' directory, remove the directory itself """ if self._cache_directory is not None: # Safety - if there isn't a cache directory file, this probably isn't a valid cache assert os.path.exists(self._cache_directory_index), "Attempt to clear a non-existent cache" self._load() # Shouldn't have any impact but... for e in self._cache.values(): if os.path.exists(e.loc): os.remove(e.loc) self._cache.clear() self._update() self._cache = {}
python
def clear(self) -> None: """ Clear all cache entries for directory and, if it is a 'pure' directory, remove the directory itself """ if self._cache_directory is not None: # Safety - if there isn't a cache directory file, this probably isn't a valid cache assert os.path.exists(self._cache_directory_index), "Attempt to clear a non-existent cache" self._load() # Shouldn't have any impact but... for e in self._cache.values(): if os.path.exists(e.loc): os.remove(e.loc) self._cache.clear() self._update() self._cache = {}
Clear all cache entries for directory and, if it is a 'pure' directory, remove the directory itself
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/picklejar.py#L135-L148
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirjsonloader.py
fhir_json_to_rdf
def fhir_json_to_rdf(json_fname: str, base_uri: str = "http://hl7.org/fhir/", target_graph: Optional[Graph] = None, add_ontology_header: bool = True, do_continuations: bool = True, replace_narrative_text: bool = False, metavoc: Optional[Union[Graph, FHIRMetaVoc]] = None) -> Graph: """ Convert a FHIR JSON resource image to RDF :param json_fname: Name or URI of the file to convert :param base_uri: Base URI to use for relative references. :param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph. :param add_ontology_header: True means add owl:Ontology declaration to output :param do_continuations: True means follow continuation records on bundles and queries :param replace_narrative_text: True means replace any narrative text longer than 120 characters with '<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>' :param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph :return: resulting graph """ def check_for_continuation(data_: JsonObj) -> Optional[str]: if do_continuations and 'link' in data_ and isinstance(data_.link, list): for link_e in data_.link: if 'relation' in link_e and link_e.relation == 'next': return link_e.url return None if target_graph is None: target_graph = Graph() if metavoc is None: metavoc = FHIRMetaVoc().g elif isinstance(metavoc, FHIRMetaVoc): metavoc = metavoc.g page_fname = json_fname while page_fname: data = load(page_fname) if 'resourceType' in data and data.resourceType != 'Bundle': FHIRResource(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header, replace_narrative_text=replace_narrative_text) page_fname = check_for_continuation(data) elif 'entry' in data and isinstance(data.entry, list) and 'resource' in data.entry[0]: FHIRCollection(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header if 'resourceType' in data else False, replace_narrative_text=replace_narrative_text) page_fname = check_for_continuation(data) else: page_fname = None target_graph = None return target_graph
python
def fhir_json_to_rdf(json_fname: str, base_uri: str = "http://hl7.org/fhir/", target_graph: Optional[Graph] = None, add_ontology_header: bool = True, do_continuations: bool = True, replace_narrative_text: bool = False, metavoc: Optional[Union[Graph, FHIRMetaVoc]] = None) -> Graph: """ Convert a FHIR JSON resource image to RDF :param json_fname: Name or URI of the file to convert :param base_uri: Base URI to use for relative references. :param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph. :param add_ontology_header: True means add owl:Ontology declaration to output :param do_continuations: True means follow continuation records on bundles and queries :param replace_narrative_text: True means replace any narrative text longer than 120 characters with '<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>' :param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph :return: resulting graph """ def check_for_continuation(data_: JsonObj) -> Optional[str]: if do_continuations and 'link' in data_ and isinstance(data_.link, list): for link_e in data_.link: if 'relation' in link_e and link_e.relation == 'next': return link_e.url return None if target_graph is None: target_graph = Graph() if metavoc is None: metavoc = FHIRMetaVoc().g elif isinstance(metavoc, FHIRMetaVoc): metavoc = metavoc.g page_fname = json_fname while page_fname: data = load(page_fname) if 'resourceType' in data and data.resourceType != 'Bundle': FHIRResource(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header, replace_narrative_text=replace_narrative_text) page_fname = check_for_continuation(data) elif 'entry' in data and isinstance(data.entry, list) and 'resource' in data.entry[0]: FHIRCollection(metavoc, None, base_uri, data, target=target_graph, add_ontology_header=add_ontology_header if 'resourceType' in data else False, replace_narrative_text=replace_narrative_text) page_fname = check_for_continuation(data) else: page_fname = None target_graph = None return target_graph
Convert a FHIR JSON resource image to RDF :param json_fname: Name or URI of the file to convert :param base_uri: Base URI to use for relative references. :param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph. :param add_ontology_header: True means add owl:Ontology declaration to output :param do_continuations: True means follow continuation records on bundles and queries :param replace_narrative_text: True means replace any narrative text longer than 120 characters with '<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>' :param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph :return: resulting graph
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirjsonloader.py#L38-L88
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
subj_pred_idx_to_uri
def subj_pred_idx_to_uri(s: URIRef, p: URIRef, idx: Optional[int] = None) -> URIRef: """ Convert FHIR subject, predicate and entry index into a URI. The resulting element can be substituted for the name of the target BNODE :param s: Subject URI (e.g. "fhir:Patient/f201", "fhir:Patient/f201.Patient.identifier_0", ...) :param p: Predicate URI (e.g. "fhir:Patient.identifier", "fhir.Identifier.use :param idx: Relative position of BNODE if in a list :return: URI that can replace the BNODE (e.g. "fhir:Patient/f201 """ return URIRef(str(s) + '.' + str(p).rsplit('/', 1)[1] + ("_{}".format(idx) if idx is not None else ''))
python
def subj_pred_idx_to_uri(s: URIRef, p: URIRef, idx: Optional[int] = None) -> URIRef: """ Convert FHIR subject, predicate and entry index into a URI. The resulting element can be substituted for the name of the target BNODE :param s: Subject URI (e.g. "fhir:Patient/f201", "fhir:Patient/f201.Patient.identifier_0", ...) :param p: Predicate URI (e.g. "fhir:Patient.identifier", "fhir.Identifier.use :param idx: Relative position of BNODE if in a list :return: URI that can replace the BNODE (e.g. "fhir:Patient/f201 """ return URIRef(str(s) + '.' + str(p).rsplit('/', 1)[1] + ("_{}".format(idx) if idx is not None else ''))
Convert FHIR subject, predicate and entry index into a URI. The resulting element can be substituted for the name of the target BNODE :param s: Subject URI (e.g. "fhir:Patient/f201", "fhir:Patient/f201.Patient.identifier_0", ...) :param p: Predicate URI (e.g. "fhir:Patient.identifier", "fhir.Identifier.use") :param idx: Relative position of BNODE if in a list :return: URI that can replace the BNODE (e.g. "fhir:Patient/f201.Patient.identifier_0")
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L38-L46
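The skolem name is built from the subject, the predicate's local name and, for list members, the ``fhir:index``. A direct check against the example given in the docstring; ``skolem_uri`` mirrors the record's function.

```python
from rdflib import URIRef

def skolem_uri(s, p, idx=None):
    return URIRef(str(s) + '.' + str(p).rsplit('/', 1)[1]
                  + ("_{}".format(idx) if idx is not None else ''))

s = URIRef("http://hl7.org/fhir/Patient/f201")
p = URIRef("http://hl7.org/fhir/Patient.identifier")
print(skolem_uri(s, p, 0))   # http://hl7.org/fhir/Patient/f201.Patient.identifier_0
print(skolem_uri(s, p))      # http://hl7.org/fhir/Patient/f201.Patient.identifier
```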
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
map_node
def map_node(s: Node, sk_s: URIRef, gin: Graph, gout: Graph) -> None: """ Transform the BNode whose subject is s into its equivalent, replacing s with its 'skolemized' equivalent :param s: Actual subject :param sk_s: Equivalent URI of subject in output graph :param gin: Input graph :param gout: Output graph """ for p, o in gin.predicate_objects(s): if not isinstance(o, BNode): gout.add((sk_s, p, o)) else: sk_o = subj_pred_idx_to_uri(sk_s, p, gin.value(o, FHIR.index)) gout.add((sk_s, p, sk_o)) map_node(o, sk_o, gin, gout)
python
def map_node(s: Node, sk_s: URIRef, gin: Graph, gout: Graph) -> None: """ Transform the BNode whose subject is s into its equivalent, replacing s with its 'skolemized' equivalent :param s: Actual subject :param sk_s: Equivalent URI of subject in output graph :param gin: Input graph :param gout: Output graph """ for p, o in gin.predicate_objects(s): if not isinstance(o, BNode): gout.add((sk_s, p, o)) else: sk_o = subj_pred_idx_to_uri(sk_s, p, gin.value(o, FHIR.index)) gout.add((sk_s, p, sk_o)) map_node(o, sk_o, gin, gout)
Transform the BNode whose subject is s into its equivalent, replacing s with its 'skolemized' equivalent :param s: Actual subject :param sk_s: Equivalent URI of subject in output graph :param gin: Input graph :param gout: Output graph
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L49-L63
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
skolemize
def skolemize(gin: Graph) -> Graph: """ Replace all of the blank nodes in graph gin with FHIR paths :param gin: input graph :return: output graph """ gout = Graph() # Emit any unreferenced subject BNodes (boxes) anon_subjs = [s for s in gin.subjects() if isinstance(s, BNode) and len([gin.subject_predicates(s)]) == 0] if anon_subjs: idx = None if len(anon_subjs) == 1 else 0 for s in anon_subjs: map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout) if idx is not None: idx += 1 # Cover all other non-bnode entries for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)): map_node(subj, subj, gin, gout) return gout
python
def skolemize(gin: Graph) -> Graph: """ Replace all of the blank nodes in graph gin with FHIR paths :param gin: input graph :return: output graph """ gout = Graph() # Emit any unreferenced subject BNodes (boxes) anon_subjs = [s for s in gin.subjects() if isinstance(s, BNode) and len([gin.subject_predicates(s)]) == 0] if anon_subjs: idx = None if len(anon_subjs) == 1 else 0 for s in anon_subjs: map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout) if idx is not None: idx += 1 # Cover all other non-bnode entries for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)): map_node(subj, subj, gin, gout) return gout
Replace all of the blank nodes in graph gin with FHIR paths :param gin: input graph :return: output graph
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L66-L86
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
complete_definition
def complete_definition(subj: Node, source_graph: Graph, target_graph: Optional[Graph]=None) -> PrettyGraph: """ Return the transitive closure of subject. :param subj: URI or BNode for subject :param source_graph: Graph containing defininition :param target_graph: return graph (for recursion) :return: target_graph """ if target_graph is None: target_graph = PrettyGraph() for p, o in source_graph.predicate_objects(subj): target_graph.add((subj, p, o)) if isinstance(o, BNode): complete_definition(o, source_graph, target_graph) return target_graph
python
def complete_definition(subj: Node, source_graph: Graph, target_graph: Optional[Graph]=None) -> PrettyGraph: """ Return the transitive closure of subject. :param subj: URI or BNode for subject :param source_graph: Graph containing defininition :param target_graph: return graph (for recursion) :return: target_graph """ if target_graph is None: target_graph = PrettyGraph() for p, o in source_graph.predicate_objects(subj): target_graph.add((subj, p, o)) if isinstance(o, BNode): complete_definition(o, source_graph, target_graph) return target_graph
Return the transitive closure of subject. :param subj: URI or BNode for subject :param source_graph: Graph containing the definition :param target_graph: return graph (for recursion) :return: target_graph
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L89-L105
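A small illustrative call of complete_definition, assuming the same module import path; a plain rdflib Graph is passed as the target so the sketch does not depend on PrettyGraph.

from rdflib import Graph, URIRef

from fhirtordf.rdfsupport.rdfcompare import complete_definition  # assumed import path

ttl = """
@prefix ex: <http://example.org/> .
ex:obs1 ex:code [ ex:system "http://loinc.org" ; ex:code "718-7" ] .
ex:obs2 ex:status "final" .
"""

g = Graph()
g.parse(data=ttl, format='turtle')

# Pull out ex:obs1 plus everything reachable through blank nodes, leaving ex:obs2 behind.
closure = complete_definition(URIRef("http://example.org/obs1"), g, Graph())
print(len(closure))   # 3 triples: the blank-node link plus its two properties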
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
dump_nt_sorted
def dump_nt_sorted(g: Graph) -> List[str]:
    """
    Dump graph g in a sorted N-Triples (nt) format
    :param g: graph to dump
    :return: sorted list of N-Triple strings for g
    """
    return [l.decode('ascii') for l in sorted(g.serialize(format='nt').splitlines()) if l]
python
def dump_nt_sorted(g: Graph) -> List[str]:
    """
    Dump graph g in a sorted N-Triples (nt) format
    :param g: graph to dump
    :return: sorted list of N-Triple strings for g
    """
    return [l.decode('ascii') for l in sorted(g.serialize(format='nt').splitlines()) if l]
Dump graph g in a sorted N-Triples (nt) format
:param g: graph to dump
:return: sorted list of N-Triple strings for g
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L108-L114
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/rdfcompare.py
rdf_compare
def rdf_compare(g1: Graph, g2: Graph, ignore_owl_version: bool=False, ignore_type_arcs: bool = False,
                compare_filter: Optional[Callable[[Graph, Graph, Graph], None]]=None) -> str:
    """
    Compare graph g1 and g2
    :param g1: first graph
    :param g2: second graph
    :param ignore_owl_version:
    :param ignore_type_arcs:
    :param compare_filter: Final adjustment for graph difference.  Used, for example, to deal with
     FHIR decimal problems.
    :return: List of differences as printable lines or blank if everything matches
    """
    def graph_for_subject(g: Graph, subj: Node) -> Graph:
        subj_in_g = complete_definition(subj, g)
        if ignore_type_arcs:
            for ta_s, ta_o in subj_in_g.subject_objects(RDF.type):
                if isinstance(ta_s, BNode) and isinstance(ta_o, URIRef):
                    subj_in_g.remove((ta_s, RDF.type, ta_o))
        if ignore_owl_version:
            subj_in_g.remove((subj, OWL.versionIRI, subj_in_g.value(subj, OWL.versionIRI)))
        return subj_in_g

    def primary_subjects(g: Graph) -> Set[Node]:
        anon_subjs = set(anon_s for anon_s in g.subjects()
                         if isinstance(anon_s, BNode) and len(list(g.subject_predicates(anon_s))) == 0)
        return set(s_ for s_ in g.subjects() if isinstance(s_, URIRef)).union(anon_subjs)

    rval = ""

    # Step 1: Find any subjects in one graph that don't exist in the other
    g1_subjs = primary_subjects(g1)
    g2_subjs = primary_subjects(g2)
    for s in g1_subjs - g2_subjs:
        rval += "\n===== Subjects in Graph 1 but not Graph 2: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g1))
    for s in g2_subjs - g1_subjs:
        rval += "\n===== Subjects in Graph 2 but not Graph 1: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g2))

    # Step 2: Iterate over all of the remaining subjects comparing their contents
    for s in g1_subjs.intersection(g2_subjs):
        s_in_g1 = graph_for_subject(g1, s)
        s_in_g2 = graph_for_subject(g2, s)
        in_both, in_first, in_second = graph_diff(skolemize(s_in_g1), skolemize(s_in_g2))
        if compare_filter:
            compare_filter(in_both, in_first, in_second)
        if len(list(in_first)) or len(list(in_second)):
            rval += "\n\nSubject {} DIFFERENCE: ".format(s) + '=' * 30
            if len(in_first):
                rval += "\n\t----> First: \n" + '\n'.join(dump_nt_sorted(in_first))
            if len(in_second):
                rval += "\n\t----> Second: \n" + '\n'.join(dump_nt_sorted(in_second))
            rval += '-' * 40
    return rval
python
def rdf_compare(g1: Graph, g2: Graph, ignore_owl_version: bool=False, ignore_type_arcs: bool = False,
                compare_filter: Optional[Callable[[Graph, Graph, Graph], None]]=None) -> str:
    """
    Compare graph g1 and g2
    :param g1: first graph
    :param g2: second graph
    :param ignore_owl_version:
    :param ignore_type_arcs:
    :param compare_filter: Final adjustment for graph difference.  Used, for example, to deal with
     FHIR decimal problems.
    :return: List of differences as printable lines or blank if everything matches
    """
    def graph_for_subject(g: Graph, subj: Node) -> Graph:
        subj_in_g = complete_definition(subj, g)
        if ignore_type_arcs:
            for ta_s, ta_o in subj_in_g.subject_objects(RDF.type):
                if isinstance(ta_s, BNode) and isinstance(ta_o, URIRef):
                    subj_in_g.remove((ta_s, RDF.type, ta_o))
        if ignore_owl_version:
            subj_in_g.remove((subj, OWL.versionIRI, subj_in_g.value(subj, OWL.versionIRI)))
        return subj_in_g

    def primary_subjects(g: Graph) -> Set[Node]:
        anon_subjs = set(anon_s for anon_s in g.subjects()
                         if isinstance(anon_s, BNode) and len(list(g.subject_predicates(anon_s))) == 0)
        return set(s_ for s_ in g.subjects() if isinstance(s_, URIRef)).union(anon_subjs)

    rval = ""

    # Step 1: Find any subjects in one graph that don't exist in the other
    g1_subjs = primary_subjects(g1)
    g2_subjs = primary_subjects(g2)
    for s in g1_subjs - g2_subjs:
        rval += "\n===== Subjects in Graph 1 but not Graph 2: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g1))
    for s in g2_subjs - g1_subjs:
        rval += "\n===== Subjects in Graph 2 but not Graph 1: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g2))

    # Step 2: Iterate over all of the remaining subjects comparing their contents
    for s in g1_subjs.intersection(g2_subjs):
        s_in_g1 = graph_for_subject(g1, s)
        s_in_g2 = graph_for_subject(g2, s)
        in_both, in_first, in_second = graph_diff(skolemize(s_in_g1), skolemize(s_in_g2))
        if compare_filter:
            compare_filter(in_both, in_first, in_second)
        if len(list(in_first)) or len(list(in_second)):
            rval += "\n\nSubject {} DIFFERENCE: ".format(s) + '=' * 30
            if len(in_first):
                rval += "\n\t----> First: \n" + '\n'.join(dump_nt_sorted(in_first))
            if len(in_second):
                rval += "\n\t----> Second: \n" + '\n'.join(dump_nt_sorted(in_second))
            rval += '-' * 40
    return rval
Compare graph g1 and g2 :param g1: first graph :param g2: second graph :param ignore_owl_version: :param ignore_type_arcs: :param compare_filter: Final adjustment for graph difference. Used, for example, to deal with FHIR decimal problems. :return: List of differences as printable lines or blank if everything matches
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/rdfcompare.py#L117-L169
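A sketch of how rdf_compare might be invoked; the import path and the two Turtle file names are assumptions used only for illustration.

from rdflib import Graph

from fhirtordf.rdfsupport.rdfcompare import rdf_compare  # assumed import path

g1 = Graph().parse("patient-example.ttl", format="turtle")     # hypothetical files
g2 = Graph().parse("patient-example-v2.ttl", format="turtle")

# An empty string means the graphs matched; otherwise the report lists subjects
# missing from either graph followed by per-subject triple differences.
report = rdf_compare(g1, g2, ignore_owl_version=True, ignore_type_arcs=True)
print(report if report else "graphs match")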
BD2KOnFHIR/fhirtordf
fhirtordf/fhirtordf.py
proc_file
def proc_file(infile: str, outfile: str, opts: Namespace) -> bool: """ Process infile. :param infile: input file to be processed :param outfile: target output file. :param opts: :return: """ g = fhir_json_to_rdf(infile, opts.uribase, opts.graph, add_ontology_header=not opts.noontology, do_continuations=not opts.nocontinuation, replace_narrative_text=bool(opts.nonarrative), metavoc=opts.fhir_metavoc) # If we aren't carrying graph in opts, we're doing a file by file transformation if g: if not opts.graph: serialize_graph(g, outfile, opts) return True else: print("{} : Not a FHIR collection or resource".format(infile)) return False
python
def proc_file(infile: str, outfile: str, opts: Namespace) -> bool: """ Process infile. :param infile: input file to be processed :param outfile: target output file. :param opts: :return: """ g = fhir_json_to_rdf(infile, opts.uribase, opts.graph, add_ontology_header=not opts.noontology, do_continuations=not opts.nocontinuation, replace_narrative_text=bool(opts.nonarrative), metavoc=opts.fhir_metavoc) # If we aren't carrying graph in opts, we're doing a file by file transformation if g: if not opts.graph: serialize_graph(g, outfile, opts) return True else: print("{} : Not a FHIR collection or resource".format(infile)) return False
Process infile. :param infile: input file to be processed :param outfile: target output file. :param opts: :return:
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhirtordf.py#L76-L95
BD2KOnFHIR/fhirtordf
fhirtordf/fhirtordf.py
file_filter
def file_filter(ifn: str, indir: str, opts: Namespace) -> bool:
    """
    Determine whether to process ifn.  We don't process:
      1) Anything in a directory having a path element that begins with "_"
      2) Really, really big files
      3) Temporary lists of known errors

    :param ifn: input file name
    :param indir: input directory
    :param opts: argparse options
    :return: True if to be processed, false if to be skipped
    """
    # If it looks like we're processing a URL as an input file, skip the suffix check
    if '://' in ifn:
        return True
    if not ifn.endswith('.json'):
        return False
    if indir and (indir.startswith("_") or "/_" in indir or any(dn in indir for dn in opts.skipdirs)):
        return False
    if opts.skipfns and any(sfn in ifn for sfn in opts.skipfns):
        return False
    infile = os.path.join(indir, ifn)
    if not opts.infile and opts.maxsize and os.path.getsize(infile) > (opts.maxsize * 1000):
        return False
    return True
python
def file_filter(ifn: str, indir: str, opts: Namespace) -> bool:
    """
    Determine whether to process ifn.  We don't process:
      1) Anything in a directory having a path element that begins with "_"
      2) Really, really big files
      3) Temporary lists of known errors

    :param ifn: input file name
    :param indir: input directory
    :param opts: argparse options
    :return: True if to be processed, false if to be skipped
    """
    # If it looks like we're processing a URL as an input file, skip the suffix check
    if '://' in ifn:
        return True
    if not ifn.endswith('.json'):
        return False
    if indir and (indir.startswith("_") or "/_" in indir or any(dn in indir for dn in opts.skipdirs)):
        return False
    if opts.skipfns and any(sfn in ifn for sfn in opts.skipfns):
        return False
    infile = os.path.join(indir, ifn)
    if not opts.infile and opts.maxsize and os.path.getsize(infile) > (opts.maxsize * 1000):
        return False
    return True
Determine whether to process ifn.  We don't process:
  1) Anything in a directory having a path element that begins with "_"
  2) Really, really big files
  3) Temporary lists of known errors

:param ifn: input file name
:param indir: input directory
:param opts: argparse options
:return: True if to be processed, false if to be skipped
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhirtordf.py#L105-L133
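A sketch of the options object file_filter expects; the attribute names are the ones the function reads, while the values and the assumed import path are illustrative only.

from argparse import Namespace

from fhirtordf.fhirtordf import file_filter  # assumed import path

# maxsize=0 disables the on-disk size check so the sketch runs without real files.
opts = Namespace(skipdirs=['_excluded'], skipfns=['known_errors'], infile=None, maxsize=0)

print(file_filter('patient-example.json', 'examples', opts))              # True
print(file_filter('README.md', 'examples', opts))                         # False: not a .json file
print(file_filter('http://hl7.org/fhir/patient-example.json', '', opts))  # True: URL input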
BD2KOnFHIR/fhirtordf
fhirtordf/fhirtordf.py
fhirtordf
def fhirtordf(argv: List[str], default_exit: bool = True) -> bool: """ Entry point for command line utility """ dlp = dirlistproc.DirectoryListProcessor(argv, description="Convert FHIR JSON into RDF", infile_suffix=".json", outfile_suffix=".ttl", addargs=addargs, noexit=not default_exit) if not dlp.successful_parse: return False # Version if dlp.opts.version: print("FHIR to RDF Conversion Tool -- Version {}".format(__version__)) # We either have to have an input file or an input directory if not dlp.opts.infile and not dlp.opts.indir: if not dlp.opts.version: dlp.parser.error("Either an input file or an input directory must be supplied") return dlp.opts.version # Create the output directory if needed if dlp.opts.outdir and not os.path.exists(dlp.opts.outdir): os.makedirs(dlp.opts.outdir) # If we are going to a single output file or stdout, gather all the input dlp.opts.graph = Graph() if (not dlp.opts.outfile and not dlp.opts.outdir) or\ (dlp.opts.outfile and len(dlp.opts.outfile) == 1) else None dlp.opts.fhir_metavoc = load_fhir_ontology(dlp.opts) # If it looks like we're processing a URL as an input file, skip the suffix check if dlp.opts.infile and len(dlp.opts.infile) == 1 and not dlp.opts.indir and "://" in dlp.opts.infile[0]: dlp.infile_suffix = "" dlp.outfile_suffix = '.' + suffix_for(dlp.opts.format) nfiles, nsuccess = dlp.run(proc=proc_file, file_filter_2=file_filter) if nfiles: if dlp.opts.graph: serialize_graph(dlp.opts.graph, dlp.opts.outfile[0] if dlp.opts.outfile else None, dlp.opts) return nsuccess > 0 return False
python
def fhirtordf(argv: List[str], default_exit: bool = True) -> bool: """ Entry point for command line utility """ dlp = dirlistproc.DirectoryListProcessor(argv, description="Convert FHIR JSON into RDF", infile_suffix=".json", outfile_suffix=".ttl", addargs=addargs, noexit=not default_exit) if not dlp.successful_parse: return False # Version if dlp.opts.version: print("FHIR to RDF Conversion Tool -- Version {}".format(__version__)) # We either have to have an input file or an input directory if not dlp.opts.infile and not dlp.opts.indir: if not dlp.opts.version: dlp.parser.error("Either an input file or an input directory must be supplied") return dlp.opts.version # Create the output directory if needed if dlp.opts.outdir and not os.path.exists(dlp.opts.outdir): os.makedirs(dlp.opts.outdir) # If we are going to a single output file or stdout, gather all the input dlp.opts.graph = Graph() if (not dlp.opts.outfile and not dlp.opts.outdir) or\ (dlp.opts.outfile and len(dlp.opts.outfile) == 1) else None dlp.opts.fhir_metavoc = load_fhir_ontology(dlp.opts) # If it looks like we're processing a URL as an input file, skip the suffix check if dlp.opts.infile and len(dlp.opts.infile) == 1 and not dlp.opts.indir and "://" in dlp.opts.infile[0]: dlp.infile_suffix = "" dlp.outfile_suffix = '.' + suffix_for(dlp.opts.format) nfiles, nsuccess = dlp.run(proc=proc_file, file_filter_2=file_filter) if nfiles: if dlp.opts.graph: serialize_graph(dlp.opts.graph, dlp.opts.outfile[0] if dlp.opts.outfile else None, dlp.opts) return nsuccess > 0 return False
Entry point for command line utility
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhirtordf.py#L162-L202
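A sketch of driving the entry point programmatically; the -i/-o flag names are an assumption based on dirlistproc's usual conventions and may not match the actual argument parser.

from fhirtordf.fhirtordf import fhirtordf  # assumed import path

# Convert a single FHIR JSON resource to Turtle without letting argparse call sys.exit().
ok = fhirtordf(["-i", "patient-example.json", "-o", "patient-example.ttl"], default_exit=False)
print("converted" if ok else "conversion failed")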
BD2KOnFHIR/fhirtordf
fhirtordf/rdfsupport/uriutils.py
parse_fhir_resource_uri
def parse_fhir_resource_uri(uri: Union[URIRef, str]) -> FHIR_RESOURCE:
    """
    Use the FHIR Regular Expression for Resource URIs to determine the namespace and type of a given URI.
    As an example, "http://hl7.org/fhir/Patient/p123" maps to the namespace ``http://hl7.org/fhir``,
    the type ``FHIR.Patient`` and the resource id ``p123``.

    :param uri: URI to parse
    :return: FHIR_RESOURCE (namespace, type, resource)
    """
    uri_str = str(uri)
    m = FHIR_RESOURCE_RE.match(uri_str)
    if m:
        return FHIR_RESOURCE(URIRef(m.group(FHIR_RE_BASE)), FHIR[m.group(FHIR_RE_RESOURCE)], m.group(FHIR_RE_ID))
    else:
        # Not in the FHIR format - we can only do namespace and name
        namespace, name = uri_str.rsplit('#', 1) if '#' in uri_str \
            else uri_str.rsplit('/', 1) if '/' in uri_str else (None, uri_str)
        return FHIR_RESOURCE(URIRef(namespace), None, name)
python
def parse_fhir_resource_uri(uri: Union[URIRef, str]) -> FHIR_RESOURCE:
    """
    Use the FHIR Regular Expression for Resource URIs to determine the namespace and type of a given URI.
    As an example, "http://hl7.org/fhir/Patient/p123" maps to the namespace ``http://hl7.org/fhir``,
    the type ``FHIR.Patient`` and the resource id ``p123``.

    :param uri: URI to parse
    :return: FHIR_RESOURCE (namespace, type, resource)
    """
    uri_str = str(uri)
    m = FHIR_RESOURCE_RE.match(uri_str)
    if m:
        return FHIR_RESOURCE(URIRef(m.group(FHIR_RE_BASE)), FHIR[m.group(FHIR_RE_RESOURCE)], m.group(FHIR_RE_ID))
    else:
        # Not in the FHIR format - we can only do namespace and name
        namespace, name = uri_str.rsplit('#', 1) if '#' in uri_str \
            else uri_str.rsplit('/', 1) if '/' in uri_str else (None, uri_str)
        return FHIR_RESOURCE(URIRef(namespace), None, name)
Use the FHIR Regular Expression for Resource URIs to determine the namespace and type of a given URI.
As an example, "http://hl7.org/fhir/Patient/p123" maps to the namespace ``http://hl7.org/fhir``,
the type ``FHIR.Patient`` and the resource id ``p123``.

:param uri: URI to parse
:return: FHIR_RESOURCE (namespace, type, resource)
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/rdfsupport/uriutils.py#L42-L60
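A usage sketch for parse_fhir_resource_uri; the import path is assumed, and the result is unpacked positionally since the FHIR_RESOURCE field names are not shown above.

from fhirtordf.rdfsupport.uriutils import parse_fhir_resource_uri  # assumed import path

namespace, rtype, resource_id = parse_fhir_resource_uri("http://hl7.org/fhir/Patient/p123")
print(namespace)      # e.g. http://hl7.org/fhir (exact form depends on FHIR_RESOURCE_RE)
print(rtype)          # the FHIR Patient class URI
print(resource_id)    # p123

# A non-FHIR URI falls back to a plain namespace/name split with no type.
print(parse_fhir_resource_uri("http://example.org/vocab#Thing"))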
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
get_dummy_distribution
def get_dummy_distribution():
    """
    Returns a distutils Distribution object used to instrument the setup
    environment before calling the actual setup() function.
    """

    from .setup_helpers import _module_state

    if _module_state['registered_commands'] is None:
        raise RuntimeError(
            'astropy_helpers.setup_helpers.register_commands() must be '
            'called before using '
            'astropy_helpers.setup_helpers.get_dummy_distribution()')

    # Pre-parse the Distutils command-line options and config files to see if
    # the option is set.
    dist = Distribution({'script_name': os.path.basename(sys.argv[0]),
                         'script_args': sys.argv[1:]})
    dist.cmdclass.update(_module_state['registered_commands'])

    with silence():
        try:
            dist.parse_config_files()
            dist.parse_command_line()
        except (DistutilsError, AttributeError, SystemExit):
            # Let distutils handle DistutilsErrors itself.  AttributeErrors can
            # get raised for ./setup.py --help; SystemExit can be raised if a
            # display option was used, for example.
            pass

    return dist
python
def get_dummy_distribution():
    """
    Returns a distutils Distribution object used to instrument the setup
    environment before calling the actual setup() function.
    """

    from .setup_helpers import _module_state

    if _module_state['registered_commands'] is None:
        raise RuntimeError(
            'astropy_helpers.setup_helpers.register_commands() must be '
            'called before using '
            'astropy_helpers.setup_helpers.get_dummy_distribution()')

    # Pre-parse the Distutils command-line options and config files to see if
    # the option is set.
    dist = Distribution({'script_name': os.path.basename(sys.argv[0]),
                         'script_args': sys.argv[1:]})
    dist.cmdclass.update(_module_state['registered_commands'])

    with silence():
        try:
            dist.parse_config_files()
            dist.parse_command_line()
        except (DistutilsError, AttributeError, SystemExit):
            # Let distutils handle DistutilsErrors itself.  AttributeErrors can
            # get raised for ./setup.py --help; SystemExit can be raised if a
            # display option was used, for example.
            pass

    return dist
Returns a distutils Distribution object used to instrument the setup environment before calling the actual setup() function.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L23-L53
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
get_distutils_option
def get_distutils_option(option, commands): """ Returns the value of the given distutils option. Parameters ---------- option : str The name of the option commands : list of str The list of commands on which this option is available Returns ------- val : str or None the value of the given distutils option. If the option is not set, returns None. """ dist = get_dummy_distribution() for cmd in commands: cmd_opts = dist.command_options.get(cmd) if cmd_opts is not None and option in cmd_opts: return cmd_opts[option][1] else: return None
python
def get_distutils_option(option, commands): """ Returns the value of the given distutils option. Parameters ---------- option : str The name of the option commands : list of str The list of commands on which this option is available Returns ------- val : str or None the value of the given distutils option. If the option is not set, returns None. """ dist = get_dummy_distribution() for cmd in commands: cmd_opts = dist.command_options.get(cmd) if cmd_opts is not None and option in cmd_opts: return cmd_opts[option][1] else: return None
Returns the value of the given distutils option. Parameters ---------- option : str The name of the option commands : list of str The list of commands on which this option is available Returns ------- val : str or None the value of the given distutils option. If the option is not set, returns None.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L62-L87
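An illustrative call, assuming it runs inside a setup.py in which astropy_helpers' register_commands() has already executed (get_dummy_distribution raises a RuntimeError otherwise).

from astropy_helpers.distutils_helpers import get_distutils_option

# Value of --debug if it was passed to build or build_ext
# (e.g. `python setup.py build_ext --debug`); None when unset.
debug = get_distutils_option('debug', ['build', 'build_ext'])
print(debug)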
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
add_command_option
def add_command_option(command, name, doc, is_bool=False): """ Add a custom option to a setup command. Issues a warning if the option already exists on that command. Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value. """ dist = get_dummy_distribution() cmdcls = dist.get_command_class(command) if (hasattr(cmdcls, '_astropy_helpers_options') and name in cmdcls._astropy_helpers_options): return attr = name.replace('-', '_') if hasattr(cmdcls, attr): raise RuntimeError( '{0!r} already has a {1!r} class attribute, barring {2!r} from ' 'being usable as a custom option name.'.format(cmdcls, attr, name)) for idx, cmd in enumerate(cmdcls.user_options): if cmd[0] == name: log.warn('Overriding existing {0!r} option ' '{1!r}'.format(command, name)) del cmdcls.user_options[idx] if name in cmdcls.boolean_options: cmdcls.boolean_options.remove(name) break cmdcls.user_options.append((name, None, doc)) if is_bool: cmdcls.boolean_options.append(name) # Distutils' command parsing requires that a command object have an # attribute with the same name as the option (with '-' replaced with '_') # in order for that option to be recognized as valid setattr(cmdcls, attr, None) # This caches the options added through add_command_option so that if it is # run multiple times in the same interpreter repeated adds are ignored # (this way we can still raise a RuntimeError if a custom option overrides # a built-in option) if not hasattr(cmdcls, '_astropy_helpers_options'): cmdcls._astropy_helpers_options = set([name]) else: cmdcls._astropy_helpers_options.add(name)
python
def add_command_option(command, name, doc, is_bool=False): """ Add a custom option to a setup command. Issues a warning if the option already exists on that command. Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value. """ dist = get_dummy_distribution() cmdcls = dist.get_command_class(command) if (hasattr(cmdcls, '_astropy_helpers_options') and name in cmdcls._astropy_helpers_options): return attr = name.replace('-', '_') if hasattr(cmdcls, attr): raise RuntimeError( '{0!r} already has a {1!r} class attribute, barring {2!r} from ' 'being usable as a custom option name.'.format(cmdcls, attr, name)) for idx, cmd in enumerate(cmdcls.user_options): if cmd[0] == name: log.warn('Overriding existing {0!r} option ' '{1!r}'.format(command, name)) del cmdcls.user_options[idx] if name in cmdcls.boolean_options: cmdcls.boolean_options.remove(name) break cmdcls.user_options.append((name, None, doc)) if is_bool: cmdcls.boolean_options.append(name) # Distutils' command parsing requires that a command object have an # attribute with the same name as the option (with '-' replaced with '_') # in order for that option to be recognized as valid setattr(cmdcls, attr, None) # This caches the options added through add_command_option so that if it is # run multiple times in the same interpreter repeated adds are ignored # (this way we can still raise a RuntimeError if a custom option overrides # a built-in option) if not hasattr(cmdcls, '_astropy_helpers_options'): cmdcls._astropy_helpers_options = set([name]) else: cmdcls._astropy_helpers_options.add(name)
Add a custom option to a setup command. Issues a warning if the option already exists on that command. Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L161-L223
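A sketch of registering a custom flag; the option name and help text are hypothetical, and register_commands() must already have been called so the dummy distribution can resolve the build command class.

from astropy_helpers.distutils_helpers import add_command_option

# Adds a hypothetical boolean --with-foo flag to `python setup.py build`.
add_command_option('build', 'with-foo', 'enable the (hypothetical) foo backend', is_bool=True)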
astropy/astropy-helpers
astropy_helpers/distutils_helpers.py
get_distutils_display_options
def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands))
python
def get_distutils_display_options(): """ Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- """ short_display_opts = set('-' + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set('--' + o[0] for o in Distribution.display_options) # Include -h and --help which are not explicitly listed in # Distribution.display_options (as they are handled by optparse) short_display_opts.add('-h') long_display_opts.add('--help') # This isn't the greatest approach to hardcode these commands. # However, there doesn't seem to be a good way to determine # whether build *will be* run as part of the command at this # phase. display_commands = set([ 'clean', 'register', 'setopt', 'saveopts', 'egg_info', 'alias']) return short_display_opts.union(long_display_opts.union(display_commands))
Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or --
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/distutils_helpers.py#L226-L254
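A sketch of the typical guard this function supports: skipping expensive configuration when setup.py was only asked to display metadata.

import sys

from astropy_helpers.distutils_helpers import get_distutils_display_options

display_opts = get_distutils_display_options()
display_only = bool(sys.argv[1:]) and all(arg in display_opts for arg in sys.argv[1:])
if display_only:
    print("display-only invocation (e.g. --version or egg_info); skipping extension setup")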
astropy/astropy-helpers
astropy_helpers/commands/build_sphinx.py
ensure_sphinx_astropy_installed
def ensure_sphinx_astropy_installed():
    """
    Make sure that sphinx-astropy is available, installing it temporarily if not.

    This returns the available version of sphinx-astropy as well as any
    paths that should be added to sys.path for sphinx-astropy to be available.
    """
    # We've split out the Sphinx part of astropy-helpers into sphinx-astropy
    # but we want it to be auto-installed seamlessly for anyone using
    # build_docs. We check if it's already installed, and if not, we install
    # it to a local .eggs directory and add the eggs to the path (these
    # have to each be added to the path, we can't add them by simply adding
    # .eggs to the path)
    sys_path_inserts = []
    sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:

        from setuptools import Distribution
        dist = Distribution()

        # Numpydoc 0.9.0 requires sphinx 1.6+, so we can limit the version here
        # until we also make Sphinx 1.6 our minimum required version
        if SPHINX_LT_16:
            dist.fetch_build_eggs('numpydoc<0.9')

        # This egg build doesn't respect python_requires and isn't aware of
        # pre-releases. We know that mpl 3.1+ requires Python 3.6+, so this
        # ugly workaround takes care of it until there is a solution for
        # https://github.com/astropy/astropy-helpers/issues/462
        if LooseVersion(sys.version) < LooseVersion('3.6'):
            dist.fetch_build_eggs('matplotlib<3.1')

        eggs = dist.fetch_build_eggs('sphinx-astropy')

        # Find out the version of sphinx-astropy if possible. For some old
        # setuptools versions, eggs will be None even if sphinx-astropy was
        # successfully installed.
        if eggs is not None:
            for egg in eggs:
                if egg.project_name == 'sphinx-astropy':
                    sphinx_astropy_version = egg.parsed_version.public
                    break

        eggs_path = os.path.abspath('.eggs')
        for egg in glob.glob(os.path.join(eggs_path, '*.egg')):
            sys_path_inserts.append(egg)

    return sphinx_astropy_version, sys_path_inserts
python
def ensure_sphinx_astropy_installed():
    """
    Make sure that sphinx-astropy is available, installing it temporarily if not.

    This returns the available version of sphinx-astropy as well as any
    paths that should be added to sys.path for sphinx-astropy to be available.
    """
    # We've split out the Sphinx part of astropy-helpers into sphinx-astropy
    # but we want it to be auto-installed seamlessly for anyone using
    # build_docs. We check if it's already installed, and if not, we install
    # it to a local .eggs directory and add the eggs to the path (these
    # have to each be added to the path, we can't add them by simply adding
    # .eggs to the path)
    sys_path_inserts = []
    sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:

        from setuptools import Distribution
        dist = Distribution()

        # Numpydoc 0.9.0 requires sphinx 1.6+, so we can limit the version here
        # until we also make Sphinx 1.6 our minimum required version
        if SPHINX_LT_16:
            dist.fetch_build_eggs('numpydoc<0.9')

        # This egg build doesn't respect python_requires and isn't aware of
        # pre-releases. We know that mpl 3.1+ requires Python 3.6+, so this
        # ugly workaround takes care of it until there is a solution for
        # https://github.com/astropy/astropy-helpers/issues/462
        if LooseVersion(sys.version) < LooseVersion('3.6'):
            dist.fetch_build_eggs('matplotlib<3.1')

        eggs = dist.fetch_build_eggs('sphinx-astropy')

        # Find out the version of sphinx-astropy if possible. For some old
        # setuptools versions, eggs will be None even if sphinx-astropy was
        # successfully installed.
        if eggs is not None:
            for egg in eggs:
                if egg.project_name == 'sphinx-astropy':
                    sphinx_astropy_version = egg.parsed_version.public
                    break

        eggs_path = os.path.abspath('.eggs')
        for egg in glob.glob(os.path.join(eggs_path, '*.egg')):
            sys_path_inserts.append(egg)

    return sphinx_astropy_version, sys_path_inserts
Make sure that sphinx-astropy is available, installing it temporarily if not. This returns the available version of sphinx-astropy as well as any paths that should be added to sys.path for sphinx-astropy to be available.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/commands/build_sphinx.py#L40-L89
astropy/astropy-helpers
astropy_helpers/utils.py
_get_platlib_dir
def _get_platlib_dir(cmd):
    """
    Given a build command, return the name of the appropriate
    platform-specific build subdirectory (e.g. build/lib.linux-x86_64-2.7)
    """

    plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3])
    return os.path.join(cmd.build_base, 'lib' + plat_specifier)
python
def _get_platlib_dir(cmd):
    """
    Given a build command, return the name of the appropriate
    platform-specific build subdirectory (e.g. build/lib.linux-x86_64-2.7)
    """

    plat_specifier = '.{0}-{1}'.format(cmd.plat_name, sys.version[0:3])
    return os.path.join(cmd.build_base, 'lib' + plat_specifier)
Given a build command, return the name of the appropriate platform-specific
build subdirectory (e.g. build/lib.linux-x86_64-2.7)
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L37-L44
astropy/astropy-helpers
astropy_helpers/utils.py
get_numpy_include_path
def get_numpy_include_path(): """ Gets the path to the numpy headers. """ # We need to go through this nonsense in case setuptools # downloaded and installed Numpy for us as part of the build or # install, since Numpy may still think it's in "setup mode", when # in fact we're ready to use it to build astropy now. import builtins if hasattr(builtins, '__NUMPY_SETUP__'): del builtins.__NUMPY_SETUP__ import imp import numpy imp.reload(numpy) try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() return numpy_include
python
def get_numpy_include_path(): """ Gets the path to the numpy headers. """ # We need to go through this nonsense in case setuptools # downloaded and installed Numpy for us as part of the build or # install, since Numpy may still think it's in "setup mode", when # in fact we're ready to use it to build astropy now. import builtins if hasattr(builtins, '__NUMPY_SETUP__'): del builtins.__NUMPY_SETUP__ import imp import numpy imp.reload(numpy) try: numpy_include = numpy.get_include() except AttributeError: numpy_include = numpy.get_numpy_include() return numpy_include
Gets the path to the numpy headers.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L47-L67
astropy/astropy-helpers
astropy_helpers/utils.py
is_path_hidden
def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b'.') else: is_dotted = name.startswith('.') return is_dotted or _has_hidden_attribute(filepath)
python
def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b'.') else: is_dotted = name.startswith('.') return is_dotted or _has_hidden_attribute(filepath)
Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L128-L148
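A short usage sketch, assuming astropy_helpers is importable.

from astropy_helpers.utils import is_path_hidden

print(is_path_hidden('.git'))            # True: dotted basename
print(is_path_hidden('docs/index.rst'))  # False, unless the OS marks the file hidden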
astropy/astropy-helpers
astropy_helpers/utils.py
walk_skip_hidden
def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter `topdown` from `os.walk`: the directories must always be recursed top-down when using this function. See also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files
python
def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter `topdown` from `os.walk`: the directories must always be recursed top-down when using this function. See also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files
A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter `topdown` from `os.walk`: the directories must always be recursed top-down when using this function. See also -------- os.walk : For a description of the parameters
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L151-L171
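A usage sketch that collects non-hidden Python files, assuming astropy_helpers is importable.

import os

from astropy_helpers.utils import walk_skip_hidden

# Gather every .py file under the current tree while skipping .git, .tox and
# other hidden files or directories.
py_files = []
for root, dirs, files in walk_skip_hidden('.'):
    py_files.extend(os.path.join(root, f) for f in files if f.endswith('.py'))
print(len(py_files))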
astropy/astropy-helpers
astropy_helpers/utils.py
write_if_different
def write_if_different(filename, data): """Write `data` to `filename`, if the content of the file is different. Parameters ---------- filename : str The file name to be written to. data : bytes The data to be written to `filename`. """ assert isinstance(data, bytes) if os.path.exists(filename): with open(filename, 'rb') as fd: original_data = fd.read() else: original_data = None if original_data != data: with open(filename, 'wb') as fd: fd.write(data)
python
def write_if_different(filename, data): """Write `data` to `filename`, if the content of the file is different. Parameters ---------- filename : str The file name to be written to. data : bytes The data to be written to `filename`. """ assert isinstance(data, bytes) if os.path.exists(filename): with open(filename, 'rb') as fd: original_data = fd.read() else: original_data = None if original_data != data: with open(filename, 'wb') as fd: fd.write(data)
Write `data` to `filename`, if the content of the file is different. Parameters ---------- filename : str The file name to be written to. data : bytes The data to be written to `filename`.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L174-L195
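A usage sketch; the target file name is illustrative only.

from astropy_helpers.utils import write_if_different

# Regenerate a version file only when its content actually changes, so its
# mtime stays stable and incremental builds are not invalidated.
content = "version = '1.2.3'\n".encode('utf-8')
write_if_different('version.py', content)   # hypothetical generated file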
astropy/astropy-helpers
astropy_helpers/utils.py
import_file
def import_file(filename, name=None): """ Imports a module from a single file as if it doesn't belong to a particular package. The returned module will have the optional ``name`` if given, or else a name generated from the filename. """ # Specifying a traditional dot-separated fully qualified name here # results in a number of "Parent module 'astropy' not found while # handling absolute import" warnings. Using the same name, the # namespaces of the modules get merged together. So, this # generates an underscore-separated name which is more likely to # be unique, and it doesn't really matter because the name isn't # used directly here anyway. mode = 'r' if name is None: basename = os.path.splitext(filename)[0] name = '_'.join(os.path.relpath(basename).split(os.sep)[1:]) if not os.path.exists(filename): raise ImportError('Could not import file {0}'.format(filename)) if import_machinery: loader = import_machinery.SourceFileLoader(name, filename) mod = loader.load_module() else: with open(filename, mode) as fd: mod = imp.load_module(name, fd, filename, ('.py', mode, 1)) return mod
python
def import_file(filename, name=None): """ Imports a module from a single file as if it doesn't belong to a particular package. The returned module will have the optional ``name`` if given, or else a name generated from the filename. """ # Specifying a traditional dot-separated fully qualified name here # results in a number of "Parent module 'astropy' not found while # handling absolute import" warnings. Using the same name, the # namespaces of the modules get merged together. So, this # generates an underscore-separated name which is more likely to # be unique, and it doesn't really matter because the name isn't # used directly here anyway. mode = 'r' if name is None: basename = os.path.splitext(filename)[0] name = '_'.join(os.path.relpath(basename).split(os.sep)[1:]) if not os.path.exists(filename): raise ImportError('Could not import file {0}'.format(filename)) if import_machinery: loader = import_machinery.SourceFileLoader(name, filename) mod = loader.load_module() else: with open(filename, mode) as fd: mod = imp.load_module(name, fd, filename, ('.py', mode, 1)) return mod
Imports a module from a single file as if it doesn't belong to a particular package. The returned module will have the optional ``name`` if given, or else a name generated from the filename.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L198-L229
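A usage sketch; the file path and the attribute looked up are hypothetical.

from astropy_helpers.utils import import_file

# Load a module straight from a file path without putting it on sys.path.
mod = import_file('mypackage/version.py', name='mypackage_version')
print(getattr(mod, 'version', None))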
astropy/astropy-helpers
astropy_helpers/utils.py
resolve_name
def resolve_name(name): """Resolve a name like ``module.object`` to an object and return it. Raise `ImportError` if the module or name is not found. """ parts = name.split('.') cursor = len(parts) - 1 module_name = parts[:cursor] attr_name = parts[-1] while cursor > 0: try: ret = __import__('.'.join(module_name), fromlist=[attr_name]) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] attr_name = parts[cursor] ret = '' for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret
python
def resolve_name(name): """Resolve a name like ``module.object`` to an object and return it. Raise `ImportError` if the module or name is not found. """ parts = name.split('.') cursor = len(parts) - 1 module_name = parts[:cursor] attr_name = parts[-1] while cursor > 0: try: ret = __import__('.'.join(module_name), fromlist=[attr_name]) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] attr_name = parts[cursor] ret = '' for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret
Resolve a name like ``module.object`` to an object and return it. Raise `ImportError` if the module or name is not found.
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/utils.py#L232-L261
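A usage sketch of resolve_name with standard-library and setuptools names.

from astropy_helpers.utils import resolve_name

join = resolve_name('os.path.join')
print(join('a', 'b'))                     # a/b (a\b on Windows)

Distribution = resolve_name('setuptools.dist.Distribution')
print(Distribution.__name__)              # Distribution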