body (string, lengths 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1 to 16.8k) | path (string, lengths 5 to 230) | name (string, lengths 1 to 96) | repository_name (string, lengths 7 to 89) | lang (1 class) | body_without_docstring (string, lengths 20 to 98.2k)
---|---|---|---|---|---|---|---|
@property
def file(self):
'Gets the file of this URL. # noqa: E501\n\n\n :return: The file of this URL. # noqa: E501\n :rtype: str\n '
return self._file | -3,721,828,442,324,312,600 | Gets the file of this URL. # noqa: E501
:return: The file of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | file | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def file(self):
'Gets the file of this URL. # noqa: E501\n\n\n :return: The file of this URL. # noqa: E501\n :rtype: str\n '
return self._file |
@file.setter
def file(self, file):
'Sets the file of this URL.\n\n\n :param file: The file of this URL. # noqa: E501\n :type: str\n '
self._file = file | 3,296,577,616,889,792,500 | Sets the file of this URL.
:param file: The file of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | file | CSTC-WTCB-BBRI/python_tb_rest_client | python | @file.setter
def file(self, file):
'Sets the file of this URL.\n\n\n :param file: The file of this URL. # noqa: E501\n :type: str\n '
self._file = file |
@property
def host(self):
'Gets the host of this URL. # noqa: E501\n\n\n :return: The host of this URL. # noqa: E501\n :rtype: str\n '
return self._host | -2,618,415,369,124,634,000 | Gets the host of this URL. # noqa: E501
:return: The host of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | host | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def host(self):
'Gets the host of this URL. # noqa: E501\n\n\n :return: The host of this URL. # noqa: E501\n :rtype: str\n '
return self._host |
@host.setter
def host(self, host):
'Sets the host of this URL.\n\n\n :param host: The host of this URL. # noqa: E501\n :type: str\n '
self._host = host | -7,627,030,577,744,579,000 | Sets the host of this URL.
:param host: The host of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | host | CSTC-WTCB-BBRI/python_tb_rest_client | python | @host.setter
def host(self, host):
'Sets the host of this URL.\n\n\n :param host: The host of this URL. # noqa: E501\n :type: str\n '
self._host = host |
@property
def path(self):
'Gets the path of this URL. # noqa: E501\n\n\n :return: The path of this URL. # noqa: E501\n :rtype: str\n '
return self._path | 3,544,141,312,571,741,700 | Gets the path of this URL. # noqa: E501
:return: The path of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | path | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def path(self):
'Gets the path of this URL. # noqa: E501\n\n\n :return: The path of this URL. # noqa: E501\n :rtype: str\n '
return self._path |
@path.setter
def path(self, path):
'Sets the path of this URL.\n\n\n :param path: The path of this URL. # noqa: E501\n :type: str\n '
self._path = path | 1,228,084,036,121,024,300 | Sets the path of this URL.
:param path: The path of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | path | CSTC-WTCB-BBRI/python_tb_rest_client | python | @path.setter
def path(self, path):
'Sets the path of this URL.\n\n\n :param path: The path of this URL. # noqa: E501\n :type: str\n '
self._path = path |
@property
def port(self):
'Gets the port of this URL. # noqa: E501\n\n\n :return: The port of this URL. # noqa: E501\n :rtype: int\n '
return self._port | -6,853,370,054,219,172,000 | Gets the port of this URL. # noqa: E501
:return: The port of this URL. # noqa: E501
:rtype: int | tb_rest_client/models/models_pe/url.py | port | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def port(self):
'Gets the port of this URL. # noqa: E501\n\n\n :return: The port of this URL. # noqa: E501\n :rtype: int\n '
return self._port |
@port.setter
def port(self, port):
'Sets the port of this URL.\n\n\n :param port: The port of this URL. # noqa: E501\n :type: int\n '
self._port = port | 4,037,869,362,115,543,600 | Sets the port of this URL.
:param port: The port of this URL. # noqa: E501
:type: int | tb_rest_client/models/models_pe/url.py | port | CSTC-WTCB-BBRI/python_tb_rest_client | python | @port.setter
def port(self, port):
'Sets the port of this URL.\n\n\n :param port: The port of this URL. # noqa: E501\n :type: int\n '
self._port = port |
@property
def protocol(self):
'Gets the protocol of this URL. # noqa: E501\n\n\n :return: The protocol of this URL. # noqa: E501\n :rtype: str\n '
return self._protocol | -6,072,068,150,181,221,000 | Gets the protocol of this URL. # noqa: E501
:return: The protocol of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | protocol | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def protocol(self):
'Gets the protocol of this URL. # noqa: E501\n\n\n :return: The protocol of this URL. # noqa: E501\n :rtype: str\n '
return self._protocol |
@protocol.setter
def protocol(self, protocol):
'Sets the protocol of this URL.\n\n\n :param protocol: The protocol of this URL. # noqa: E501\n :type: str\n '
self._protocol = protocol | 6,866,032,306,924,647,000 | Sets the protocol of this URL.
:param protocol: The protocol of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | protocol | CSTC-WTCB-BBRI/python_tb_rest_client | python | @protocol.setter
def protocol(self, protocol):
'Sets the protocol of this URL.\n\n\n :param protocol: The protocol of this URL. # noqa: E501\n :type: str\n '
self._protocol = protocol |
@property
def query(self):
'Gets the query of this URL. # noqa: E501\n\n\n :return: The query of this URL. # noqa: E501\n :rtype: str\n '
return self._query | 2,452,301,778,565,377,500 | Gets the query of this URL. # noqa: E501
:return: The query of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | query | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def query(self):
'Gets the query of this URL. # noqa: E501\n\n\n :return: The query of this URL. # noqa: E501\n :rtype: str\n '
return self._query |
@query.setter
def query(self, query):
'Sets the query of this URL.\n\n\n :param query: The query of this URL. # noqa: E501\n :type: str\n '
self._query = query | -7,903,680,722,677,127,000 | Sets the query of this URL.
:param query: The query of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | query | CSTC-WTCB-BBRI/python_tb_rest_client | python | @query.setter
def query(self, query):
'Sets the query of this URL.\n\n\n :param query: The query of this URL. # noqa: E501\n :type: str\n '
self._query = query |
@property
def ref(self):
'Gets the ref of this URL. # noqa: E501\n\n\n :return: The ref of this URL. # noqa: E501\n :rtype: str\n '
return self._ref | -5,318,472,932,718,013,000 | Gets the ref of this URL. # noqa: E501
:return: The ref of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | ref | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def ref(self):
'Gets the ref of this URL. # noqa: E501\n\n\n :return: The ref of this URL. # noqa: E501\n :rtype: str\n '
return self._ref |
@ref.setter
def ref(self, ref):
'Sets the ref of this URL.\n\n\n :param ref: The ref of this URL. # noqa: E501\n :type: str\n '
self._ref = ref | -5,554,801,580,472,594,000 | Sets the ref of this URL.
:param ref: The ref of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | ref | CSTC-WTCB-BBRI/python_tb_rest_client | python | @ref.setter
def ref(self, ref):
'Sets the ref of this URL.\n\n\n :param ref: The ref of this URL. # noqa: E501\n :type: str\n '
self._ref = ref |
@property
def user_info(self):
'Gets the user_info of this URL. # noqa: E501\n\n\n :return: The user_info of this URL. # noqa: E501\n :rtype: str\n '
return self._user_info | 3,626,666,944,548,141,000 | Gets the user_info of this URL. # noqa: E501
:return: The user_info of this URL. # noqa: E501
:rtype: str | tb_rest_client/models/models_pe/url.py | user_info | CSTC-WTCB-BBRI/python_tb_rest_client | python | @property
def user_info(self):
'Gets the user_info of this URL. # noqa: E501\n\n\n :return: The user_info of this URL. # noqa: E501\n :rtype: str\n '
return self._user_info |
@user_info.setter
def user_info(self, user_info):
'Sets the user_info of this URL.\n\n\n :param user_info: The user_info of this URL. # noqa: E501\n :type: str\n '
self._user_info = user_info | -897,848,595,035,173,900 | Sets the user_info of this URL.
:param user_info: The user_info of this URL. # noqa: E501
:type: str | tb_rest_client/models/models_pe/url.py | user_info | CSTC-WTCB-BBRI/python_tb_rest_client | python | @user_info.setter
def user_info(self, user_info):
'Sets the user_info of this URL.\n\n\n :param user_info: The user_info of this URL. # noqa: E501\n :type: str\n '
self._user_info = user_info |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(URL, dict):
for (key, value) in self.items():
result[key] = value
return result | 3,466,395,792,294,842,000 | Returns the model properties as a dict | tb_rest_client/models/models_pe/url.py | to_dict | CSTC-WTCB-BBRI/python_tb_rest_client | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(URL, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | tb_rest_client/models/models_pe/url.py | to_str | CSTC-WTCB-BBRI/python_tb_rest_client | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | tb_rest_client/models/models_pe/url.py | __repr__ | CSTC-WTCB-BBRI/python_tb_rest_client | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, URL)):
return False
return (self.__dict__ == other.__dict__) | 1,915,447,212,573,964,000 | Returns true if both objects are equal | tb_rest_client/models/models_pe/url.py | __eq__ | CSTC-WTCB-BBRI/python_tb_rest_client | python | def __eq__(self, other):
if (not isinstance(other, URL)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | tb_rest_client/models/models_pe/url.py | __ne__ | CSTC-WTCB-BBRI/python_tb_rest_client | python | def __ne__(self, other):
return (not (self == other)) |
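The getters, setters, and helper methods above follow the standard swagger-codegen model pattern. A hedged usage sketch follows; the `URL` constructor is not shown in this excerpt, so the keyword-argument form below is an assumption based on how swagger-codegen typically emits models:

```python
from tb_rest_client.models.models_pe.url import URL

# Hypothetical construction: generated models usually accept each field as a
# keyword argument, but the constructor is not part of this excerpt.
url = URL(protocol='https', host='example.com', port=443, path='/api/v1')
url.query = 'limit=10'       # the setter simply stores into self._query

print(url.to_dict())         # plain dict keyed by the swagger_types mapping
print(url)                   # __repr__ -> to_str() -> pprint.pformat output

other = URL(protocol='https', host='example.com', port=443, path='/api/v1')
other.query = 'limit=10'
print(url == other)          # True: __eq__ compares the full __dict__
```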
def parse_arguments():
'\n Parse input arguments. The API key is mandatory.\n '
parser = argparse.ArgumentParser(description='Incrementally exports JSON orders data into CSV format and optionally into a SQLite DB.')
parser.add_argument('-k', '--key', type=str, required=True, help='API key to be used to perform the REST request to the backend.')
parser.add_argument('-l', '--locale', type=str, required=False, help='Specify the locale: it_IT for italian. Otherwise machine default one.')
parser.add_argument('-d', '--db', action='store_true', required=False, help='Instruct the tool to load a SQLite database up.')
parser.add_argument('-p', '--path', type=str, required=True, help='Define datastore base path to csv/ and db/ folders (csv/ and db/ folders should be already created).')
parser.add_argument('-n', '--number', type=int, required=True, help='Define how many records each REST call should pull down.')
parser.add_argument('-c', '--customer', type=int, required=False, help='Define whether the customer table should be updated contextually: it requires the number of cycles per page (max 50 records).')
args = parser.parse_args()
return args | 8,244,479,134,053,085,000 | Parse input arguments. The API key is mandatory. | scripts/orders-exporter.py | parse_arguments | hailpam/data-crunching | python | def parse_arguments():
'\n \n '
parser = argparse.ArgumentParser(description='Incrementally exports JSON orders data into CSV format and optionally into a SQLite DB.')
parser.add_argument('-k', '--key', type=str, required=True, help='API key to be used to perform the REST request to the backend.')
parser.add_argument('-l', '--locale', type=str, required=False, help='Specify the locale: it_IT for italian. Otherwise machine default one.')
parser.add_argument('-d', '--db', action='store_true', required=False, help='Instruct the tool to load a SQLite database up.')
parser.add_argument('-p', '--path', type=str, required=True, help='Define datastore base path to csv/ and db/ folders (csv/ and db/ folders should be already created).')
parser.add_argument('-n', '--number', type=int, required=True, help='Define how many records each REST call should pull down.')
parser.add_argument('-c', '--customer', type=int, required=False, help='Define whether the customer table should be updated contextually: it requires the number of cycles per page (max 50 records).')
args = parser.parse_args()
return args |
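A minimal way to exercise the parser without a shell invocation, assuming the script's own `argparse` import is in scope: `argparse` reads `sys.argv`, so patching it is enough. The key value below is a placeholder, not a real API key.

```python
import sys

sys.argv = ['orders-exporter.py',
            '--key', 'PLACEHOLDER_KEY',   # mandatory API key
            '--path', './datastore',      # must already contain csv/ and db/
            '--number', '100',            # records pulled per REST call
            '--db']                       # also load the SQLite database
args = parse_arguments()
print(args.key, args.path, args.number, args.db)
# PLACEHOLDER_KEY ./datastore 100 True
```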
def __init__(self, hass: HomeAssistant, api: str, name: str, polling_interval: int):
'Initialize the global Omnilogic data updater.'
self.api = api
super().__init__(hass=hass, logger=_LOGGER, name=name, update_interval=timedelta(seconds=polling_interval)) | 4,122,379,445,778,655,700 | Initialize the global Omnilogic data updater. | homeassistant/components/omnilogic/common.py | __init__ | 123dev/core | python | def __init__(self, hass: HomeAssistant, api: str, name: str, polling_interval: int):
self.api = api
super().__init__(hass=hass, logger=_LOGGER, name=name, update_interval=timedelta(seconds=polling_interval)) |
async def _async_update_data(self):
'Fetch data from OmniLogic.'
try:
data = (await self.api.get_telemetry_data())
except OmniLogicException as error:
raise UpdateFailed(f'Error updating from OmniLogic: {error}') from error
parsed_data = {}
def get_item_data(item, item_kind, current_id, data):
'Get data per kind of Omnilogic API item.'
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if ('systemId' in item):
system_id = item['systemId']
current_id = (current_id + (item_kind, system_id))
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if (kind in item):
data = get_item_data(item[kind], kind, current_id, data)
return data
parsed_data = get_item_data(data, 'Backyard', (), parsed_data)
return parsed_data | -314,356,583,417,209,340 | Fetch data from OmniLogic. | homeassistant/components/omnilogic/common.py | _async_update_data | 123dev/core | python | async def _async_update_data(self):
try:
data = (await self.api.get_telemetry_data())
except OmniLogicException as error:
raise UpdateFailed(f'Error updating from OmniLogic: {error}') from error
parsed_data = {}
def get_item_data(item, item_kind, current_id, data):
'Get data per kind of Omnilogic API item.'
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if ('systemId' in item):
system_id = item['systemId']
current_id = (current_id + (item_kind, system_id))
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if (kind in item):
data = get_item_data(item[kind], kind, current_id, data)
return data
parsed_data = get_item_data(data, 'Backyard', (), parsed_data)
return parsed_data |
def __init__(self, coordinator: OmniLogicUpdateCoordinator, kind: str, name: str, item_id: tuple, icon: str):
'Initialize the OmniLogic Entity.'
super().__init__(coordinator)
bow_id = None
entity_data = coordinator.data[item_id]
backyard_id = item_id[:2]
if (len(item_id) == 6):
bow_id = item_id[:4]
msp_system_id = coordinator.data[backyard_id]['systemId']
entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
unique_id = f'{msp_system_id}'
if (bow_id is not None):
unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
entity_friendly_name = f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"
if (entity_data.get('Name') is not None):
entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"
entity_friendly_name = f'{entity_friendly_name} {name}'
unique_id = unique_id.replace(' ', '_')
self._kind = kind
self._name = entity_friendly_name
self._unique_id = unique_id
self._item_id = item_id
self._icon = icon
self._attrs = {}
self._msp_system_id = msp_system_id
self._backyard_name = coordinator.data[backyard_id]['BackyardName'] | -4,474,011,280,738,262,000 | Initialize the OmniLogic Entity. | homeassistant/components/omnilogic/common.py | __init__ | 123dev/core | python | def __init__(self, coordinator: OmniLogicUpdateCoordinator, kind: str, name: str, item_id: tuple, icon: str):
super().__init__(coordinator)
bow_id = None
entity_data = coordinator.data[item_id]
backyard_id = item_id[:2]
if (len(item_id) == 6):
bow_id = item_id[:4]
msp_system_id = coordinator.data[backyard_id]['systemId']
entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
unique_id = f'{msp_system_id}'
if (bow_id is not None):
unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
entity_friendly_name = f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"
if (entity_data.get('Name') is not None):
entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"
entity_friendly_name = f'{entity_friendly_name} {name}'
unique_id = unique_id.replace(' ', '_')
self._kind = kind
self._name = entity_friendly_name
self._unique_id = unique_id
self._item_id = item_id
self._icon = icon
self._attrs = {}
self._msp_system_id = msp_system_id
self._backyard_name = coordinator.data[backyard_id]['BackyardName'] |
@property
def unique_id(self) -> str:
'Return a unique, Home Assistant friendly identifier for this entity.'
return self._unique_id | -2,715,274,186,570,752,500 | Return a unique, Home Assistant friendly identifier for this entity. | homeassistant/components/omnilogic/common.py | unique_id | 123dev/core | python | @property
def unique_id(self) -> str:
return self._unique_id |
@property
def name(self) -> str:
'Return the name of the entity.'
return self._name | 1,957,982,589,564,320,800 | Return the name of the entity. | homeassistant/components/omnilogic/common.py | name | 123dev/core | python | @property
def name(self) -> str:
return self._name |
@property
def icon(self):
'Return the icon for the entity.'
return self._icon | -4,097,096,868,526,755,300 | Return the icon for the entity. | homeassistant/components/omnilogic/common.py | icon | 123dev/core | python | @property
def icon(self):
return self._icon |
@property
def device_state_attributes(self):
'Return the attributes.'
return self._attrs | -7,442,340,643,963,322,000 | Return the attributes. | homeassistant/components/omnilogic/common.py | device_state_attributes | 123dev/core | python | @property
def device_state_attributes(self):
return self._attrs |
@property
def device_info(self):
'Define the device as back yard/MSP System.'
return {ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)}, ATTR_NAME: self._backyard_name, ATTR_MANUFACTURER: 'Hayward', ATTR_MODEL: 'OmniLogic'} | -7,748,572,316,647,299,000 | Define the device as back yard/MSP System. | homeassistant/components/omnilogic/common.py | device_info | 123dev/core | python | @property
def device_info(self):
return {ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)}, ATTR_NAME: self._backyard_name, ATTR_MANUFACTURER: 'Hayward', ATTR_MODEL: 'OmniLogic'} |
def get_item_data(item, item_kind, current_id, data):
'Get data per kind of Omnilogic API item.'
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if ('systemId' in item):
system_id = item['systemId']
current_id = (current_id + (item_kind, system_id))
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if (kind in item):
data = get_item_data(item[kind], kind, current_id, data)
return data | 6,475,641,875,551,483,000 | Get data per kind of Omnilogic API item. | homeassistant/components/omnilogic/common.py | get_item_data | 123dev/core | python | def get_item_data(item, item_kind, current_id, data):
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if ('systemId' in item):
system_id = item['systemId']
current_id = (current_id + (item_kind, system_id))
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if (kind in item):
data = get_item_data(item[kind], kind, current_id, data)
return data |
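To see what the recursive flattening produces, here is a toy telemetry payload. `get_item_data` reads a module-level `ALL_ITEM_KINDS` constant; the tuple below is an assumed stand-in (the real list lives in the integration's constants), and the field values are made up for illustration:

```python
ALL_ITEM_KINDS = ('Backyard', 'BOW', 'Pump')   # assumed stand-in constant

telemetry = {
    'systemId': 1, 'BackyardName': 'Home',
    'BOW': [{'systemId': 7, 'Name': 'Pool',
             'Pump': {'systemId': 42, 'Name': 'Main Pump'}}],
}

flat = get_item_data(telemetry, 'Backyard', (), {})
for key in flat:
    print(key)
# ('Backyard', 1)
# ('Backyard', 1, 'BOW', 7)
# ('Backyard', 1, 'BOW', 7, 'Pump', 42)
```

Each item ends up keyed by the full `(kind, systemId, kind, systemId, ...)` path from the backyard down, which is what the entity constructor later slices with `item_id[:2]` and `item_id[:4]`.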
def GenBasicBlocks(self):
' Generate basic blocks using the standard leader-based algorithm '
if (len(self.instructions) == 0):
return
bbCount = 0
self.basicBlocks = [BB.BasicBlock()]
self.basicBlocks[(- 1)].AddInstruction(self.instructions[0])
for instr in self.instructions[1:]:
if instr.IsTarget():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks[(- 1)].AddInstruction(instr)
if instr.instrType.is_JMP():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks = [bb for bb in self.basicBlocks if (not bb.IsEmpty())]
for (i, bb) in enumerate(self.basicBlocks):
bb.bbNum = i | -5,940,536,416,838,989,000 | Generate basic blocks using the standard leader-based algorithm | project/src/codegen/code_generation.py | GenBasicBlocks | vaishious/comperler | python | def GenBasicBlocks(self):
' '
if (len(self.instructions) == 0):
return
bbCount = 0
self.basicBlocks = [BB.BasicBlock()]
self.basicBlocks[(- 1)].AddInstruction(self.instructions[0])
for instr in self.instructions[1:]:
if instr.IsTarget():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks[(- 1)].AddInstruction(instr)
if instr.instrType.is_JMP():
bbCount += 1
self.basicBlocks += [BB.BasicBlock(bbCount)]
self.basicBlocks = [bb for bb in self.basicBlocks if (not bb.IsEmpty())]
for (i, bb) in enumerate(self.basicBlocks):
bb.bbNum = i |
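Since the `BB` module is not shown, here is a self-contained sketch of the same leader-based partition: a new block starts at every jump target and right after every jump. The `Instr` class is a stand-in for the real instruction type, not the project's API:

```python
class Instr:
    """Stand-in instruction: just enough surface for the partition."""
    def __init__(self, text, is_target=False, is_jump=False):
        self.text, self.is_target, self.is_jump = text, is_target, is_jump

def gen_basic_blocks(instructions):
    blocks, current = [], []
    for instr in instructions:
        if instr.is_target and current:   # leader: a jump target
            blocks.append(current)
            current = []
        current.append(instr)
        if instr.is_jump:                 # the next instruction is a leader
            blocks.append(current)
            current = []
    if current:                           # flush the trailing block
        blocks.append(current)
    return blocks

code = [Instr('a = 1'), Instr('goto L1', is_jump=True),
        Instr('b = 2'), Instr('L1: c = 3', is_target=True)]
for n, bb in enumerate(gen_basic_blocks(code)):
    print(n, [i.text for i in bb])
# 0 ['a = 1', 'goto L1']
# 1 ['b = 2']
# 2 ['L1: c = 3']
```

This mirrors the method above, including the final pass that drops empty blocks and renumbers the survivors.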
@staticmethod
def _format(path, name):
' Format\n\n Add path to name.\n\n Parameters\n ----------\n path : str\n Base path\n name : str\n Path extension\n\n Returns\n -------\n str\n Formatted path\n\n '
return '{}/{}'.format(path, name) | 2,094,405,422,228,392,000 | Format
Add path to name.
Parameters
----------
path : str
Base path
name : str
Path extension
Returns
-------
str
Formatted path | blendhunter/network.py | _format | CosmoStat/BlendHunter | python | @staticmethod
def _format(path, name):
' Format\n\n Add path to name.\n\n Parameters\n ----------\n path : str\n Base path\n name : str\n Path extension\n\n Returns\n -------\n str\n Formatted path\n\n '
return '{}/{}'.format(path, name) |
def getkwarg(self, key, default=None):
' Get keyword argument\n\n Get value from keyword arguments if it exists, otherwise return default.\n\n Parameters\n ----------\n key : str\n Dictionary key\n default : optional\n Default value\n\n '
return (self._kwargs[key] if (key in self._kwargs) else default) | 3,092,355,057,493,833,700 | Get keyword argument
Get value from keyword arguments if it exists, otherwise return default.
Parameters
----------
key : str
Dictionary key
default : optional
Default value | blendhunter/network.py | getkwarg | CosmoStat/BlendHunter | python | def getkwarg(self, key, default=None):
' Get keyword argument\n\n Get value from keyword arguments if it exists, otherwise return default.\n\n Parameters\n ----------\n key : str\n Dictionary key\n default : optional\n Default value\n\n '
return (self._kwargs[key] if (key in self._kwargs) else default) |
@staticmethod
def _get_image_shape(file):
' Get Image Shape\n\n Get the input image shape from an example image.\n\n Parameters\n ----------\n file : str\n File name\n\n Returns\n -------\n tuple\n Image shape\n\n '
return imread(file).shape | -849,533,267,932,076,300 | Get Image Shape
Get the input image shape from an example image.
Parameters
----------
file : str
File name
Returns
-------
tuple
Image shape | blendhunter/network.py | _get_image_shape | CosmoStat/BlendHunter | python | @staticmethod
def _get_image_shape(file):
' Get Image Shape\n\n Get the input image shape from an example image.\n\n Parameters\n ----------\n file : str\n File name\n\n Returns\n -------\n tuple\n Image shape\n\n '
return imread(file).shape |
def _get_target_shape(self, image_path=None):
' Get Target Shape\n\n Get the network target shape from the image shape.\n\n Parameters\n ----------\n image_path : str, optional\n Path to image file\n\n '
if (isinstance(self._image_shape, type(None)) and image_path):
file = self._format(image_path, os.listdir(image_path)[0])
self._image_shape = self._get_image_shape(file)
self._target_size = self._image_shape[:2] | -7,891,915,591,857,916,000 | Get Target Shape
Get the network target shape from the image shape.
Parameters
----------
image_path : str, optional
Path to image file | blendhunter/network.py | _get_target_shape | CosmoStat/BlendHunter | python | def _get_target_shape(self, image_path=None):
' Get Target Shape\n\n Get the network target shape from the image shape.\n\n Parameters\n ----------\n image_path : str, optional\n Path to image file\n\n '
if (isinstance(self._image_shape, type(None)) and image_path):
file = self._format(image_path, os.listdir(image_path)[0])
self._image_shape = self._get_image_shape(file)
self._target_size = self._image_shape[:2] |
def _load_generator(self, input_dir, batch_size=None, class_mode=None, augmentation=False):
' Load Generator\n\n Load files from an input directory into a Keras generator.\n\n Parameters\n ----------\n input_dir : str\n Input directory\n batch_size : int, optional\n Batch size\n class_mode : str, optional\n Generator class mode\n augmentation : bool, optional\n Option to apply data augmentation to the input images\n\n Returns\n -------\n keras_preprocessing.image.DirectoryIterator\n Keras generator\n\n '
if augmentation:
datagen = ImageDataGenerator(rescale=(1.0 / 255), shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
else:
datagen = ImageDataGenerator(rescale=(1.0 / 255))
generator = datagen.flow_from_directory(input_dir, target_size=self._target_size, batch_size=batch_size, class_mode=class_mode, shuffle=False)
generator.steps = (generator.n // generator.batch_size)
return generator | 1,026,942,451,619,151,200 | Load Generator
Load files from an input directory into a Keras generator.
Parameters
----------
input_dir : str
Input directory
batch_size : int, optional
Batch size
class_mode : str, optional
Generator class mode
augmentation : bool, optional
Option to apply data augmentation to the input images
Returns
-------
keras_preprocessing.image.DirectoryIterator
Keras generator | blendhunter/network.py | _load_generator | CosmoStat/BlendHunter | python | def _load_generator(self, input_dir, batch_size=None, class_mode=None, augmentation=False):
' Load Generator\n\n Load files from an input directory into a Keras generator.\n\n Parameters\n ----------\n input_dir : str\n Input directory\n batch_size : int, optional\n Batch size\n class_mode : str, optional\n Generator class mode\n augmentation : bool, optional\n Option to apply data augmentation to the input images\n\n Returns\n -------\n keras_preprocessing.image.DirectoryIterator\n Keras generator\n\n '
if augmentation:
datagen = ImageDataGenerator(rescale=(1.0 / 255), shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
else:
datagen = ImageDataGenerator(rescale=(1.0 / 255))
generator = datagen.flow_from_directory(input_dir, target_size=self._target_size, batch_size=batch_size, class_mode=class_mode, shuffle=False)
generator.steps = (generator.n // generator.batch_size)
return generator |
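Under the hood this wraps Keras's standard directory iterator. A minimal standalone equivalent, with a placeholder path that must contain one subdirectory per class:

```python
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1.0 / 255)
gen = datagen.flow_from_directory('./data/train',        # placeholder path
                                  target_size=(224, 224),
                                  batch_size=32,
                                  class_mode='binary',
                                  shuffle=False)
gen.steps = gen.n // gen.batch_size   # whole batches only; the remainder is dropped
```

Precomputing `steps` this way is why `_get_feature` truncates `generator.classes` to `steps * batch_size`: `predict_generator` only yields that many samples.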
def _get_feature(self, input_dir):
' Get Feature\n\n Get network feature and labels from VGG16 model.\n\n Parameters\n ----------\n input_dir : str\n Input directory\n\n Returns\n -------\n tuple\n VGG16 bottleneck feature, class labels\n\n '
generator = self._load_generator(input_dir, batch_size=self._batch_size_top)
labels = generator.classes[:(generator.steps * self._batch_size_top)]
return (self._vgg16_model.predict_generator(generator, generator.steps), labels) | -3,918,324,106,689,351,700 | Get Feature
Get network feature and labels from VGG16 model.
Parameters
----------
input_dir : str
Input directory
Returns
-------
tuple
VGG16 bottleneck feature, class labels | blendhunter/network.py | _get_feature | CosmoStat/BlendHunter | python | def _get_feature(self, input_dir):
' Get Feature\n\n Get network feature and labels from VGG16 model.\n\n Parameters\n ----------\n input_dir : str\n Input directory\n\n Returns\n -------\n tuple\n VGG16 bottleneck feature, class labels\n\n '
generator = self._load_generator(input_dir, batch_size=self._batch_size_top)
labels = generator.classes[:(generator.steps * self._batch_size_top)]
return (self._vgg16_model.predict_generator(generator, generator.steps), labels) |
@staticmethod
def _save_data(data, data_type, file_path):
' Save Data\n\n Save data to file.\n\n Parameters\n ----------\n data : np.ndarray\n Output data\n data_type : str\n Type of feature to be saved\n file_path : str\n File path\n\n '
file_name = '{}_{}.npy'.format(file_path, data_type)
np.save(file_name, data) | -6,445,560,500,047,569,000 | Save Data
Save data to file.
Parameters
----------
data : np.ndarray
Output data
data_type : str
Type of feature to be saved
file_path : str
File path | blendhunter/network.py | _save_data | CosmoStat/BlendHunter | python | @staticmethod
def _save_data(data, data_type, file_path):
' Save Data\n\n Save data to file.\n\n Parameters\n ----------\n data : np.ndarray\n Output data\n data_type : str\n Type of feature to be saved\n file_path : str\n File path\n\n '
file_name = '{}_{}.npy'.format(file_path, data_type)
np.save(file_name, data) |
@staticmethod
def _load_data(data_type, file_path):
' Load Data\n\n Load data from file.\n\n Parameters\n ----------\n data_type : str\n Type of feature to be loaded\n file_path : str\n File path\n\n '
file_name = '{}_{}.npy'.format(file_path, data_type)
if os.path.isfile(file_name):
return np.load(file_name)
else:
raise IOError('{} not found'.format(file_name)) | -7,156,602,867,933,073,000 | Load Data
Load data from file.
Parameters
----------
data_type : str
Type of feature to be loaded
file_path : str
File path | blendhunter/network.py | _load_data | CosmoStat/BlendHunter | python | @staticmethod
def _load_data(data_type, file_path):
' Load Data\n\n Load data from file.\n\n Parameters\n ----------\n data_type : str\n Type of feature to be loaded\n file_path : str\n File path\n\n '
file_name = '{}_{}.npy'.format(file_path, data_type)
if os.path.isfile(file_name):
return np.load(file_name)
else:
raise IOError('{} not found'.format(file_name)) |
@staticmethod
def _build_vgg16_model(input_shape=None):
' Build VGG16 Model\n\n Build VGG16 CNN model using imagenet weights.\n\n Parameters\n ----------\n input_shape : str, optional\n Input data shape\n\n Returns\n -------\n\n VGG16 model\n\n '
return VGG16(include_top=False, weights='imagenet', input_shape=input_shape) | -989,531,759,942,366,100 | Build VGG16 Model
Build VGG16 CNN model using imagenet weights.
Parameters
----------
input_shape : str, optional
Input data shape
Returns
-------
VGG16 model | blendhunter/network.py | _build_vgg16_model | CosmoStat/BlendHunter | python | @staticmethod
def _build_vgg16_model(input_shape=None):
' Build VGG16 Model\n\n Build VGG16 CNN model using imagenet weights.\n\n Parameters\n ----------\n input_shape : str, optional\n Input data shape\n\n Returns\n -------\n\n VGG16 model\n\n '
return VGG16(include_top=False, weights='imagenet', input_shape=input_shape) |
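The convolutional base is used as a fixed feature extractor. A quick sketch of what it produces, with a random batch standing in for real rescaled images:

```python
import numpy as np
from keras.applications.vgg16 import VGG16

base = VGG16(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
batch = np.random.rand(8, 224, 224, 3).astype('float32')  # stand-in images
bottleneck = base.predict(batch)
print(bottleneck.shape)   # (8, 7, 7, 512): the "bottleneck" features cached above
```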
def _get_features(self):
' Get Features\n\n Get the network (bottleneck) features from the VGG16 model.\n\n '
self._vgg16_model = self._build_vgg16_model()
for (key, value) in self._features.items():
(bot_feat, labels) = self._get_feature(value['dir'])
if self._save_bottleneck:
self._save_data(bot_feat, key, self._bottleneck_file)
if self._save_labels:
self._save_data(labels, key, self._labels_file)
value['bottleneck'] = bot_feat
value['labels'] = labels | 5,985,295,128,808,247,000 | Get Features
Get the network (bottleneck) features from the VGG16 model. | blendhunter/network.py | _get_features | CosmoStat/BlendHunter | python | def _get_features(self):
' Get Features\n\n Get the network (bottleneck) features from the VGG16 model.\n\n '
self._vgg16_model = self._build_vgg16_model()
for (key, value) in self._features.items():
(bot_feat, labels) = self._get_feature(value['dir'])
if self._save_bottleneck:
self._save_data(bot_feat, key, self._bottleneck_file)
if self._save_labels:
self._save_data(labels, key, self._labels_file)
value['bottleneck'] = bot_feat
value['labels'] = labels |
def _load_features(self):
' Load Bottleneck Features\n\n Load VGG16 bottleneck features.\n\n '
for feature_name in ('bottleneck', 'labels'):
if (feature_name == 'bottleneck'):
out_path = self._bottleneck_file
else:
out_path = self._labels_file
for (key, value) in self._features.items():
if (feature_name not in value):
value[feature_name] = self._load_data(key, out_path) | -4,141,505,175,619,262,000 | Load Bottleneck Features
Load VGG16 bottleneck features. | blendhunter/network.py | _load_features | CosmoStat/BlendHunter | python | def _load_features(self):
' Load Bottleneck Features\n\n Load VGG16 bottleneck features.\n\n '
for feature_name in ('bottleneck', 'labels'):
if (feature_name == 'bottleneck'):
out_path = self._bottleneck_file
else:
out_path = self._labels_file
for (key, value) in self._features.items():
if (feature_name not in value):
value[feature_name] = self._load_data(key, out_path) |
@staticmethod
def _build_top_model(input_shape, dense_output=(256, 1024), dropout=0.1):
' Build Top Model\n\n Build the fully connected layers of the network.\n\n Parameters\n ----------\n input_shape : tuple\n Input data shape\n dense_output : tuple, optional\n Size of dense output layers, default is (256, 1024)\n dropout : float, optional\n Dropout rate, default is 0.1\n\n Returns\n -------\n keras.model\n Fully connected top model\n\n '
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(dense_output[0]))
model.add(Dropout(dropout))
model.add(Dense(dense_output[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
return model | 8,246,173,569,645,649,000 | Build Top Model
Build the fully connected layers of the network.
Parameters
----------
input_shape : tuple
Input data shape
dense_output : tuple, optional
Size of dense output layers, default is (256, 1024)
dropout : float, optional
Dropout rate, default is 0.1
Returns
-------
keras.model
Fully connected top model | blendhunter/network.py | _build_top_model | CosmoStat/BlendHunter | python | @staticmethod
def _build_top_model(input_shape, dense_output=(256, 1024), dropout=0.1):
' Build Top Model\n\n Build the fully connected layers of the network.\n\n Parameters\n ----------\n input_shape : tuple\n Input data shape\n dense_output : tuple, optional\n Size of dense output layers, default is (256, 1024)\n dropout : float, optional\n Dropout rate, default is 0.1\n\n Returns\n -------\n keras.model\n Fully connected top model\n\n '
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(dense_output[0]))
model.add(Dropout(dropout))
model.add(Dense(dense_output[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
return model |
def _train_top_model(self):
' Train Top Model\n\n Train fully connected top model of the network.\n\n '
self._load_features()
model = self._build_top_model(input_shape=self._features['train']['bottleneck'].shape[1:])
model.compile(optimizer=self.getkwarg('top_opt', 'adam'), loss=self.getkwarg('top_loss', 'binary_crossentropy'), metrics=self.getkwarg('top_metrics', ['accuracy']))
top_model_file = '{}.h5'.format(self._top_model_file)
callbacks = []
callbacks.append(ModelCheckpoint(top_model_file, monitor='val_loss', verbose=self._verbose, save_best_only=True, save_weights_only=True, mode='auto', period=1))
if self.getkwarg('top_early_stop', True):
min_delta = self.getkwarg('top_min_delta', 0.001)
patience = self.getkwarg('top_patience', 10)
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=min_delta, patience=patience, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_delta=0.001, cooldown=2, verbose=self._verbose))
self.history = model.fit(self._features['train']['bottleneck'], self._features['train']['labels'], epochs=self._epochs_top, batch_size=self._batch_size_top, callbacks=callbacks, validation_data=(self._features['valid']['bottleneck'], self._features['valid']['labels']), verbose=self._verbose)
model.save_weights(top_model_file) | 5,451,557,413,038,301,000 | Train Top Model
Train fully connected top model of the network. | blendhunter/network.py | _train_top_model | CosmoStat/BlendHunter | python | def _train_top_model(self):
' Train Top Model\n\n Train fully connected top model of the network.\n\n '
self._load_features()
model = self._build_top_model(input_shape=self._features['train']['bottleneck'].shape[1:])
model.compile(optimizer=self.getkwarg('top_opt', 'adam'), loss=self.getkwarg('top_loss', 'binary_crossentropy'), metrics=self.getkwarg('top_metrics', ['accuracy']))
top_model_file = '{}.h5'.format(self._top_model_file)
callbacks = []
callbacks.append(ModelCheckpoint(top_model_file, monitor='val_loss', verbose=self._verbose, save_best_only=True, save_weights_only=True, mode='auto', period=1))
if self.getkwarg('top_early_stop', True):
min_delta = self.getkwarg('top_min_delta', 0.001)
patience = self.getkwarg('top_patience', 10)
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=min_delta, patience=patience, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_delta=0.001, cooldown=2, verbose=self._verbose))
self.history = model.fit(self._features['train']['bottleneck'], self._features['train']['labels'], epochs=self._epochs_top, batch_size=self._batch_size_top, callbacks=callbacks, validation_data=(self._features['valid']['bottleneck'], self._features['valid']['labels']), verbose=self._verbose)
model.save_weights(top_model_file) |
def plot_history(self):
' Plot History\n\n Plot the training history metrics.\n\n '
sns.set(style='darkgrid')
if (not isinstance(self.history, type(None))):
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(122)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
else:
print('No history to display. Run training first.') | 6,298,212,048,318,549,000 | Plot History
Plot the training history metrics. | blendhunter/network.py | plot_history | CosmoStat/BlendHunter | python | def plot_history(self):
' Plot History\n\n Plot the training history metrics.\n\n '
sns.set(style='darkgrid')
if (not isinstance(self.history, type(None))):
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(122)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
else:
print('No history to display. Run training first.') |
def _freeze_layers(self, model, depth):
' Freeze Network Layers\n\n Parameters\n ----------\n model :\n Keras model\n depth : int\n Depth of layers to be frozen\n\n '
for layer in model.layers[:depth]:
layer.trainable = False | 5,375,483,744,825,306,000 | Freeze Network Layers
Parameters
----------
model :
Keras model
depth : int
Depth of layers to be frozen | blendhunter/network.py | _freeze_layers | CosmoStat/BlendHunter | python | def _freeze_layers(self, model, depth):
' Freeze Network Layers\n\n Parameters\n ----------\n model :\n Keras model\n depth : int\n Depth of layers to be frozen\n\n '
for layer in model.layers[:depth]:
layer.trainable = False |
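The freezing idiom in isolation, on a throwaway model; note that trainability changes only take effect the next time the model is compiled:

```python
from keras.layers import Dense
from keras.models import Sequential

m = Sequential([Dense(4, input_shape=(2,)), Dense(4), Dense(1)])
for layer in m.layers[:2]:            # same pattern as _freeze_layers(m, 2)
    layer.trainable = False
print([layer.trainable for layer in m.layers])   # [False, False, True]
```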
def _build_final_model(self, load_top_weights=False, load_final_weights=False):
' Build Final Model\n\n Build the final BlendHunter model.\n\n Parameters\n ----------\n load_top_weights : bool\n Option to load the top model weights\n load_final_weights : bool\n Option to load the final model weights\n\n Returns\n -------\n\n Final model\n\n '
vgg16_model = self._build_vgg16_model(self._image_shape)
top_model = self._build_top_model(vgg16_model.output_shape[1:], dropout=0.4)
if load_top_weights:
top_model.load_weights('{}.h5'.format(self._top_model_file))
model = Model(inputs=vgg16_model.input, outputs=top_model(vgg16_model.output))
if load_final_weights:
model.load_weights('{}.h5'.format(self._final_model_file))
return model | 5,757,908,744,617,628,000 | Build Final Model
Build the final BlendHunter model.
Parameters
----------
load_top_weights : bool
Option to load the top model weights
load_final_weights : bool
Option to load the final model weights
Returns
-------
Final model | blendhunter/network.py | _build_final_model | CosmoStat/BlendHunter | python | def _build_final_model(self, load_top_weights=False, load_final_weights=False):
' Build Final Model\n\n Build the final BlendHunter model.\n\n Parameters\n ----------\n load_top_weights : bool\n Option to load the top model weights\n load_final_weights : bool\n Option to load the final model weights\n\n Returns\n -------\n\n Final model\n\n '
vgg16_model = self._build_vgg16_model(self._image_shape)
top_model = self._build_top_model(vgg16_model.output_shape[1:], dropout=0.4)
if load_top_weights:
top_model.load_weights('{}.h5'.format(self._top_model_file))
model = Model(inputs=vgg16_model.input, outputs=top_model(vgg16_model.output))
if load_final_weights:
model.load_weights('{}.h5'.format(self._final_model_file))
return model |
def _fine_tune(self):
' Fine Tune\n\n Fine tune the final model training.\n\n '
model = self._build_final_model(load_top_weights=True)
self._freeze_layers(model, 18)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001), metrics=['binary_accuracy'])
train_gen = self._load_generator(self._features['train']['dir'], batch_size=self._batch_size_fine, class_mode='binary', augmentation=True)
valid_gen = self._load_generator(self._features['valid']['dir'], batch_size=self._batch_size_fine, class_mode='binary')
callbacks = []
callbacks.append(ModelCheckpoint('{}.h5'.format(self._fine_tune_file), monitor='val_loss', verbose=self._verbose, save_best_only=True, save_weights_only=True, mode='auto', period=1))
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_delta=0.001, cooldown=2, verbose=self._verbose))
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps, epochs=self._epochs_fine, callbacks=callbacks, validation_data=valid_gen, validation_steps=valid_gen.steps, verbose=self._verbose)
self._freeze_layers(model, 19)
model.layers[17].trainable = True
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.0001), metrics=['binary_accuracy'])
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps, epochs=self._epochs_fine, callbacks=callbacks, validation_data=valid_gen, validation_steps=valid_gen.steps, verbose=self._verbose)
model.save_weights('{}.h5'.format(self._final_model_file)) | -8,034,878,660,751,384,000 | Fine Tune
Fine tune the final model training. | blendhunter/network.py | _fine_tune | CosmoStat/BlendHunter | python | def _fine_tune(self):
' Fine Tune\n\n Fine tune the final model training.\n\n '
model = self._build_final_model(load_top_weights=True)
self._freeze_layers(model, 18)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0001), metrics=['binary_accuracy'])
train_gen = self._load_generator(self._features['train']['dir'], batch_size=self._batch_size_fine, class_mode='binary', augmentation=True)
valid_gen = self._load_generator(self._features['valid']['dir'], batch_size=self._batch_size_fine, class_mode='binary')
callbacks = []
callbacks.append(ModelCheckpoint('{}.h5'.format(self._fine_tune_file), monitor='val_loss', verbose=self._verbose, save_best_only=True, save_weights_only=True, mode='auto', period=1))
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_delta=0.001, cooldown=2, verbose=self._verbose))
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps, epochs=self._epochs_fine, callbacks=callbacks, validation_data=valid_gen, validation_steps=valid_gen.steps, verbose=self._verbose)
self._freeze_layers(model, 19)
model.layers[17].trainable = True
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.0001), metrics=['binary_accuracy'])
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps, epochs=self._epochs_fine, callbacks=callbacks, validation_data=valid_gen, validation_steps=valid_gen.steps, verbose=self._verbose)
model.save_weights('{}.h5'.format(self._final_model_file)) |
def train(self, input_path, get_features=True, train_top=True, fine_tune=True, train_dir_name='train', valid_dir_name='validation', epochs_top=500, epochs_fine=50, batch_size_top=250, batch_size_fine=16, save_bottleneck=True, bottleneck_file='bottleneck_features', save_labels=True, labels_file='labels', fine_tune_file='fine_tune_checkpoint', top_model_file='top_model_weights', **kwargs):
" Train\n\n Train the BlendHunter network.\n\n Parameters\n ----------\n input_path : str\n Path to input data\n get_features : bool, optional\n Option to get bottleneck features, default is True\n train_top : bool, optional\n Option to train top model, default is True\n fine_tune : bool, optional\n Option to run fine tuning component of training, default is True\n train_dir_name : str, optional\n Training data directory name, default is 'train'\n valid_dir_name : str, optional\n Validation data directory name, default is 'validation'\n epochs_top : int, optional\n Number of training epochs for top model, default is 500\n epochs_fine : int, optional\n Number of training epochs for fine tuning, default is 50\n batch_size_top : int, optional\n Batch size for top model, default is 256\n batch_size_fine : int, optional\n Batch size for fine tuning, default is 16\n save_bottleneck : bool, optional\n Option to save bottleneck features, default is True\n bottleneck_file : str, optional\n File name for bottleneck features, default is\n 'bottleneck_features'\n fine_tune_file : str, optional\n Training checkpoint for the fine tuning step, default is\n 'fine_tune_checkpoint'\n\n "
start = time()
self._epochs_top = epochs_top
self._epochs_fine = epochs_fine
self._batch_size_top = batch_size_top
self._batch_size_fine = batch_size_fine
self._save_bottleneck = save_bottleneck
self._save_labels = save_labels
self._bottleneck_file = self._format(self._weights_path, bottleneck_file)
self._labels_file = self._format(self._weights_path, labels_file)
self._fine_tune_file = self._format(self._weights_path, fine_tune_file)
self._features = {'train': {}, 'valid': {}}
self._features['train']['dir'] = self._format(input_path, train_dir_name)
self._features['valid']['dir'] = self._format(input_path, valid_dir_name)
self._kwargs = kwargs
self._get_target_shape(self._format(self._features['train']['dir'], self._classes[0]))
if get_features:
self._get_features()
if train_top:
self._train_top_model()
if fine_tune:
self._fine_tune()
end = time()
print('Duration {:0.2f}s'.format((end - start))) | -3,860,889,324,644,368,000 | Train
Train the BlendHunter network.
Parameters
----------
input_path : str
Path to input data
get_features : bool, optional
Option to get bottleneck features, default is True
train_top : bool, optional
Option to train top model, default is True
fine_tune : bool, optional
Option to run fine tuning component of training, default is True
train_dir_name : str, optional
Training data directory name, default is 'train'
valid_dir_name : str, optional
Validation data directory name, default is 'validation'
epochs_top : int, optional
Number of training epochs for top model, default is 500
epochs_fine : int, optional
Number of training epochs for fine tuning, default is 50
batch_size_top : int, optional
Batch size for top model, default is 250
batch_size_fine : int, optional
Batch size for fine tuning, default is 16
save_bottleneck : bool, optional
Option to save bottleneck features, default is True
bottleneck_file : str, optional
File name for bottleneck features, default is
'bottleneck_features'
fine_tune_file : str, optional
Training checkpoint for the fine tuning step, default is
'fine_tune_checkpoint' | blendhunter/network.py | train | CosmoStat/BlendHunter | python | def train(self, input_path, get_features=True, train_top=True, fine_tune=True, train_dir_name='train', valid_dir_name='validation', epochs_top=500, epochs_fine=50, batch_size_top=250, batch_size_fine=16, save_bottleneck=True, bottleneck_file='bottleneck_features', save_labels=True, labels_file='labels', fine_tune_file='fine_tune_checkpoint', top_model_file='top_model_weights', **kwargs):
" Train\n\n Train the BlendHunter network.\n\n Parameters\n ----------\n input_path : str\n Path to input data\n get_features : bool, optional\n Option to get bottleneck features, default is True\n train_top : bool, optional\n Option to train top model, default is True\n fine_tune : bool, optional\n Option to run fine tuning component of training, default is True\n train_dir_name : str, optional\n Training data directory name, default is 'train'\n valid_dir_name : str, optional\n Validation data directory name, default is 'validation'\n epochs_top : int, optional\n Number of training epochs for top model, default is 500\n epochs_fine : int, optional\n Number of training epochs for fine tuning, default is 50\n batch_size_top : int, optional\n Batch size for top model, default is 256\n batch_size_fine : int, optional\n Batch size for fine tuning, default is 16\n save_bottleneck : bool, optional\n Option to save bottleneck features, default is True\n bottleneck_file : str, optional\n File name for bottleneck features, default is\n 'bottleneck_features'\n fine_tune_file : str, optional\n Training checkpoint for the fine tuning step, default is\n 'fine_tune_checkpoint'\n\n "
start = time()
self._epochs_top = epochs_top
self._epochs_fine = epochs_fine
self._batch_size_top = batch_size_top
self._batch_size_fine = batch_size_fine
self._save_bottleneck = save_bottleneck
self._save_labels = save_labels
self._bottleneck_file = self._format(self._weights_path, bottleneck_file)
self._labels_file = self._format(self._weights_path, labels_file)
self._fine_tune_file = self._format(self._weights_path, fine_tune_file)
self._features = {'train': {}, 'valid': {}}
self._features['train']['dir'] = self._format(input_path, train_dir_name)
self._features['valid']['dir'] = self._format(input_path, valid_dir_name)
self._kwargs = kwargs
self._get_target_shape(self._format(self._features['train']['dir'], self._classes[0]))
if get_features:
self._get_features()
if train_top:
self._train_top_model()
if fine_tune:
self._fine_tune()
end = time()
print('Duration {:0.2f}s'.format((end - start))) |
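A hedged end-to-end training call. The `BlendHunter` constructor is not included in this excerpt, so the weights-path keyword below is an assumption inferred from the attributes the methods read (`_weights_path`, `_classes`, `_verbose`):

```python
bh = BlendHunter(weights_path='./weights')   # hypothetical constructor signature
bh.train('./data',                           # expects train/ and validation/ inside
         epochs_top=500, epochs_fine=50,
         batch_size_top=250, batch_size_fine=16)
bh.plot_history()                            # accuracy/loss curves from the top model
```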
def predict(self, input_path=None, input_path_keras=None, input_data=None, weights_type='fine'):
" Predict\n\n Predict classes for test data\n\n Parameters\n ----------\n input_path : str\n Path to input data\n input_path_keras : str\n Path to input data in Keras format, i.e. path to directory one\n level above where the data is stored\n input_data : np.ndarray\n Array of input images\n weights_type : str, optional {'fine', 'top'}\n Type of weights to use for predition, default is 'fine'\n\n Returns\n -------\n dict\n Dictionary of file names and corresponding classes\n\n "
if input_path:
test_path = '/'.join(input_path.split('/')[:(- 1)])
elif input_path_keras:
test_path = input_path_keras
else:
test_path = None
if (weights_type not in ('fine', 'top')):
raise ValueError('Invalid value for weights_type. Options are "fine" or "top"')
if test_path:
self._get_target_shape(self._format(test_path, os.listdir(test_path)[0]))
if (weights_type == 'fine'):
model = self._build_final_model(load_final_weights=True)
elif (weights_type == 'top'):
model = self._build_final_model(load_top_weights=True)
test_gen = self._load_generator(test_path, class_mode='categorical', batch_size=1)
self.filenames = test_gen.filenames
test_gen.reset()
res = model.predict_generator(test_gen, verbose=self._verbose, steps=test_gen.steps).flatten()
elif (not isinstance(input_data, type(None))):
self._image_shape = input_data.shape[1:]
self._get_target_shape()
model = self._build_final_model(load_final_weights=True)
res = model.predict(input_data, verbose=self._verbose).flatten()
else:
raise RuntimeError('No input data provided.')
labels = {0: self._classes[0], 1: self._classes[1]}
preds = [labels[k] for k in np.around(res)]
return preds | -3,499,259,897,700,017,000 | Predict
Predict classes for test data
Parameters
----------
input_path : str
Path to input data
input_path_keras : str
Path to input data in Keras format, i.e. path to directory one
level above where the data is stored
input_data : np.ndarray
Array of input images
weights_type : str, optional {'fine', 'top'}
Type of weights to use for prediction, default is 'fine'
Returns
-------
list
List of predicted class names | blendhunter/network.py | predict | CosmoStat/BlendHunter | python | def predict(self, input_path=None, input_path_keras=None, input_data=None, weights_type='fine'):
" Predict\n\n Predict classes for test data\n\n Parameters\n ----------\n input_path : str\n Path to input data\n input_path_keras : str\n Path to input data in Keras format, i.e. path to directory one\n level above where the data is stored\n input_data : np.ndarray\n Array of input images\n weights_type : str, optional {'fine', 'top'}\n Type of weights to use for predition, default is 'fine'\n\n Returns\n -------\n dict\n Dictionary of file names and corresponding classes\n\n "
if input_path:
test_path = '/'.join(input_path.split('/')[:(- 1)])
elif input_path_keras:
test_path = input_path_keras
else:
test_path = None
if (weights_type not in ('fine', 'top')):
raise ValueError('Invalid value for weights_type. Options are "fine" or "top"')
if test_path:
self._get_target_shape(self._format(test_path, os.listdir(test_path)[0]))
if (weights_type == 'fine'):
model = self._build_final_model(load_final_weights=True)
elif (weights_type == 'top'):
model = self._build_final_model(load_top_weights=True)
test_gen = self._load_generator(test_path, class_mode='categorical', batch_size=1)
self.filenames = test_gen.filenames
test_gen.reset()
res = model.predict_generator(test_gen, verbose=self._verbose, steps=test_gen.steps).flatten()
elif (not isinstance(input_data, type(None))):
self._image_shape = input_data.shape[1:]
self._get_target_shape()
model = self._build_final_model(load_final_weights=True)
res = model.predict(input_data, verbose=self._verbose).flatten()
else:
raise RuntimeError('No input data provided.')
labels = {0: self._classes[0], 1: self._classes[1]}
preds = [labels[k] for k in np.around(res)]
return preds |
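Two hedged prediction calls matching the branches above, assuming a trained `bh` instance: one over a Keras-style directory (one subfolder per class), one over a raw image array. The exact class names printed depend on `self._classes`, which is not shown in this excerpt:

```python
preds = bh.predict(input_path_keras='./data/test', weights_type='fine')
print(list(zip(bh.filenames, preds))[:3])    # per-file class names

import numpy as np
images = np.random.rand(4, 224, 224, 3)      # stand-in postage stamps
print(bh.predict(input_data=images))         # e.g. ['blended', 'not_blended', ...]
```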
def __init_subclass__(cls, **kwargs):
'\n An __init_subclass__ hook initializes all of the subclasses of a given class.\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be aware of metaclasses.\n Here we use this to register each subclass in a dict that has the `is_datasource_for`\n attribute. This is then passed into the TimeSeries Factory so we can register them.\n '
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for | 8,528,098,994,569,175,000 | An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them. | sunpy/timeseries/timeseriesbase.py | __init_subclass__ | yashrsharma44/sunpy | python | def __init_subclass__(cls, **kwargs):
'\n An __init_subclass__ hook initializes all of the subclasses of a given class.\n So for each subclass, it will call this block of code on import.\n This replicates some metaclass magic without the need to be aware of metaclasses.\n Here we use this to register each subclass in a dict that has the `is_datasource_for`\n attribute. This is then passed into the TimeSeries Factory so we can register them.\n '
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for |
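A self-contained sketch of the registration hook in the row above; the class names are illustrative stand-ins, not the actual sunpy sources:

class TimeSeriesBase:
    _registry = {}

    def __init_subclass__(cls, **kwargs):
        # Runs once per subclass definition, at import time.
        super().__init_subclass__(**kwargs)
        if hasattr(cls, 'is_datasource_for'):
            cls._registry[cls] = cls.is_datasource_for

class XRSTimeSeries(TimeSeriesBase):
    @classmethod
    def is_datasource_for(cls, **kwargs):
        return kwargs.get('source', '').lower() == 'xrs'

print(TimeSeriesBase._registry)   # {<class 'XRSTimeSeries'>: <bound method ...>}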
@property
def source(self):
'\n A string/object used to specify the source class of the TimeSeries.\n '
return self._source | -5,948,268,673,498,676,000 | A string/object used to specify the source class of the TimeSeries. | sunpy/timeseries/timeseriesbase.py | source | yashrsharma44/sunpy | python | @property
def source(self):
'\n \n '
return self._source |
@property
def columns(self):
'A list of all the names of the columns in the data.'
return list(self.data.columns.values) | -1,494,880,558,403,497,500 | A list of all the names of the columns in the data. | sunpy/timeseries/timeseriesbase.py | columns | yashrsharma44/sunpy | python | @property
def columns(self):
return list(self.data.columns.values) |
@property
def index(self):
'The time index of the data.'
return self.data.index | -2,782,964,505,124,396,000 | The time index of the data. | sunpy/timeseries/timeseriesbase.py | index | yashrsharma44/sunpy | python | @property
def index(self):
return self.data.index |
@property
def time_range(self):
'\n The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`\n object\n '
if (len(self.data) > 0):
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None | 1,425,110,208,352,478,700 | The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object | sunpy/timeseries/timeseriesbase.py | time_range | yashrsharma44/sunpy | python | @property
def time_range(self):
'\n The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`\n object\n '
if (len(self.data) > 0):
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None |
def quantity(self, colname, **kwargs):
'\n Return a `~astropy.units.quantity.Quantity` for the given column.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n\n Returns\n -------\n quantity : `~astropy.units.quantity.Quantity`\n '
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit) | -6,461,061,768,238,513,000 | Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity` | sunpy/timeseries/timeseriesbase.py | quantity | yashrsharma44/sunpy | python | def quantity(self, colname, **kwargs):
'\n Return a `~astropy.units.quantity.Quantity` for the given column.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n\n Returns\n -------\n quantity : `~astropy.units.quantity.Quantity`\n '
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit) |
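A stand-alone sketch of what the quantity() row above does, attaching the unit registered for a column to that column's raw values; the column data and unit are assumptions for illustration:

import numpy as np
import astropy.units as u

values = np.array([1.0e-6, 2.5e-6])   # hypothetical column data
unit = u.W / u.m ** 2                 # hypothetical registered unit
q = u.Quantity(values, unit)
print(q.to(u.mW / u.m ** 2))          # convert to another compatible unit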
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
'\n Return a new TimeSeries with the given column added or updated.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n\n quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`\n The values to be placed within the column.\n If updating values only then a numpy array is permitted.\n\n overwrite : `bool`, optional, default:True\n Set to true to allow the method to overwrite a column already present\n in the TimeSeries.\n\n Returns\n -------\n newts : TimeSeries\n\n '
if ((not unit) and isinstance(quantity, astropy.units.quantity.Quantity)):
unit = quantity.unit
elif (not unit):
unit = u.dimensionless_unscaled
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
if (not (colname in self.data.columns)):
units[colname] = unit
values = quantity
if (isinstance(values, astropy.units.quantity.Quantity) and overwrite):
values = values.to(units[colname]).value
if ((not (colname in self.data.columns)) or overwrite):
data[colname] = values
return self.__class__(data, meta, units) | 7,198,475,854,216,336,000 | Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries | sunpy/timeseries/timeseriesbase.py | add_column | yashrsharma44/sunpy | python | def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
'\n Return a new TimeSeries with the given column added or updated.\n\n Parameters\n ----------\n colname : `str`\n The heading of the column you want output.\n\n quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`\n The values to be placed within the column.\n If updating values only then a numpy array is permitted.\n\n overwrite : `bool`, optional, default:True\n Set to true to allow the method to overwrite a column already present\n in the TimeSeries.\n\n Returns\n -------\n newts : TimeSeries\n\n '
if ((not unit) and isinstance(quantity, astropy.units.quantity.Quantity)):
unit = quantity.unit
elif (not unit):
unit = u.dimensionless_unscaled
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
if (not (colname in self.data.columns)):
units[colname] = unit
values = quantity
if (isinstance(values, astropy.units.quantity.Quantity) and overwrite):
values = values.to(units[colname]).value
if ((not (colname in self.data.columns)) or overwrite):
data[colname] = values
return self.__class__(data, meta, units) |
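A stand-alone sketch of the add_column logic above, using plain pandas/astropy objects in place of a real TimeSeries; all column names, values, and units are illustrative assumptions:

import copy
from collections import OrderedDict

import astropy.units as u
import numpy as np
import pandas as pd

data = pd.DataFrame({'xrsb': np.ones(3)},
                    index=pd.date_range('2011-01-01', periods=3, freq='min'))
units = OrderedDict(xrsb=u.W / u.m ** 2)

colname = 'xrsa'
quantity = u.Quantity([1, 2, 3], u.mW / u.m ** 2)

new_data, new_units = copy.copy(data), copy.copy(units)
if colname not in new_data.columns:
    new_units[colname] = quantity.unit              # register the new unit
new_data[colname] = quantity.to(new_units[colname]).value   # convert, store raw values
print(new_data)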
def sort_index(self, **kwargs):
"Returns a sorted version of the TimeSeries object.\n Generally this shouldn't be necessary as most TimeSeries operations sort\n the data anyway to ensure consistent behaviour when truncating.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series in ascending chronological order.\n "
return GenericTimeSeries(self.data.sort_index(**kwargs), TimeSeriesMetaData(copy.copy(self.meta.metadata)), copy.copy(self.units)) | -3,113,045,102,198,193,000 | Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order. | sunpy/timeseries/timeseriesbase.py | sort_index | yashrsharma44/sunpy | python | def sort_index(self, **kwargs):
"Returns a sorted version of the TimeSeries object.\n Generally this shouldn't be necessary as most TimeSeries operations sort\n the data anyway to ensure consistent behaviour when truncating.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series in ascending chronological order.\n "
return GenericTimeSeries(self.data.sort_index(**kwargs), TimeSeriesMetaData(copy.copy(self.meta.metadata)), copy.copy(self.units)) |
def truncate(self, a, b=None, int=None):
'Returns a truncated version of the TimeSeries object.\n\n Parameters\n ----------\n a : `sunpy.time.TimeRange`, `str` or `int`\n Either a time range to truncate to, or a start time in some format\n recognised by pandas, or a index integer.\n\n b : `str` or `int`\n If specified, the end time of the time range in some format\n recognised by pandas, or a index integer.\n\n int : `int`\n If specified, the integer indicating the slicing intervals.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series with only the selected times.\n '
if (isinstance(a, str) and isinstance(b, str)):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
start = a.start.datetime
end = a.end.datetime
else:
start = a
end = b
truncated_data = self.data.sort_index()[start:end:int]
truncated_meta = TimeSeriesMetaData([])
if (len(truncated_data) > 0):
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object | -1,181,497,150,398,642,000 | Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or a index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or a index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times. | sunpy/timeseries/timeseriesbase.py | truncate | yashrsharma44/sunpy | python | def truncate(self, a, b=None, int=None):
'Returns a truncated version of the TimeSeries object.\n\n Parameters\n ----------\n a : `sunpy.time.TimeRange`, `str` or `int`\n Either a time range to truncate to, or a start time in some format\n recognised by pandas, or a index integer.\n\n b : `str` or `int`\n If specified, the end time of the time range in some format\n recognised by pandas, or a index integer.\n\n int : `int`\n If specified, the integer indicating the slicing intervals.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series with only the selected times.\n '
if (isinstance(a, str) and isinstance(b, str)):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
start = a.start.datetime
end = a.end.datetime
else:
start = a
end = b
truncated_data = self.data.sort_index()[start:end:int]
truncated_meta = TimeSeriesMetaData([])
if (len(truncated_data) > 0):
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object |
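The truncation above reduces to label-based slicing on a sorted DatetimeIndex; this stand-alone sketch shows the same semantics with illustrative times and step:

import numpy as np
import pandas as pd

idx = pd.date_range('2011-01-01', periods=10, freq='h')
df = pd.DataFrame({'x': np.arange(10)}, index=idx)
start, end, step = '2011-01-01 02:00', '2011-01-01 07:00', 2
# Label slicing on a DatetimeIndex is inclusive of the end label.
print(df.sort_index()[start:end:step])   # rows at 02:00, 04:00 and 06:00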
def extract(self, column_name):
'Returns a new time series with the chosen column.\n\n Parameters\n ----------\n column_name : `str`\n A valid column name.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series with only the selected column.\n '
'\n # TODO allow the extract function to pick more than one column\n if isinstance(self, pandas.Series):\n return self\n else:\n return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))\n '
data = self.data[[column_name]].dropna()
object = GenericTimeSeries(data.sort_index(), TimeSeriesMetaData(copy.copy(self.meta.metadata)), copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object | -4,715,312,840,530,796,000 | Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column. | sunpy/timeseries/timeseriesbase.py | extract | yashrsharma44/sunpy | python | def extract(self, column_name):
'Returns a new time series with the chosen column.\n\n Parameters\n ----------\n column_name : `str`\n A valid column name.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series with only the selected column.\n '
'\n # TODO allow the extract function to pick more than one column\n if isinstance(self, pandas.Series):\n return self\n else:\n return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))\n '
data = self.data[[column_name]].dropna()
object = GenericTimeSeries(data.sort_index(), TimeSeriesMetaData(copy.copy(self.meta.metadata)), copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object |
def concatenate(self, otherts, **kwargs):
'Concatenate with another TimeSeries. This function will check and\n remove any duplicate times. It will keep the column values from the\n original time series to which the new time series is being added.\n\n Parameters\n ----------\n otherts : `~sunpy.timeseries.TimeSeries`\n Another time series.\n\n same_source : `bool` Optional\n Set to true to check if the sources of the time series match.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series.\n\n Debate: decide if we want to be able to concatenate multiple time series\n at once.\n '
if (self == otherts):
return self
same_source = kwargs.get('same_source', False)
if (same_source and (not isinstance(otherts, self.__class__))):
raise TypeError('TimeSeries classes must match if specified.')
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
if (self.__class__ == otherts.__class__):
object = self.__class__(data.sort_index(), meta, units)
else:
object = GenericTimeSeries(data.sort_index(), meta, units)
object._sanitize_metadata()
object._sanitize_units()
return object | 7,234,888,421,910,104,000 | Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once. | sunpy/timeseries/timeseriesbase.py | concatenate | yashrsharma44/sunpy | python | def concatenate(self, otherts, **kwargs):
'Concatenate with another TimeSeries. This function will check and\n remove any duplicate times. It will keep the column values from the\n original time series to which the new time series is being added.\n\n Parameters\n ----------\n otherts : `~sunpy.timeseries.TimeSeries`\n Another time series.\n\n same_source : `bool` Optional\n Set to true to check if the sources of the time series match.\n\n Returns\n -------\n newts : `~sunpy.timeseries.TimeSeries`\n A new time series.\n\n Debate: decide if we want to be able to concatenate multiple time series\n at once.\n '
if (self == otherts):
return self
same_source = kwargs.get('same_source', False)
if (same_source and (not isinstance(otherts, self.__class__))):
raise TypeError('TimeSeries classes must match if specified.')
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
if (self.__class__ == otherts.__class__):
object = self.__class__(data.sort_index(), meta, units)
else:
object = GenericTimeSeries(data.sort_index(), meta, units)
object._sanitize_metadata()
object._sanitize_units()
return object |
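A stand-alone sketch of the merge performed above: the two frames are concatenated and sorted, and the unit dictionaries merged with the second series' entries winning on a clash (data and units are illustrative):

from collections import OrderedDict

import pandas as pd

df_a = pd.DataFrame({'x': [1, 2]}, index=pd.to_datetime(['2011-01-01', '2011-01-02']))
df_b = pd.DataFrame({'y': [3, 4]}, index=pd.to_datetime(['2011-01-03', '2011-01-04']))
units_a, units_b = OrderedDict(x='W'), OrderedDict(y='s')

data = pd.concat([df_a.copy(), df_b]).sort_index()
units = OrderedDict()
units.update(units_a)
units.update(units_b)     # later entries win on duplicate keys
print(data)
print(units)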
def plot(self, axes=None, **plot_args):
'Plot the time series\n\n Parameters\n ----------\n axes : `~matplotlib.axes.Axes` or None\n If provided the plot will be drawn on the given axes. Otherwise\n the current axes will be used.\n\n **plot_args : `dict`\n Any additional plot arguments that should be used\n when plotting.\n\n Returns\n -------\n axes : `~matplotlib.axes.Axes`\n The plot axes.\n '
if (axes is None):
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes | 3,276,638,724,602,902,000 | Plot the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes. | sunpy/timeseries/timeseriesbase.py | plot | yashrsharma44/sunpy | python | def plot(self, axes=None, **plot_args):
'Plot the time series\n\n Parameters\n ----------\n axes : `~matplotlib.axes.Axes` or None\n If provided the plot will be drawn on the given axes. Otherwise\n the current axes will be used.\n\n **plot_args : `dict`\n Any additional plot arguments that should be used\n when plotting.\n\n Returns\n -------\n axes : `~matplotlib.axes.Axes`\n The plot axes.\n '
if (axes is None):
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes |
def peek(self, **kwargs):
'Displays the time series in a new figure.\n\n Parameters\n ----------\n **kwargs : `dict`\n Any additional plot arguments that should be used when plotting.\n '
self._validate_data_for_ploting()
figure = plt.figure()
self.plot(**kwargs)
figure.show() | -7,159,612,167,630,332,000 | Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting. | sunpy/timeseries/timeseriesbase.py | peek | yashrsharma44/sunpy | python | def peek(self, **kwargs):
'Displays the time series in a new figure.\n\n Parameters\n ----------\n **kwargs : `dict`\n Any additional plot arguments that should be used when plotting.\n '
self._validate_data_for_ploting()
figure = plt.figure()
self.plot(**kwargs)
figure.show() |
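plot() above defers to pandas' DataFrame.plot on the supplied axes, and peek() merely wraps it in a fresh figure; this sketch mirrors that call pattern with illustrative data:

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.DataFrame({'flux': np.sin(np.linspace(0, 6, 50))},
                  index=pd.date_range('2011-01-01', periods=50, freq='min'))
ax = plt.gca()            # current axes, as in plot() when axes is None
ax = df.plot(ax=ax)       # extra keyword arguments would be forwarded here
plt.show()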
def _validate_data_for_ploting(self):
'Raises an exception if the timeseries is invalid for plotting.\n To be added into all the peek methods in all source subclasses.\n Currently only checks if we have an empty timeseries, where:\n len(self.data) == 0\n\n '
if (len(self.data) == 0):
raise ValueError("The timeseries can't be plotted as it has no data present. (len(self.data) == 0)") | 4,499,807,279,462,531,600 | Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0 | sunpy/timeseries/timeseriesbase.py | _validate_data_for_ploting | yashrsharma44/sunpy | python | def _validate_data_for_ploting(self):
'Raises an exception if the timeseries is invalid for plotting.\n To be added into all the peek methods in all source subclasses.\n Currently only checks if we have an empty timeseries, where:\n len(self.data) == 0\n\n '
if (len(self.data) == 0):
raise ValueError("The timeseries can't be plotted as it has no data present. (len(self.data) == 0)") |
def _validate_meta(self):
'\n Validates the meta-information associated with a TimeSeries.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-specific\n validation should be handled in the relevant file in the\n sunpy.timeseries.sources package.\n\n Allows for default unit assignment for:\n COL_UNITS\n\n '
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and (u.Unit(self.meta.get(meta_property), parse_strict='silent').physical_type == 'unknown')):
warnings.warn(f'Unknown value for {meta_property.upper()}.', SunpyUserWarning) | 7,407,329,041,437,525,000 | Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS | sunpy/timeseries/timeseriesbase.py | _validate_meta | yashrsharma44/sunpy | python | def _validate_meta(self):
'\n Validates the meta-information associated with a TimeSeries.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-specific\n validation should be handled in the relevant file in the\n sunpy.timeseries.sources package.\n\n Allows for default unit assignment for:\n COL_UNITS\n\n '
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and (u.Unit(self.meta.get(meta_property), parse_strict='silent').physical_type == 'unknown')):
warnings.warn(f'Unknown value for {meta_property.upper()}.', SunpyUserWarning) |
def _validate_units(self, units, **kwargs):
'\n Validates the astropy unit-information associated with a TimeSeries.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-specific\n validation should be handled in the relevant file in the\n sunpy.timeseries.sources package.\n\n Allows for default unit assignment for:\n COL_UNITS\n\n '
warnings.simplefilter('always', Warning)
result = True
for key in units:
if (not isinstance(units[key], astropy.units.UnitBase)):
result = False
warnings.warn(f'Invalid unit given for {key}.', SunpyUserWarning)
return result | -8,785,898,291,672,626,000 | Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS | sunpy/timeseries/timeseriesbase.py | _validate_units | yashrsharma44/sunpy | python | def _validate_units(self, units, **kwargs):
'\n Validates the astropy unit-information associated with a TimeSeries.\n\n This method includes very basic validation checks which apply to\n all of the kinds of files that SunPy can read. Datasource-specific\n validation should be handled in the relevant file in the\n sunpy.timeseries.sources package.\n\n Allows for default unit assignment for:\n COL_UNITS\n\n '
warnings.simplefilter('always', Warning)
result = True
for key in units:
if (not isinstance(units[key], astropy.units.UnitBase)):
result = False
warnings.warn(f'Invalid unit given for {key}.', SunpyUserWarning)
return result |
def _sanitize_units(self, **kwargs):
"\n Sanitises the collections.OrderedDict used to store the units.\n Primarily this method will:\n\n Remove entries that don't match up to a column,\n Add unitless entries for columns with no units defined.\n Re-arrange the order of the dictionary to match the columns.\n "
warnings.simplefilter('always', Warning)
for column in (set(self.data.columns.tolist()) - set(self.units.keys())):
self.units[column] = u.dimensionless_unscaled
warnings.warn(f'Unknown units for {column}.', SunpyUserWarning)
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column: self.units[column]})
self.units = units | -7,829,692,574,669,847,000 | Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns. | sunpy/timeseries/timeseriesbase.py | _sanitize_units | yashrsharma44/sunpy | python | def _sanitize_units(self, **kwargs):
"\n Sanitises the collections.OrderedDict used to store the units.\n Primarily this method will:\n\n Remove entries that don't match up to a column,\n Add unitless entries for columns with no units defined.\n Re-arrange the order of the dictionary to match the columns.\n "
warnings.simplefilter('always', Warning)
for column in (set(self.data.columns.tolist()) - set(self.units.keys())):
self.units[column] = u.dimensionless_unscaled
warnings.warn(f'Unknown units for {column}.', SunpyUserWarning)
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column: self.units[column]})
self.units = units |
def _sanitize_metadata(self, **kwargs):
"\n Sanitises the TimeSeriesMetaData object used to store the metadata.\n Primarily this method will:\n\n Remove entries outside of the datas TimeRange or truncate TimeRanges\n if the metadata overflows past the data,\n Remove column references in the metadata that don't match to a column\n in the data.\n Remove metadata entries that have no columns matching the data.\n "
warnings.simplefilter('always', Warning)
self.meta._truncate(self.time_range)
redundant_cols = list((set(self.meta.columns) - set(self.columns)))
self.meta._remove_columns(redundant_cols) | 908,350,509,889,361,200 | Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data. | sunpy/timeseries/timeseriesbase.py | _sanitize_metadata | yashrsharma44/sunpy | python | def _sanitize_metadata(self, **kwargs):
"\n Sanitises the TimeSeriesMetaData object used to store the metadata.\n Primarily this method will:\n\n Remove entries outside of the datas TimeRange or truncate TimeRanges\n if the metadata overflows past the data,\n Remove column references in the metadata that don't match to a column\n in the data.\n Remove metadata entries that have no columns matching the data.\n "
warnings.simplefilter('always', Warning)
self.meta._truncate(self.time_range)
redundant_cols = list((set(self.meta.columns) - set(self.columns)))
self.meta._remove_columns(redundant_cols) |
def to_table(self, **kwargs):
'\n Return an Astropy Table of the given TimeSeries object.\n\n Returns\n -------\n newtable : `~astropy.table.Table`\n A new astropy table containing the data from the time series.\n The table will include units where relevant.\n '
table = Table.from_pandas(self.data)
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
for key in self.units:
table[key].unit = self.units[key]
return table | 3,324,495,531,496,266,000 | Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant. | sunpy/timeseries/timeseriesbase.py | to_table | yashrsharma44/sunpy | python | def to_table(self, **kwargs):
'\n Return an Astropy Table of the given TimeSeries object.\n\n Returns\n -------\n newtable : `~astropy.table.Table`\n A new astropy table containing the data from the time series.\n The table will include units where relevant.\n '
table = Table.from_pandas(self.data)
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
for key in self.units:
table[key].unit = self.units[key]
return table |
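A stand-alone sketch of the conversion above: the DatetimeIndex becomes a leading 'date' column and per-column units are attached afterwards (the data values and unit string are illustrative):

import pandas as pd
from astropy.table import Column, Table

df = pd.DataFrame({'xrsb': [1.0, 2.0]},
                  index=pd.date_range('2011-01-01', periods=2, freq='h'))
table = Table.from_pandas(df)
table.add_column(Column(df.index.values, name='date'), index=0)
table['xrsb'].unit = 'W / m2'    # hypothetical registered unit
print(table)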
def to_dataframe(self, **kwargs):
'\n Return a Pandas DataFrame of the given TimeSeries object.\n\n Returns\n -------\n newdf : `~pandas.core.frame.DataFrame`\n A Pandas DataFrame containing the data.\n '
return self.data | -745,279,484,466,440,700 | Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas DataFrame containing the data. | sunpy/timeseries/timeseriesbase.py | to_dataframe | yashrsharma44/sunpy | python | def to_dataframe(self, **kwargs):
'\n Return a Pandas DataFrame of the given TimeSeries object.\n\n Returns\n -------\n newdf : `~pandas.core.frame.DataFrame`\n A Pandas DataFrame containing the data.\n '
return self.data |
def to_array(self, columns=None):
'\n Return a numpy array of the given TimeSeries object.\n\n Parameters\n ----------\n columns: `list`, optional, default:None\n If None, return all columns minus the index, otherwise, returns\n specified columns.\n\n Returns\n -------\n values : `~numpy.ndarray`\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n '
if columns:
return self.data.values[columns]
else:
return self.data.values | -7,989,567,506,834,727,000 | Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes. | sunpy/timeseries/timeseriesbase.py | to_array | yashrsharma44/sunpy | python | def to_array(self, columns=None):
'\n Return a numpy array of the given TimeSeries object.\n\n Parameters\n ----------\n columns: `list`, optional, default:None\n If None, return all columns minus the index, otherwise, returns\n specified columns.\n\n Returns\n -------\n values : `~numpy.ndarray`\n If the caller is heterogeneous and contains booleans or objects,\n the result will be of dtype=object. See Notes.\n '
if columns:
return self.data.values[columns]
else:
return self.data.values |
def __eq__(self, other):
'\n Check two TimeSeries objects are the same: they have matching type, data,\n metadata and units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.GenericTimeSeries`\n The second TimeSeries object to compare with.\n\n Returns\n -------\n result : `bool`\n '
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or (self.meta != other.meta) or (self.units != other.units)):
match = False
else:
match = False
return match | -553,668,783,808,033,660 | Check two TimeSeries objects are the same: they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool` | sunpy/timeseries/timeseriesbase.py | __eq__ | yashrsharma44/sunpy | python | def __eq__(self, other):
'\n Check two TimeSeries objects are the same: they have matching type, data,\n metadata and units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.GenericTimeSeries`\n The second TimeSeries object to compare with.\n\n Returns\n -------\n result : `bool`\n '
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or (self.meta != other.meta) or (self.units != other.units)):
match = False
else:
match = False
return match |
def __ne__(self, other):
"\n Check two TimeSeries objects are not the same, they don't have matching\n type, data, metadata and/or units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.GenericTimeSeries`\n The second TimeSeries object to compare with.\n\n Returns\n -------\n result : `bool`\n "
return (not (self == other)) | 6,942,846,282,346,546,000 | Check two TimeSeries objects are not the same: they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool` | sunpy/timeseries/timeseriesbase.py | __ne__ | yashrsharma44/sunpy | python | def __ne__(self, other):
"\n Check two TimeSeries objects are not the same, they don't have matching\n type, data, metadata and/or units entries.\n\n Parameters\n ----------\n other : `~sunpy.timeseries.GenericTimeSeries`\n The second TimeSeries object to compare with.\n\n Returns\n -------\n result : `bool`\n "
return (not (self == other)) |
@classmethod
def _parse_file(cls, filepath):
'Parses a file - to be implemented in any subclass that may use files'
return NotImplemented | 6,994,217,759,058,817,000 | Parses a file - to be implemented in any subclass that may use files | sunpy/timeseries/timeseriesbase.py | _parse_file | yashrsharma44/sunpy | python | @classmethod
def _parse_file(cls, filepath):
return NotImplemented |
def set_total(self, sum_value):
'This is an example of how a subclass would implement a direct setter.\n\n Args:\n sum_value: The total to set.\n '
self.sum.assign(sum_value) | 8,071,151,840,042,293,000 | This is an example of how a subclass would implement a direct setter.
Args:
sum_value: The total to set. | keras/engine/base_preprocessing_layer_test.py | set_total | 01-vyom/keras | python | def set_total(self, sum_value):
'This is an example of how a subclass would implement a direct setter.\n\n Args:\n sum_value: The total to set.\n '
self.sum.assign(sum_value) |
def test_adapt_bad_input_fails(self):
'Test that non-Dataset/Numpy inputs cause a reasonable error.'
input_dataset = {'foo': 0}
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, 'Failed to find data adapter'):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, 'requires a'):
layer.adapt(input_dataset) | 504,718,727,793,669,800 | Test that non-Dataset/Numpy inputs cause a reasonable error. | keras/engine/base_preprocessing_layer_test.py | test_adapt_bad_input_fails | 01-vyom/keras | python | def test_adapt_bad_input_fails(self):
input_dataset = {'foo': 0}
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, 'Failed to find data adapter'):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, 'requires a'):
layer.adapt(input_dataset) |
def test_adapt_infinite_dataset_fails(self):
'Test that preproc layers fail if an infinite dataset is passed.'
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]])).repeat()
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, 'infinite dataset'):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, '.*infinite number of elements.*'):
layer.adapt(input_dataset) | 1,876,904,811,739,438,600 | Test that preproc layers fail if an infinite dataset is passed. | keras/engine/base_preprocessing_layer_test.py | test_adapt_infinite_dataset_fails | 01-vyom/keras | python | def test_adapt_infinite_dataset_fails(self):
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]])).repeat()
layer = AddingPreprocessingLayer()
if tf.executing_eagerly():
with self.assertRaisesRegex(ValueError, 'infinite dataset'):
layer.adapt(input_dataset)
else:
with self.assertRaisesRegex(ValueError, '.*infinite number of elements.*'):
layer.adapt(input_dataset) |
def test_setter_update(self):
'Test the prototyped setter method.'
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.set_total(15)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) | 6,372,803,395,745,935,000 | Test the prototyped setter method. | keras/engine/base_preprocessing_layer_test.py | test_setter_update | 01-vyom/keras | python | def test_setter_update(self):
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.set_total(15)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) |
def test_pre_build_adapt_update_numpy(self):
'Test that preproc layers can adapt() before build() is called.'
input_dataset = np.array([1, 2, 3, 4, 5])
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) | 8,383,667,575,545,763,000 | Test that preproc layers can adapt() before build() is called. | keras/engine/base_preprocessing_layer_test.py | test_pre_build_adapt_update_numpy | 01-vyom/keras | python | def test_pre_build_adapt_update_numpy(self):
input_dataset = np.array([1, 2, 3, 4, 5])
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) |
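A hedged sketch of the adapt-before-build pattern exercised by the test above, swapping the test-only AddingPreprocessingLayer for the built-in Normalization layer, which supports the same adapt() workflow (the data values are illustrative):

import numpy as np
from tensorflow import keras

layer = keras.layers.Normalization()
layer.adapt(np.array([[1.0], [2.0], [3.0], [4.0], [5.0]]))   # learn mean/variance

inputs = keras.Input(shape=(1,))
model = keras.Model(inputs, layer(inputs))
print(model.predict(np.array([[1.0], [2.0], [3.0]])))        # standardised values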
def test_post_build_adapt_update_numpy(self):
'Test that preproc layers can adapt() after build() is called.'
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) | -5,922,545,801,141,978,000 | Test that preproc layers can adapt() after build() is called. | keras/engine/base_preprocessing_layer_test.py | test_post_build_adapt_update_numpy | 01-vyom/keras | python | def test_post_build_adapt_update_numpy(self):
input_dataset = np.array([1, 2, 3, 4, 5])
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) |
def test_pre_build_adapt_update_dataset(self):
'Test that preproc layers can adapt() before build() is called.'
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]]))
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) | 1,788,629,159,740,084,700 | Test that preproc layers can adapt() before build() is called. | keras/engine/base_preprocessing_layer_test.py | test_pre_build_adapt_update_dataset | 01-vyom/keras | python | def test_pre_build_adapt_update_dataset(self):
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]]))
layer = AddingPreprocessingLayer()
layer.adapt(input_dataset)
input_data = keras.Input(shape=(1,))
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) |
def test_post_build_adapt_update_dataset(self):
'Test that preproc layers can adapt() after build() is called.'
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]]))
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) | -1,618,166,909,450,582,300 | Test that preproc layers can adapt() after build() is called. | keras/engine/base_preprocessing_layer_test.py | test_post_build_adapt_update_dataset | 01-vyom/keras | python | def test_post_build_adapt_update_dataset(self):
input_dataset = tf.data.Dataset.from_tensor_slices(np.array([[1], [2], [3], [4], [5], [0]]))
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0])) |
def test_weight_based_state_transfer(self):
'Test that preproc layers can transfer state via get/set weights.'
def get_model():
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
(model, layer) = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
weights = model.get_weights()
(model_2, _) = get_model()
self.assertAllEqual([[1], [2], [3]], model_2.predict([1.0, 2.0, 3.0]))
model_2.set_weights(weights)
self.assertAllEqual([[16], [17], [18]], model_2.predict([1.0, 2.0, 3.0])) | 3,336,310,296,255,985,000 | Test that preproc layers can transfer state via get/set weights. | keras/engine/base_preprocessing_layer_test.py | test_weight_based_state_transfer | 01-vyom/keras | python | def test_weight_based_state_transfer(self):
def get_model():
input_data = keras.Input(shape=(1,))
layer = AddingPreprocessingLayer()
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
(model, layer) = get_model()
layer.adapt(input_dataset)
self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
weights = model.get_weights()
(model_2, _) = get_model()
self.assertAllEqual([[1], [2], [3]], model_2.predict([1.0, 2.0, 3.0]))
model_2.set_weights(weights)
self.assertAllEqual([[16], [17], [18]], model_2.predict([1.0, 2.0, 3.0])) |
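A hedged sketch of the weight-based state transfer tested above: the adapted statistics are stored as ordinary non-trainable weights, so get_weights()/set_weights() can copy them into a fresh model of the same architecture (Normalization stands in for the test-only layer):

import numpy as np
from tensorflow import keras

def build():
    inputs = keras.Input(shape=(1,))
    layer = keras.layers.Normalization()
    return keras.Model(inputs, layer(inputs)), layer

model_1, layer_1 = build()
layer_1.adapt(np.array([[1.0], [2.0], [3.0]]))

model_2, _ = build()
model_2.set_weights(model_1.get_weights())   # transfer the adapted state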
def test_adapt_sets_input_shape_rank(self):
"Check that `.adapt()` sets the `input_shape`'s rank."
adapt_dataset = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32)
layer = AddingPreprocessingLayer()
layer.adapt(adapt_dataset)
input_dataset = np.array([[[1.0, 2.0], [3.0, 4.0]], [[3.0, 4.0], [5.0, 6.0]]], dtype=np.float32)
layer(input_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, None, None)) | 452,410,190,368,058,430 | Check that `.adapt()` sets the `input_shape`'s rank. | keras/engine/base_preprocessing_layer_test.py | test_adapt_sets_input_shape_rank | 01-vyom/keras | python | def test_adapt_sets_input_shape_rank(self):
adapt_dataset = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32)
layer = AddingPreprocessingLayer()
layer.adapt(adapt_dataset)
input_dataset = np.array([[[1.0, 2.0], [3.0, 4.0]], [[3.0, 4.0], [5.0, 6.0]]], dtype=np.float32)
layer(input_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, None, None)) |
def test_adapt_doesnt_overwrite_input_shape(self):
"Check that `.adapt()` doesn't change the `input_shape`."
adapt_dataset = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32)
layer = AddingPreprocessingLayer(input_shape=[1, 2])
layer.adapt(adapt_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, 1, 2)) | 8,446,419,764,574,836,000 | Check that `.adapt()` doesn't change the `input_shape`. | keras/engine/base_preprocessing_layer_test.py | test_adapt_doesnt_overwrite_input_shape | 01-vyom/keras | python | def test_adapt_doesnt_overwrite_input_shape(self):
adapt_dataset = np.array([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]], dtype=np.float32)
layer = AddingPreprocessingLayer(input_shape=[1, 2])
layer.adapt(adapt_dataset)
model = keras.Sequential([layer])
self.assertTrue(model.built)
self.assertEqual(model.input_shape, (None, 1, 2)) |
def test_adapt_fails(self):
'Test that calling adapt leads to a runtime error.'
input_dataset = {'foo': 0}
with tf.Graph().as_default():
layer = AddingPreprocessingLayer()
with self.assertRaisesRegex(RuntimeError, '`adapt` is only supported in tensorflow v2'):
layer.adapt(input_dataset) | -7,956,983,366,574,396,000 | Test that calling adapt leads to a runtime error. | keras/engine/base_preprocessing_layer_test.py | test_adapt_fails | 01-vyom/keras | python | def test_adapt_fails(self):
input_dataset = {'foo': 0}
with tf.Graph().as_default():
layer = AddingPreprocessingLayer()
with self.assertRaisesRegex(RuntimeError, '`adapt` is only supported in tensorflow v2'):
layer.adapt(input_dataset) |
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> dict[(str, dict[(str, str)])]:
'Get the entity sources.'
return hass.data.get(DATA_ENTITY_SOURCE, {}) | -6,891,118,196,048,396,000 | Get the entity sources. | homeassistant/helpers/entity.py | entity_sources | algra4/core | python | @callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> dict[(str, dict[(str, str)])]:
return hass.data.get(DATA_ENTITY_SOURCE, {}) |
def generate_entity_id(entity_id_format: str, name: (str | None), current_ids: (list[str] | None)=None, hass: (HomeAssistant | None)=None) -> str:
'Generate a unique entity ID based on given entity IDs or used IDs.'
return async_generate_entity_id(entity_id_format, name, current_ids, hass) | -3,719,838,031,144,718,000 | Generate a unique entity ID based on given entity IDs or used IDs. | homeassistant/helpers/entity.py | generate_entity_id | algra4/core | python | def generate_entity_id(entity_id_format: str, name: (str | None), current_ids: (list[str] | None)=None, hass: (HomeAssistant | None)=None) -> str:
return async_generate_entity_id(entity_id_format, name, current_ids, hass) |
@callback
def async_generate_entity_id(entity_id_format: str, name: (str | None), current_ids: (Iterable[str] | None)=None, hass: (HomeAssistant | None)=None) -> str:
'Generate a unique entity ID based on given entity IDs or used IDs.'
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if (current_ids is not None):
return ensure_unique_string(preferred_string, current_ids)
if (hass is None):
raise ValueError('Missing required parameter current_ids or hass')
test_string = preferred_string
tries = 1
while (not hass.states.async_available(test_string)):
tries += 1
test_string = f'{preferred_string}_{tries}'
return test_string | -7,373,176,610,640,175,000 | Generate a unique entity ID based on given entity IDs or used IDs. | homeassistant/helpers/entity.py | async_generate_entity_id | algra4/core | python | @callback
def async_generate_entity_id(entity_id_format: str, name: (str | None), current_ids: (Iterable[str] | None)=None, hass: (HomeAssistant | None)=None) -> str:
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if (current_ids is not None):
return ensure_unique_string(preferred_string, current_ids)
if (hass is None):
raise ValueError('Missing required parameter current_ids or hass')
test_string = preferred_string
tries = 1
while (not hass.states.async_available(test_string)):
tries += 1
test_string = f'{preferred_string}_{tries}'
return test_string |
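A self-contained sketch of the generation scheme above: slugify the name into the format string, then bump a numeric suffix until the ID is free. A plain set stands in for the hass state machine, and the crude regex slugify is an assumption, not Home Assistant's slugify():

import re

def generate(entity_id_format, name, current_ids):
    slug = re.sub(r'\W+', '_', name.lower()).strip('_')   # crude slugify
    preferred = entity_id_format.format(slug)
    test, tries = preferred, 1
    while test in current_ids:
        tries += 1
        test = f'{preferred}_{tries}'
    return test

print(generate('sensor.{}', 'Living Room', {'sensor.living_room'}))
# -> sensor.living_room_2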
def get_capability(hass: HomeAssistant, entity_id: str, capability: str) -> (Any | None):
'Get a capability attribute of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(capability)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.capabilities.get(capability) if entry.capabilities else None) | -1,369,188,086,554,530,000 | Get a capability attribute of an entity.
First try the statemachine, then entity registry. | homeassistant/helpers/entity.py | get_capability | algra4/core | python | def get_capability(hass: HomeAssistant, entity_id: str, capability: str) -> (Any | None):
'Get a capability attribute of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(capability)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.capabilities.get(capability) if entry.capabilities else None) |
def get_device_class(hass: HomeAssistant, entity_id: str) -> (str | None):
'Get device class of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_DEVICE_CLASS)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.device_class or entry.original_device_class) | 6,866,142,974,874,640,000 | Get device class of an entity.
First try the statemachine, then entity registry. | homeassistant/helpers/entity.py | get_device_class | algra4/core | python | def get_device_class(hass: HomeAssistant, entity_id: str) -> (str | None):
'Get device class of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_DEVICE_CLASS)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.device_class or entry.original_device_class) |
def get_supported_features(hass: HomeAssistant, entity_id: str) -> int:
'Get supported features for an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.supported_features or 0) | -2,023,362,472,376,857,000 | Get supported features for an entity.
First try the statemachine, then entity registry. | homeassistant/helpers/entity.py | get_supported_features | algra4/core | python | def get_supported_features(hass: HomeAssistant, entity_id: str) -> int:
'Get supported features for an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return (entry.supported_features or 0) |
def get_unit_of_measurement(hass: HomeAssistant, entity_id: str) -> (str | None):
'Get unit of measurement class of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return entry.unit_of_measurement | -3,598,349,409,584,291,300 | Get unit of measurement class of an entity.
First try the statemachine, then entity registry. | homeassistant/helpers/entity.py | get_unit_of_measurement | algra4/core | python | def get_unit_of_measurement(hass: HomeAssistant, entity_id: str) -> (str | None):
'Get unit of measurement class of an entity.\n\n First try the statemachine, then entity registry.\n '
if (state := hass.states.get(entity_id)):
return state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
entity_registry = er.async_get(hass)
if (not (entry := entity_registry.async_get(entity_id))):
raise HomeAssistantError(f'Unknown entity {entity_id}')
return entry.unit_of_measurement |
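A hedged usage sketch: all four helpers above share the same lookup order, state machine first, then the entity registry. The imports follow the file path shown in the rows; the hass instance and entity ID are assumptions, so the calls are left commented:

# from homeassistant.helpers.entity import (
#     get_capability, get_device_class, get_supported_features,
#     get_unit_of_measurement)
#
# unit = get_unit_of_measurement(hass, "sensor.outdoor_temperature")
# device_class = get_device_class(hass, "sensor.outdoor_temperature")
# features = get_supported_features(hass, "sensor.outdoor_temperature")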
@property
def should_poll(self) -> bool:
'Return True if entity has to be polled for state.\n\n False if entity pushes its state to HA.\n '
return self._attr_should_poll | 4,027,934,016,660,659,000 | Return True if entity has to be polled for state.
False if entity pushes its state to HA. | homeassistant/helpers/entity.py | should_poll | algra4/core | python | @property
def should_poll(self) -> bool:
'Return True if entity has to be polled for state.\n\n False if entity pushes its state to HA.\n '
return self._attr_should_poll |
@property
def unique_id(self) -> (str | None):
'Return a unique ID.'
return self._attr_unique_id | -237,840,374,900,852,200 | Return a unique ID. | homeassistant/helpers/entity.py | unique_id | algra4/core | python | @property
def unique_id(self) -> (str | None):
return self._attr_unique_id |
@property
def name(self) -> (str | None):
'Return the name of the entity.'
if hasattr(self, '_attr_name'):
return self._attr_name
if hasattr(self, 'entity_description'):
return self.entity_description.name
return None | 8,010,586,074,726,211,000 | Return the name of the entity. | homeassistant/helpers/entity.py | name | algra4/core | python | @property
def name(self) -> (str | None):
if hasattr(self, '_attr_name'):
return self._attr_name
if hasattr(self, 'entity_description'):
return self.entity_description.name
return None |
@property
def state(self) -> StateType:
'Return the state of the entity.'
return self._attr_state | -2,358,687,993,022,337,500 | Return the state of the entity. | homeassistant/helpers/entity.py | state | algra4/core | python | @property
def state(self) -> StateType:
return self._attr_state |
@property
def capability_attributes(self) -> (Mapping[(str, Any)] | None):
'Return the capability attributes.\n\n Attributes that explain the capabilities of an entity.\n\n Implemented by component base class. Convention for attribute names\n is lowercase snake_case.\n '
return None | -4,403,257,508,118,563,300 | Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case. | homeassistant/helpers/entity.py | capability_attributes | algra4/core | python | @property
def capability_attributes(self) -> (Mapping[(str, Any)] | None):
'Return the capability attributes.\n\n Attributes that explain the capabilities of an entity.\n\n Implemented by component base class. Convention for attribute names\n is lowercase snake_case.\n '
return None |
@property
def state_attributes(self) -> (dict[(str, Any)] | None):
'Return the state attributes.\n\n Implemented by component base class, should not be extended by integrations.\n Convention for attribute names is lowercase snake_case.\n '
return None | -8,200,392,401,859,698,000 | Return the state attributes.
Implemented by component base class, should not be extended by integrations.
Convention for attribute names is lowercase snake_case. | homeassistant/helpers/entity.py | state_attributes | algra4/core | python | @property
def state_attributes(self) -> (dict[(str, Any)] | None):
'Return the state attributes.\n\n Implemented by component base class, should not be extended by integrations.\n Convention for attribute names is lowercase snake_case.\n '
return None |