Dataset schema (one record per Python function):
  body                    string, 26 to 98.2k chars  - full function source, docstring included
  body_hash               int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               string, 1 to 16.8k chars   - the function's docstring, flattened to one line
  path                    string, 5 to 230 chars     - file path within the repository
  name                    string, 1 to 96 chars      - function name
  repository_name         string, 7 to 89 chars      - owner/repo
  lang                    stringclasses, 1 value     - always 'python' in this section
  body_without_docstring  string, 20 to 98.2k chars  - function source with the docstring stripped
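For orientation, a minimal sketch of loading and inspecting records with this schema; the JSONL file name and storage format are assumptions, not stated above.

    import pandas as pd

    # Hypothetical file; point this at wherever the records are actually stored.
    df = pd.read_json('code_docstring_corpus.jsonl', lines=True)
    print(df.columns.tolist())   # the eight columns listed above
    print(df['lang'].unique())   # ['python'] - a single class, per the schema
    print(df['name'].iloc[0], df['path'].iloc[0])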
async def test_status_invalid_json(hass, mqtt_mock): 'Test to make sure nothing breaks if the vacuum sends bad JSON.' config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) async_fire_mqtt_message(hass, 'vacuum/state', '{"asdfasas false}') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNKNOWN)
1,168,305,222,630,873,000
Test to make sure nothing breaks if the vacuum sends bad JSON.
tests/components/mqtt/test_state_vacuum.py
test_status_invalid_json
FuqiangSong/home-assistant
python
async def test_status_invalid_json(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) async_fire_mqtt_message(hass, 'vacuum/state', '{"asdfasas false}') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNKNOWN)
async def test_default_availability_payload(hass, mqtt_mock): 'Test availability by default payload with defined topic.' config = deepcopy(DEFAULT_CONFIG) config.update({'availability_topic': 'availability-topic'}) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'online') state = hass.states.get('vacuum.mqtttest') assert (STATE_UNAVAILABLE != state.state) async_fire_mqtt_message(hass, 'availability-topic', 'offline') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE)
-6,620,312,135,905,730,000
Test availability by default payload with defined topic.
tests/components/mqtt/test_state_vacuum.py
test_default_availability_payload
FuqiangSong/home-assistant
python
async def test_default_availability_payload(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) config.update({'availability_topic': 'availability-topic'}) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'online') state = hass.states.get('vacuum.mqtttest') assert (STATE_UNAVAILABLE != state.state) async_fire_mqtt_message(hass, 'availability-topic', 'offline') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE)
async def test_custom_availability_payload(hass, mqtt_mock): 'Test availability by custom payload with defined topic.' config = deepcopy(DEFAULT_CONFIG) config.update({'availability_topic': 'availability-topic', 'payload_available': 'good', 'payload_not_available': 'nogood'}) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'good') state = hass.states.get('vacuum.mqtttest') assert (state.state != STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'nogood') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE)
-6,984,247,191,293,095,000
Test availability by custom payload with defined topic.
tests/components/mqtt/test_state_vacuum.py
test_custom_availability_payload
FuqiangSong/home-assistant
python
async def test_custom_availability_payload(hass, mqtt_mock): config = deepcopy(DEFAULT_CONFIG) config.update({'availability_topic': 'availability-topic', 'payload_available': 'good', 'payload_not_available': 'nogood'}) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})) state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'good') state = hass.states.get('vacuum.mqtttest') assert (state.state != STATE_UNAVAILABLE) async_fire_mqtt_message(hass, 'availability-topic', 'nogood') state = hass.states.get('vacuum.mqtttest') assert (state.state == STATE_UNAVAILABLE)
async def test_discovery_removal_vacuum(hass, mqtt_mock): 'Test removal of discovered vacuum.' entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data = '{ "name": "Beer", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Beer') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', '') (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is None)
5,494,130,513,752,167,000
Test removal of discovered vacuum.
tests/components/mqtt/test_state_vacuum.py
test_discovery_removal_vacuum
FuqiangSong/home-assistant
python
async def test_discovery_removal_vacuum(hass, mqtt_mock): entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data = '{ "name": "Beer", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Beer') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', '') (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is None)
async def test_discovery_broken(hass, mqtt_mock, caplog): 'Test handling of bad discovery message.' entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic#"}' data2 = '{ "name": "Milk", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is None) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) state = hass.states.get('vacuum.milk') assert (state is not None) assert (state.name == 'Milk') state = hass.states.get('vacuum.beer') assert (state is None)
-2,486,486,397,341,616,600
Test handling of bad discovery message.
tests/components/mqtt/test_state_vacuum.py
test_discovery_broken
FuqiangSong/home-assistant
python
async def test_discovery_broken(hass, mqtt_mock, caplog): entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic#"}' data2 = '{ "name": "Milk", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is None) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) state = hass.states.get('vacuum.milk') assert (state is not None) assert (state.name == 'Milk') state = hass.states.get('vacuum.beer') assert (state is None)
async def test_discovery_update_vacuum(hass, mqtt_mock): 'Test update of discovered vacuum.' entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic"}' data2 = '{ "name": "Milk", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Beer') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Milk') state = hass.states.get('vacuum.milk') assert (state is None)
6,639,160,471,907,825,000
Test update of discovered vacuum.
tests/components/mqtt/test_state_vacuum.py
test_discovery_update_vacuum
FuqiangSong/home-assistant
python
async def test_discovery_update_vacuum(hass, mqtt_mock): entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic"}' data2 = '{ "name": "Milk", "command_topic": "test_topic"}' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Beer') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) state = hass.states.get('vacuum.beer') assert (state is not None) assert (state.name == 'Milk') state = hass.states.get('vacuum.milk') assert (state is None)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock): 'Test the setting of attribute via MQTT with JSON payload.' assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', '{ "val": "100" }') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') == '100')
2,170,986,282,247,794,400
Test the setting of attribute via MQTT with JSON payload.
tests/components/mqtt/test_state_vacuum.py
test_setting_attribute_via_mqtt_json_message
FuqiangSong/home-assistant
python
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock): assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', '{ "val": "100" }') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') == '100')
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog): 'Test attributes get extracted from a JSON result.' assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', '[ "list", "of", "things"]') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') is None) assert ('JSON result was not a dictionary' in caplog.text)
-70,801,421,141,000,880
Test attributes get extracted from a JSON result.
tests/components/mqtt/test_state_vacuum.py
test_update_with_json_attrs_not_dict
FuqiangSong/home-assistant
python
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog): assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', '[ "list", "of", "things"]') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') is None) assert ('JSON result was not a dictionary' in caplog.text)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog): 'Test attributes get extracted from a JSON result.' assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') is None) assert ('Erroneous JSON: This is not JSON' in caplog.text)
937,416,036,234,111,100
Test attributes get extracted from a JSON result.
tests/components/mqtt/test_state_vacuum.py
test_update_with_json_attrs_bad_json
FuqiangSong/home-assistant
python
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog): assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: {'platform': 'mqtt', 'name': 'test', 'json_attributes_topic': 'attr-topic'}})) async_fire_mqtt_message(hass, 'attr-topic', 'This is not JSON') state = hass.states.get('vacuum.test') assert (state.attributes.get('val') is None) assert ('Erroneous JSON: This is not JSON' in caplog.text)
async def test_discovery_update_attr(hass, mqtt_mock, caplog): 'Test update of discovered MQTTAttributes.' entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic1" }' data2 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic2" }' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "100" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '100') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "50" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '100') async_fire_mqtt_message(hass, 'attr-topic2', '{ "val": "75" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '75')
427,545,815,223,096,260
Test update of discovered MQTTAttributes.
tests/components/mqtt/test_state_vacuum.py
test_discovery_update_attr
FuqiangSong/home-assistant
python
async def test_discovery_update_attr(hass, mqtt_mock, caplog): entry = MockConfigEntry(domain=mqtt.DOMAIN) (await async_start(hass, 'homeassistant', {}, entry)) data1 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic1" }' data2 = '{ "name": "Beer", "command_topic": "test_topic", "json_attributes_topic": "attr-topic2" }' async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data1) (await hass.async_block_till_done()) async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "100" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '100') async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data2) (await hass.async_block_till_done()) async_fire_mqtt_message(hass, 'attr-topic1', '{ "val": "50" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '100') async_fire_mqtt_message(hass, 'attr-topic2', '{ "val": "75" }') state = hass.states.get('vacuum.beer') assert (state.attributes.get('val') == '75')
async def test_unique_id(hass, mqtt_mock): 'Test unique id option only creates one vacuum per unique_id.' (await async_mock_mqtt_component(hass)) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]})) async_fire_mqtt_message(hass, 'test-topic', 'payload') assert (len(hass.states.async_entity_ids()) == 2)
6,461,815,832,846,092,000
Test unique id option only creates one vacuum per unique_id.
tests/components/mqtt/test_state_vacuum.py
test_unique_id
FuqiangSong/home-assistant
python
async def test_unique_id(hass, mqtt_mock): (await async_mock_mqtt_component(hass)) assert (await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: [{'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}, {'platform': 'mqtt', 'name': 'Test 2', 'command_topic': 'command-topic', 'unique_id': 'TOTALLY_UNIQUE'}]})) async_fire_mqtt_message(hass, 'test-topic', 'payload') assert (len(hass.states.async_entity_ids()) == 2)
async def test_entity_device_info_with_identifier(hass, mqtt_mock): 'Test MQTT vacuum device registry integration.' entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_hass(hass) (await async_start(hass, 'homeassistant', {}, entry)) registry = (await hass.helpers.device_registry.async_get_registry()) data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'}) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.identifiers == {('mqtt', 'helloworld')}) assert (device.connections == {('mac', '02:5b:26:a8:dc:12')}) assert (device.manufacturer == 'Whatever') assert (device.name == 'Beer') assert (device.model == 'Glass') assert (device.sw_version == '0.1-beta')
-7,580,355,173,830,552,000
Test MQTT vacuum device registry integration.
tests/components/mqtt/test_state_vacuum.py
test_entity_device_info_with_identifier
FuqiangSong/home-assistant
python
async def test_entity_device_info_with_identifier(hass, mqtt_mock): entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_hass(hass) (await async_start(hass, 'homeassistant', {}, entry)) registry = (await hass.helpers.device_registry.async_get_registry()) data = json.dumps({'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'}) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.identifiers == {('mqtt', 'helloworld')}) assert (device.connections == {('mac', '02:5b:26:a8:dc:12')}) assert (device.manufacturer == 'Whatever') assert (device.name == 'Beer') assert (device.model == 'Glass') assert (device.sw_version == '0.1-beta')
async def test_entity_device_info_update(hass, mqtt_mock): 'Test device registry update.' entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_hass(hass) (await async_start(hass, 'homeassistant', {}, entry)) registry = (await hass.helpers.device_registry.async_get_registry()) config = {'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'} data = json.dumps(config) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.name == 'Beer') config['device']['name'] = 'Milk' data = json.dumps(config) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.name == 'Milk')
-6,846,881,501,567,133,000
Test device registry update.
tests/components/mqtt/test_state_vacuum.py
test_entity_device_info_update
FuqiangSong/home-assistant
python
async def test_entity_device_info_update(hass, mqtt_mock): entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_hass(hass) (await async_start(hass, 'homeassistant', {}, entry)) registry = (await hass.helpers.device_registry.async_get_registry()) config = {'platform': 'mqtt', 'name': 'Test 1', 'command_topic': 'test-command-topic', 'device': {'identifiers': ['helloworld'], 'connections': [['mac', '02:5b:26:a8:dc:12']], 'manufacturer': 'Whatever', 'name': 'Beer', 'model': 'Glass', 'sw_version': '0.1-beta'}, 'unique_id': 'veryunique'} data = json.dumps(config) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.name == 'Beer') config['device']['name'] = 'Milk' data = json.dumps(config) async_fire_mqtt_message(hass, 'homeassistant/vacuum/bla/config', data) (await hass.async_block_till_done()) device = registry.async_get_device({('mqtt', 'helloworld')}, set()) assert (device is not None) assert (device.name == 'Milk')
def to_np_unicode(string): '\n Converts the specified string to a numpy unicode array.\n ' n = len(string) np_string = np.zeros(n, dtype='U') for j in range(n): np_string[j] = string[j] return np_string
-1,173,059,717,860,704,000
Converts the specified string to a numpy unicode array.
python/tskit/drawing.py
to_np_unicode
brianzhang01/tskit
python
def to_np_unicode(string): n = len(string) np_string = np.zeros(n, dtype='U') for j in range(n): np_string[j] = string[j] return np_string
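A quick standalone check of the behavior described by the docstring above; the function body is copied verbatim so the snippet runs on its own.

    import numpy as np

    def to_np_unicode(string):
        # One array cell per character of the input string.
        n = len(string)
        np_string = np.zeros(n, dtype='U')
        for j in range(n):
            np_string[j] = string[j]
        return np_string

    print(to_np_unicode('tree'))  # ['t' 'r' 'e' 'e']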
def closest_left_node(tree, u): '\n Returns the node that closest to u in a left-to-right sense.\n ' ret = NULL while ((u != NULL) and (ret == NULL)): ret = tree.left_sib(u) u = tree.parent(u) return ret
-3,175,000,869,601,706,000
Returns the node that closest to u in a left-to-right sense.
python/tskit/drawing.py
closest_left_node
brianzhang01/tskit
python
def closest_left_node(tree, u): ret = NULL while ((u != NULL) and (ret == NULL)): ret = tree.left_sib(u) u = tree.parent(u) return ret
def node_time_depth(tree, min_branch_length=None, max_tree_height='tree'): '\n Returns a dictionary mapping nodes in the specified tree to their depth\n in the specified tree (from the root direction). If min_branch_len is\n provided, it specifies the minimum length of each branch. If not specified,\n default to 1.\n ' if (min_branch_length is None): min_branch_length = {u: 1 for u in range(tree.tree_sequence.num_nodes)} time_node_map = collections.defaultdict(list) current_depth = 0 depth = {} if (max_tree_height == 'tree'): for u in tree.nodes(): time_node_map[tree.time(u)].append(u) for t in sorted(time_node_map.keys()): for u in time_node_map[t]: for v in tree.children(u): current_depth = max(current_depth, (depth[v] + min_branch_length[v])) for u in time_node_map[t]: depth[u] = current_depth current_depth += 2 for root in tree.roots: current_depth = max(current_depth, (depth[root] + min_branch_length[root])) else: assert (max_tree_height == 'ts') ts = tree.tree_sequence for node in ts.nodes(): time_node_map[node.time].append(node.id) node_edges = collections.defaultdict(list) for edge in ts.edges(): node_edges[edge.parent].append(edge) for t in sorted(time_node_map.keys()): for u in time_node_map[t]: for edge in node_edges[u]: v = edge.child current_depth = max(current_depth, (depth[v] + min_branch_length[v])) for u in time_node_map[t]: depth[u] = current_depth current_depth += 2 return (depth, current_depth)
-9,167,423,746,071,495,000
Returns a dictionary mapping nodes in the specified tree to their depth in the specified tree (from the root direction). If min_branch_len is provided, it specifies the minimum length of each branch. If not specified, default to 1.
python/tskit/drawing.py
node_time_depth
brianzhang01/tskit
python
def node_time_depth(tree, min_branch_length=None, max_tree_height='tree'): if (min_branch_length is None): min_branch_length = {u: 1 for u in range(tree.tree_sequence.num_nodes)} time_node_map = collections.defaultdict(list) current_depth = 0 depth = {} if (max_tree_height == 'tree'): for u in tree.nodes(): time_node_map[tree.time(u)].append(u) for t in sorted(time_node_map.keys()): for u in time_node_map[t]: for v in tree.children(u): current_depth = max(current_depth, (depth[v] + min_branch_length[v])) for u in time_node_map[t]: depth[u] = current_depth current_depth += 2 for root in tree.roots: current_depth = max(current_depth, (depth[root] + min_branch_length[root])) else: assert (max_tree_height == 'ts') ts = tree.tree_sequence for node in ts.nodes(): time_node_map[node.time].append(node.id) node_edges = collections.defaultdict(list) for edge in ts.edges(): node_edges[edge.parent].append(edge) for t in sorted(time_node_map.keys()): for u in time_node_map[t]: for edge in node_edges[u]: v = edge.child current_depth = max(current_depth, (depth[v] + min_branch_length[v])) for u in time_node_map[t]: depth[u] = current_depth current_depth += 2 return (depth, current_depth)
@pytest.mark.parametrize('collection', ['test_simple'], indirect=True) def test_simple(self, tmpdir, collection): '\n Given collection.json, generate: library.py\n ' postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
-3,601,454,122,862,852,600
Given collection.json, generate: library.py
test/test_cli_postman2robot.py
test_simple
xNok/postman2robotframework
python
@pytest.mark.parametrize('collection', ['test_simple'], indirect=True) def test_simple(self, tmpdir, collection): postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
@pytest.mark.parametrize('collection', ['test_with_folder'], indirect=True) def test_with_folder(self, tmpdir, collection): '\n Given collection.json, generate: library.py\n ' postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
3,790,798,327,278,181,000
Given collection.json, generate: library.py
test/test_cli_postman2robot.py
test_with_folder
xNok/postman2robotframework
python
@pytest.mark.parametrize('collection', ['test_with_folder'], indirect=True) def test_with_folder(self, tmpdir, collection): postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
@pytest.mark.parametrize('collection', ['test_with_variables'], indirect=True) def test_with_variables(self, tmpdir, collection): '\n Given collection.json, generate: library.py\n ' postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
-1,530,621,004,787,444,500
Given collection.json, generate: library.py
test/test_cli_postman2robot.py
test_with_variables
xNok/postman2robotframework
python
@pytest.mark.parametrize('collection', ['test_with_variables'], indirect=True) def test_with_variables(self, tmpdir, collection): postman2robot(self.cli_args) assert os.path.isfile(collection['generated']) assert (collection['generated'].read() == collection['expected'].read())
def square_env(self): '\n Generate map where each vertex has a one hot categorical distribution\n Returns:\n (N,N,num_categories) matrix with one-hot categorical observations\n ' env_size = self.env_size env = np.zeros((env_size[0], env_size[1], self.num_categories)) for i in range(env_size[0]): category = np.random.randint(0, self.num_categories, env_size[1]) env[(i, np.arange(category.size), category)] = 1 return env
-6,309,671,404,201,822,000
Generate map where each vertex has a one hot categorical distribution Returns: (N,N,num_categories) matrix with one-hot categorical observations
generate.py
square_env
Victorwz/Generative-Hippocampal-entorhinal-System
python
def square_env(self): env_size = self.env_size env = np.zeros((env_size[0], env_size[1], self.num_categories)) for i in range(env_size[0]): category = np.random.randint(0, self.num_categories, env_size[1]) env[(i, np.arange(category.size), category)] = 1 return env
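A self-contained sketch of the same one-hot construction, with env_size and num_categories chosen arbitrarily, verifying that every vertex holds exactly one category:

    import numpy as np

    env_size, num_categories = (4, 4), 5
    env = np.zeros((env_size[0], env_size[1], num_categories))
    for i in range(env_size[0]):
        category = np.random.randint(0, num_categories, env_size[1])
        env[i, np.arange(env_size[1]), category] = 1
    assert (env.sum(axis=-1) == 1).all()  # one-hot at every (row, col) vertex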
def update_location_4way(self, env, loc): '\n Samples a valid four-way action and updates location \n ' length = env.shape[0] valid = False while (not valid): action = np.random.randint(0, 4) if (action == 0): if ((loc[0] - 1) >= 0): loc[0] -= 1 valid = True elif (action == 1): if ((loc[1] + 1) < length): loc[1] += 1 valid = True elif (action == 2): if ((loc[0] + 1) < length): loc[0] += 1 valid = True elif (action == 3): if ((loc[1] - 1) >= 0): loc[1] -= 1 valid = True act = np.zeros(4) act[action] = 1 return (act, loc)
-4,695,557,541,666,478,000
Samples a valid four-way action and updates location
generate.py
update_location_4way
Victorwz/Generative-Hippocampal-entorhinal-System
python
def update_location_4way(self, env, loc): length = env.shape[0] valid = False while (not valid): action = np.random.randint(0, 4) if (action == 0): if ((loc[0] - 1) >= 0): loc[0] -= 1 valid = True elif (action == 1): if ((loc[1] + 1) < length): loc[1] += 1 valid = True elif (action == 2): if ((loc[0] + 1) < length): loc[0] += 1 valid = True elif (action == 3): if ((loc[1] - 1) >= 0): loc[1] -= 1 valid = True act = np.zeros(4) act[action] = 1 return (act, loc)
def trajectory_4way(self, env): '\n Generate trajectory of agent diffusing through 4-way connected graph\n At each point we sample the one-hot observation and take an action\n 0 = up\n 1 = right\n 2 = down\n 3 = left \n\n Params:\n steps (int): Number of steps to take\n env (3d np array): environment in which to wander (NxNx(num_categories))\n Returns \n Observations (steps, num_categories), Actions (steps, 4) \n ' observations = np.zeros((self.steps, self.num_categories)) actions = np.zeros((self.steps, 4)) positions = np.zeros((self.steps, 2)) loc = np.random.randint(0, env.shape[0], 2) for step in range(self.steps): positions[step] = loc obs = env[(loc[0], loc[1])] (action, loc) = self.update_location_4way(env, loc) observations[step] = obs actions[step] = action return (observations, actions, positions)
-1,165,239,288,881,800,200
Generate trajectory of agent diffusing through 4-way connected graph At each point we sample the one-hot observation and take an action 0 = up 1 = right 2 = down 3 = left Params: steps (int): Number of steps to take env (3d np array): environment in which to wander (NxNx(num_categories)) Returns Observations (steps, num_categories), Actions (steps, 4)
generate.py
trajectory_4way
Victorwz/Generative-Hippocampal-entorhinal-System
python
def trajectory_4way(self, env): observations = np.zeros((self.steps, self.num_categories)) actions = np.zeros((self.steps, 4)) positions = np.zeros((self.steps, 2)) loc = np.random.randint(0, env.shape[0], 2) for step in range(self.steps): positions[step] = loc obs = env[(loc[0], loc[1])] (action, loc) = self.update_location_4way(env, loc) observations[step] = obs actions[step] = action return (observations, actions, positions)
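The docstring fixes the action encoding (0 = up, 1 = right, 2 = down, 3 = left); a small sketch decoding one-hot action rows back to direction names:

    import numpy as np

    DIRECTIONS = ['up', 'right', 'down', 'left']  # index order from the docstring
    actions = np.array([[0, 1, 0, 0], [0, 0, 0, 1]])  # two example one-hot rows
    print([DIRECTIONS[i] for i in actions.argmax(axis=1)])  # ['right', 'left']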
def generate_data(self, verbose=False): '\n Generates N square environments and trajectories ((observation, action) pairs)\n for each environment\n\n Params:\n envs (int): number of environments to generate\n steps (int): how many steps an agent initially takes in each environment\n env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)\n save (bool): whether or not to save the dataset\n \n Returns:\n Dict of "environments, observations, actions", each corresponding to: \n environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories), \n observations: Array shape: (num_envs, steps, num_categories),\n actions: Array shape: (num_envs, steps, 4)\n ' env_size = self.env_size if (self.num_categories == None): self.num_categories = (env_size[0] * env_size[1]) self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories)) self.observations = np.zeros((self.num_envs, self.steps, self.num_categories)) self.actions = np.zeros((self.num_envs, self.steps, 4)) self.positions = np.zeros((self.num_envs, self.steps, 2)) for i in range(self.num_envs): env = self.square_env() (obs, acts, pos) = self.trajectory_4way(env) self.environments[i] = env self.observations[i] = obs self.actions[i] = acts self.positions[i] = pos self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions} if self.save: name = os.path.join(self.data_root, 'four_way_graph.pickle') with open(name, 'wb') as handle: pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
-4,490,168,198,373,569,500
Generates N square environments and trajectories ((observation, action) pairs) for each environment Params: envs (int): number of environments to generate steps (int): how many steps an agent initially takes in each environment env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...) save (bool): whether or not to save the dataset Returns: Dict of "environments, observations, actions", each corresponding to: environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories), observations: Array shape: (num_envs, steps, num_categories), actions: Array shape: (num_envs, steps, 4)
generate.py
generate_data
Victorwz/Generative-Hippocampal-entorhinal-System
python
def generate_data(self, verbose=False): env_size = self.env_size if (self.num_categories == None): self.num_categories = (env_size[0] * env_size[1]) self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories)) self.observations = np.zeros((self.num_envs, self.steps, self.num_categories)) self.actions = np.zeros((self.num_envs, self.steps, 4)) self.positions = np.zeros((self.num_envs, self.steps, 2)) for i in range(self.num_envs): env = self.square_env() (obs, acts, pos) = self.trajectory_4way(env) self.environments[i] = env self.observations[i] = obs self.actions[i] = acts self.positions[i] = pos self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions} if self.save: name = os.path.join(self.data_root, 'four_way_graph.pickle') with open(name, 'wb') as handle: pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
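When save is enabled, the dictionary is pickled to four_way_graph.pickle under data_root; a sketch of reading it back ('data' is a stand-in for data_root, which is not shown here):

    import os, pickle

    with open(os.path.join('data', 'four_way_graph.pickle'), 'rb') as handle:
        loaded = pickle.load(handle)
    print(loaded['observations'].shape)  # (num_envs, steps, num_categories)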
def initialize_options(self, *args): 'omit -Wstrict-prototypes from CFLAGS since its only valid for C code.' self.android = False self.makefile = False self.jar = False import distutils.sysconfig cfg_vars = distutils.sysconfig.get_config_vars() replacement = {'-Wstrict-prototypes': '', '-Wimplicit-function-declaration': ''} tracing = self.distribution.enable_tracing remove_args = ['-O0', '-O1', '-O2', '-O3', '-g'] for (k, v) in cfg_vars.items(): if (not isinstance(v, str)): continue if ((not (k == 'OPT')) and (not ('FLAGS' in k))): continue args = v.split() for r in remove_args: args = list(filter(r.__ne__, args)) cfg_vars[k] = ' '.join(args) super().initialize_options()
-1,229,185,972,584,870,700
omit -Wstrict-prototypes from CFLAGS since its only valid for C code.
setupext/build_ext.py
initialize_options
altendky/jpype
python
def initialize_options(self, *args): self.android = False self.makefile = False self.jar = False import distutils.sysconfig cfg_vars = distutils.sysconfig.get_config_vars() replacement = {'-Wstrict-prototypes': '', '-Wimplicit-function-declaration': ''} tracing = self.distribution.enable_tracing remove_args = ['-O0', '-O1', '-O2', '-O3', '-g'] for (k, v) in cfg_vars.items(): if (not isinstance(v, str)): continue if ((not (k == 'OPT')) and (not ('FLAGS' in k))): continue args = v.split() for r in remove_args: args = list(filter(r.__ne__, args)) cfg_vars[k] = ' '.join(args) super().initialize_options()
def build_java_ext(self, ext): 'Run command.' java = self.distribution.enable_build_jar javac = 'javac' try: if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')): javac = ('"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')) except KeyError: pass jar = 'jar' try: if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')): jar = ('"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')) except KeyError: pass if (not java): src = os.path.join('native', 'jars') dest = os.path.dirname(self.get_ext_fullpath('JAVA')) if os.path.exists(src): distutils.log.info('Using Jar cache') copy_tree(src, dest) return classpath = '.' if ext.libraries: classpath = os.path.pathsep.join(ext.libraries) distutils.log.info('Jar cache is missing, using --enable-build-jar to recreate it.') coverage = self.distribution.enable_coverage target_version = '1.8' try: dirname = os.path.dirname(self.get_ext_fullpath('JAVA')) jarFile = os.path.join(dirname, (ext.name + '.jar')) build_dir = os.path.join(self.build_temp, ext.name, 'classes') os.makedirs(build_dir, exist_ok=True) os.makedirs(dirname, exist_ok=True) cmd1 = shlex.split(('%s -cp "%s" -d "%s" -g:none -source %s -target %s' % (javac, classpath, build_dir, target_version, target_version))) cmd1.extend(ext.sources) debug = '-g:none' if coverage: debug = '-g:lines,vars,source' os.makedirs('build/classes', exist_ok=True) self.announce((' %s' % ' '.join(cmd1)), level=distutils.log.INFO) subprocess.check_call(cmd1) try: for file in glob.iglob('native/java/**/*.*', recursive=True): if (file.endswith('.java') or os.path.isdir(file)): continue p = os.path.join(build_dir, os.path.relpath(file, 'native/java')) print('Copy file', file, p) shutil.copyfile(file, p) except Exception as ex: print('FAIL', ex) pass cmd3 = shlex.split(('%s cvf "%s" -C "%s" .' % (jar, jarFile, build_dir))) self.announce((' %s' % ' '.join(cmd3)), level=distutils.log.INFO) subprocess.check_call(cmd3) except subprocess.CalledProcessError as exc: distutils.log.error(exc.output) raise DistutilsPlatformError('Error executing {}'.format(exc.cmd))
-7,071,095,930,355,216,000
Run command.
setupext/build_ext.py
build_java_ext
altendky/jpype
python
def build_java_ext(self, ext): java = self.distribution.enable_build_jar javac = 'javac' try: if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')): javac = ('"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')) except KeyError: pass jar = 'jar' try: if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')): jar = ('"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')) except KeyError: pass if (not java): src = os.path.join('native', 'jars') dest = os.path.dirname(self.get_ext_fullpath('JAVA')) if os.path.exists(src): distutils.log.info('Using Jar cache') copy_tree(src, dest) return classpath = '.' if ext.libraries: classpath = os.path.pathsep.join(ext.libraries) distutils.log.info('Jar cache is missing, using --enable-build-jar to recreate it.') coverage = self.distribution.enable_coverage target_version = '1.8' try: dirname = os.path.dirname(self.get_ext_fullpath('JAVA')) jarFile = os.path.join(dirname, (ext.name + '.jar')) build_dir = os.path.join(self.build_temp, ext.name, 'classes') os.makedirs(build_dir, exist_ok=True) os.makedirs(dirname, exist_ok=True) cmd1 = shlex.split(('%s -cp "%s" -d "%s" -g:none -source %s -target %s' % (javac, classpath, build_dir, target_version, target_version))) cmd1.extend(ext.sources) debug = '-g:none' if coverage: debug = '-g:lines,vars,source' os.makedirs('build/classes', exist_ok=True) self.announce((' %s' % ' '.join(cmd1)), level=distutils.log.INFO) subprocess.check_call(cmd1) try: for file in glob.iglob('native/java/**/*.*', recursive=True): if (file.endswith('.java') or os.path.isdir(file)): continue p = os.path.join(build_dir, os.path.relpath(file, 'native/java')) print('Copy file', file, p) shutil.copyfile(file, p) except Exception as ex: print('FAIL', ex) pass cmd3 = shlex.split(('%s cvf "%s" -C "%s" .' % (jar, jarFile, build_dir))) self.announce((' %s' % ' '.join(cmd3)), level=distutils.log.INFO) subprocess.check_call(cmd3) except subprocess.CalledProcessError as exc: distutils.log.error(exc.output) raise DistutilsPlatformError('Error executing {}'.format(exc.cmd))
def create_app(test_config: TestConfig=None) -> Flask: ' App factory method to initialize the application with given configuration ' app: Flask = Flask(__name__) if (test_config is not None): app.config.from_mapping(test_config) @app.route('/') def index() -> str: return 'My Hello World App is working...' @app.route('/version') def version() -> str: '\n DOCKER_IMAGE_TAG is passed in the app from Dockerfile as ARG.\n It should be setup in docker build task..\n It is used in .gitlab-ci.yaml to pass the hash of the latest commit as docker image tag.\n E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .\n ' return (getenv('DOCKER_IMAGE_TAG') or "DOCKER_IMAGE_TAG haven't been setup") return app
7,191,790,435,853,017,000
App factory method to initialize the application with given configuration
my_hello_world_app/web_api/router.py
create_app
gsjay980/data-science-IP
python
def create_app(test_config: TestConfig=None) -> Flask: app: Flask = Flask(__name__) if (test_config is not None): app.config.from_mapping(test_config) @app.route('/') def index() -> str: return 'My Hello World App is working...' @app.route('/version') def version() -> str: '\n DOCKER_IMAGE_TAG is passed in the app from Dockerfile as ARG.\n It should be setup in docker build task..\n It is used in .gitlab-ci.yaml to pass the hash of the latest commit as docker image tag.\n E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .\n ' return (getenv('DOCKER_IMAGE_TAG') or "DOCKER_IMAGE_TAG haven't been setup") return app
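A minimal sketch of exercising the factory with Flask's test client, assuming the router module is importable and that a plain mapping is an acceptable test_config (from_mapping accepts any mapping):

    from my_hello_world_app.web_api.router import create_app  # path per the record above

    app = create_app({'TESTING': True})
    client = app.test_client()
    assert client.get('/').data == b'My Hello World App is working...'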
@app.route('/version') def version() -> str: '\n DOCKER_IMAGE_TAG is passed in the app from Dockerfile as ARG.\n It should be setup in docker build task..\n It is used in .gitlab-ci.yaml to pass the hash of the latest commit as docker image tag.\n E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .\n ' return (getenv('DOCKER_IMAGE_TAG') or "DOCKER_IMAGE_TAG haven't been setup")
-3,475,765,495,773,032,000
DOCKER_IMAGE_TAG is passed in the app from Dockerfile as ARG. It should be setup in docker build task.. It is used in .gitlab-ci.yaml to pass the hash of the latest commit as docker image tag. E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .
my_hello_world_app/web_api/router.py
version
gsjay980/data-science-IP
python
@app.route('/version') def version() -> str: return (getenv('DOCKER_IMAGE_TAG') or "DOCKER_IMAGE_TAG haven't been setup")
def deploy_request_json_as_strings(job): "\n Get a string list representing a deploy job's request JSON.\n " request_json = job.request_json classifier_id = request_json['classifier_id'] try: classifier = Classifier.objects.get(pk=classifier_id) classifier_display = 'Classifier ID {} (Source ID {})'.format(classifier_id, classifier.source.pk) except Classifier.DoesNotExist: classifier_display = 'Classifier ID {} (deleted)'.format(classifier_id) return [classifier_display, 'URL: {}'.format(request_json['url']), 'Point count: {}'.format(len(request_json['points']))]
1,338,199,238,588,783,600
Get a string list representing a deploy job's request JSON.
project/vision_backend_api/utils.py
deploy_request_json_as_strings
beijbom/coralnet
python
def deploy_request_json_as_strings(job): request_json = job.request_json classifier_id = request_json['classifier_id'] try: classifier = Classifier.objects.get(pk=classifier_id) classifier_display = 'Classifier ID {} (Source ID {})'.format(classifier_id, classifier.source.pk) except Classifier.DoesNotExist: classifier_display = 'Classifier ID {} (deleted)'.format(classifier_id) return [classifier_display, 'URL: {}'.format(request_json['url']), 'Point count: {}'.format(len(request_json['points']))]
def main(): 'Run administrative tasks.' os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrapmart.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
1,642,794,296,057,949,400
Run administrative tasks.
manage.py
main
vivekx01/oldscrapmart
python
def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrapmart.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError("Couldn't import Django. Are you sure it's installed and available on your PYTHONPATH environment variable? Did you forget to activate a virtual environment?") from exc execute_from_command_line(sys.argv)
def open(table_file: str, table_map_file: str=None) -> pd.DataFrame: '\n Opens a dynamo table file, returning a DynamoTable object\n :param table_file:\n :return: dataframe\n ' df = pd.read_csv(table_file, header=None, delim_whitespace=True) n_cols = df.shape[1] if (n_cols <= len(COLUMN_NAMES)): column_names = COLUMN_NAMES[0:n_cols] df.columns = column_names else: extra_columns_needed = (n_cols - len(COLUMN_NAMES)) column_names = (list(COLUMN_NAMES) + ['' for x in range(extra_columns_needed)]) df = df.apply(pd.to_numeric, errors='ignore') if ((table_map_file is not None) and Path(table_map_file).exists()): table_map_dict = table_map_read(table_map_file) tomo_file = [table_map_dict[tomo_idx] for tomo_idx in df['tomo']] df['tomo_file'] = tomo_file return df
264,022,627,744,365,470
Opens a dynamo table file, returning a DynamoTable object :param table_file: :return: dataframe
dynamotable/dynamotable.py
open
brisvag/dynamotable
python
def open(table_file: str, table_map_file: str=None) -> pd.DataFrame: df = pd.read_csv(table_file, header=None, delim_whitespace=True) n_cols = df.shape[1] if (n_cols <= len(COLUMN_NAMES)): column_names = COLUMN_NAMES[0:n_cols] df.columns = column_names else: extra_columns_needed = (n_cols - len(COLUMN_NAMES)) column_names = (list(COLUMN_NAMES) + ['' for x in range(extra_columns_needed)]) df = df.apply(pd.to_numeric, errors='ignore') if ((table_map_file is not None) and Path(table_map_file).exists()): table_map_dict = table_map_read(table_map_file) tomo_file = [table_map_dict[tomo_idx] for tomo_idx in df['tomo']] df['tomo_file'] = tomo_file return df
def read(filename: str, table_map_file: str=None) -> pd.DataFrame: '\n Opens a dynamo table file, returning a pandas DataFrame\n :param filename:\n :return: dataframe\n ' df = open(filename, table_map_file) return df
-4,210,808,082,134,412,300
Opens a dynamo table file, returning a pandas DataFrame :param filename: :return: dataframe
dynamotable/dynamotable.py
read
brisvag/dynamotable
python
def read(filename: str, table_map_file: str=None) -> pd.DataFrame: df = open(filename, table_map_file) return df
def new(dataframe: pd.DataFrame, filename: str): '\n Writes a dynamo table file from a pandas DataFrame\n :param dataframe: pandas dataframe with headings matching the name from the dynamo table convention\n :param filename: file in which to save data from dataframe, should end in .tbl\n :return:\n ' n_rows = dataframe.shape[0] if (('tomo_file' in dataframe.columns) and ('tomo' not in dataframe.columns)): tomo_names = dataframe['tomo_file'].unique() tomo_name_idx = {name: index for (index, name) in enumerate(tomo_names)} tomo_idx = [tomo_name_idx[name] for name in dataframe['tomo_file']] dataframe['tomo'] = tomo_idx if ('tag' not in dataframe.columns): tags = [(x + 1) for x in range(n_rows)] dataframe['tag'] = tags zeros = [0 for x in range(n_rows)] ones = [1 for x in range(n_rows)] data = {} for column_name in COLUMN_NAMES: if (column_name in dataframe.columns): data[column_name] = dataframe[column_name] elif ((column_name not in dataframe.columns) and (column_name == 'aligned_value')): data[column_name] = ones else: data[column_name] = zeros table = pd.DataFrame.from_dict(data) filename = str(filename) if (not filename.endswith('.tbl')): filename = (filename + '.tbl') table.to_csv(filename, sep=' ', header=False, index=False) if ('tomo_file' in dataframe.columns): table_file_name = filename.replace('.tbl', '.doc') table_map = dataframe[['tomo', 'tomo_file']].drop_duplicates(subset='tomo') table_map.to_csv(table_file_name, sep=' ', header=False, index=False) return
7,186,038,429,234,580,000
Writes a dynamo table file from a pandas DataFrame :param dataframe: pandas dataframe with headings matching the name from the dynamo table convention :param filename: file in which to save data from dataframe, should end in .tbl :return:
dynamotable/dynamotable.py
new
brisvag/dynamotable
python
def new(dataframe: pd.DataFrame, filename: str): n_rows = dataframe.shape[0] if (('tomo_file' in dataframe.columns) and ('tomo' not in dataframe.columns)): tomo_names = dataframe['tomo_file'].unique() tomo_name_idx = {name: index for (index, name) in enumerate(tomo_names)} tomo_idx = [tomo_name_idx[name] for name in dataframe['tomo_file']] dataframe['tomo'] = tomo_idx if ('tag' not in dataframe.columns): tags = [(x + 1) for x in range(n_rows)] dataframe['tag'] = tags zeros = [0 for x in range(n_rows)] ones = [1 for x in range(n_rows)] data = {} for column_name in COLUMN_NAMES: if (column_name in dataframe.columns): data[column_name] = dataframe[column_name] elif ((column_name not in dataframe.columns) and (column_name == 'aligned_value')): data[column_name] = ones else: data[column_name] = zeros table = pd.DataFrame.from_dict(data) filename = str(filename) if (not filename.endswith('.tbl')): filename = (filename + '.tbl') table.to_csv(filename, sep=' ', header=False, index=False) if ('tomo_file' in dataframe.columns): table_file_name = filename.replace('.tbl', '.doc') table_map = dataframe[['tomo', 'tomo_file']].drop_duplicates(subset='tomo') table_map.to_csv(table_file_name, sep=' ', header=False, index=False) return
def write(dataframe: pd.DataFrame, filename: str): '\n Writes a dynamo table file from a pandas DataFrame\n :param dataframe: pandas dataframe with headings matching the name from the dynamo table convention\n :param filename: file in which to save data from dataframe, should end in .tbl\n :return:\n ' new(dataframe, filename) return
-5,981,066,273,529,164,000
Writes a dynamo table file from a pandas DataFrame :param dataframe: pandas dataframe with headings matching the name from the dynamo table convention :param filename: file in which to save data from dataframe, should end in .tbl :return:
dynamotable/dynamotable.py
write
brisvag/dynamotable
python
def write(dataframe: pd.DataFrame, filename: str): new(dataframe, filename) return
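A sketch of a read/modify/write round trip with these helpers; the import path follows the record's module path and the file names are hypothetical:

    from dynamotable import dynamotable  # module at dynamotable/dynamotable.py

    df = dynamotable.read('particles.tbl')         # hypothetical input table
    df = df[df['tag'] <= 100]                      # keep the first 100 particles
    dynamotable.write(df, 'particles_subset.tbl')  # also writes a .doc map when tomo_file is present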
def create_repository(repository_type: str, file_system_directory=None, s3_url=None, s3_endpoint_url=None, gcs_url=None) -> BaseRepository: 'Creates a repository based on a provided type and parameters' if (repository_type == 's3'): return S3Repository(s3_url, endpoint_url=s3_endpoint_url) elif (repository_type == 'gcs'): return GCSRepository(gcs_url) elif (repository_type == 'file_system'): return FileSystemRepository(file_system_directory) else: raise ValueError('Unrecognized repository type {}'.format(repository_type))
2,210,660,703,866,795,500
Creates a repository based on a provided type and parameters
bentoml/yatai/repository/__init__.py
create_repository
AnvithaGadagi/BentoML
python
def create_repository(repository_type: str, file_system_directory=None, s3_url=None, s3_endpoint_url=None, gcs_url=None) -> BaseRepository: if (repository_type == 's3'): return S3Repository(s3_url, endpoint_url=s3_endpoint_url) elif (repository_type == 'gcs'): return GCSRepository(gcs_url) elif (repository_type == 'file_system'): return FileSystemRepository(file_system_directory) else: raise ValueError('Unrecognized repository type {}'.format(repository_type))
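A usage sketch of the factory, with the directory and bucket URL as placeholders:

    # Local file-system backing store (directory path is a placeholder).
    repo = create_repository('file_system', file_system_directory='/tmp/bento_repository')

    # S3-backed store (bucket URL is a placeholder).
    # repo = create_repository('s3', s3_url='s3://my-bucket/repo')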
def shorten_amount(amount): ' Given an amount in bitcoin, shorten it\n ' amount = int((amount * (10 ** 12))) units = ['p', 'n', 'u', 'm'] for unit in units: if ((amount % 1000) == 0): amount //= 1000 else: break else: unit = '' return (str(amount) + unit)
7,155,915,786,397,356,000
Given an amount in bitcoin, shorten it
electrum/lnaddr.py
shorten_amount
Feathercoin-Applications/electrum-ftc
python
def shorten_amount(amount): amount = int((amount * (10 ** 12))) units = ['p', 'n', 'u', 'm'] for unit in units: if ((amount % 1000) == 0): amount //= 1000 else: break else: unit = '' return (str(amount) + unit)
def unshorten_amount(amount) -> Decimal: ' Given a shortened amount, convert it into a decimal\n ' units = {'p': (10 ** 12), 'n': (10 ** 9), 'u': (10 ** 6), 'm': (10 ** 3)} unit = str(amount)[(- 1)] if (not re.fullmatch('\\d+[pnum]?', str(amount))): raise LnDecodeException("Invalid amount '{}'".format(amount)) if (unit in units.keys()): return (Decimal(amount[:(- 1)]) / units[unit]) else: return Decimal(amount)
7,668,607,578,479,434,000
Given a shortened amount, convert it into a decimal
electrum/lnaddr.py
unshorten_amount
Feathercoin-Applications/electrum-ftc
python
def unshorten_amount(amount) -> Decimal: units = {'p': (10 ** 12), 'n': (10 ** 9), 'u': (10 ** 6), 'm': (10 ** 3)} unit = str(amount)[(- 1)] if (not re.fullmatch('\\d+[pnum]?', str(amount))): raise LnDecodeException("Invalid amount '{}'".format(amount)) if (unit in units.keys()): return (Decimal(amount[:(- 1)]) / units[unit]) else: return Decimal(amount)
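A worked round trip through the two helpers above, assuming both are in scope (values traced by hand: 0.001 BTC * 10^12 = 10^9 picobitcoin, which divides by 1000 exactly three times and lands on the 'm' multiplier):

    from decimal import Decimal

    assert shorten_amount(Decimal('0.001')) == '1m'    # 1 millibitcoin
    assert shorten_amount(Decimal('1')) == '1'         # all multipliers consumed: no suffix
    assert unshorten_amount('1m') == Decimal('0.001')  # inverse direction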
def encode_fallback(fallback: str, net: Type[AbstractNet]): ' Encode all supported fallback addresses.\n ' (wver, wprog_ints) = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback) if (wver is not None): wprog = bytes(wprog_ints) else: (addrtype, addr) = b58_address_to_hash160(fallback) if (addrtype == net.ADDRTYPE_P2PKH): wver = 17 elif (addrtype == net.ADDRTYPE_P2SH): wver = 18 else: raise LnEncodeException(f'Unknown address type {addrtype} for {net}') wprog = addr return tagged('f', (bitstring.pack('uint:5', wver) + wprog))
3,149,272,238,788,878,000
Encode all supported fallback addresses.
electrum/lnaddr.py
encode_fallback
Feathercoin-Applications/electrum-ftc
python
def encode_fallback(fallback: str, net: Type[AbstractNet]): (wver, wprog_ints) = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback) if (wver is not None): wprog = bytes(wprog_ints) else: (addrtype, addr) = b58_address_to_hash160(fallback) if (addrtype == net.ADDRTYPE_P2PKH): wver = 17 elif (addrtype == net.ADDRTYPE_P2SH): wver = 18 else: raise LnEncodeException(f'Unknown address type {addrtype} for {net}') wprog = addr return tagged('f', (bitstring.pack('uint:5', wver) + wprog))
def trim_to_min_length(bits): "Ensures 'bits' have min number of leading zeroes.\n Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.\n " bits = bits[:] while ((bits.len % 5) != 0): bits.prepend('0b0') while bits.startswith('0b00000'): if (len(bits) == 5): break bits = bits[5:] return bits
-7,475,223,037,422,194,000
Ensures 'bits' has the minimum number of leading zeroes. Assumes 'bits' is big-endian, and that it needs to be encoded in 5-bit blocks.
electrum/lnaddr.py
trim_to_min_length
Feathercoin-Applications/electrum-ftc
python
def trim_to_min_length(bits): "Ensures 'bits' have min number of leading zeroes.\n Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.\n " bits = bits[:] while ((bits.len % 5) != 0): bits.prepend('0b0') while bits.startswith('0b00000'): if (len(bits) == 5): break bits = bits[5:] return bits
def set_measurepoint_command_handler(arrived_message, arg_list): ' message callback, handle the received downstream message and implement your logic\n\n :param arrived_message: the arrived msg instance , it may instanceof class <BaseCommand> or <BaseResponse>\n :param arg_list: the topic args extract from the arrived topic , including productKey , deviceKey ,etc\n :return: the msg you want to reply to the cloud , if you do NOT want send msg , just return None\n ' print('receive measurepoint set command, params: {}'.format(arrived_message.get_params())) print('product key = {}, device key= {}'.format(arg_list[0], arg_list[1])) return MeasurepointSetReply().builder().set_code(200).set_message('measurepoints set success').build()
4,787,075,641,865,560,000
message callback: handle the received downstream message and implement your logic :param arrived_message: the arrived msg instance; it may be an instance of class <BaseCommand> or <BaseResponse> :param arg_list: the topic args extracted from the arrived topic, including productKey, deviceKey, etc. :return: the msg you want to reply to the cloud; if you do NOT want to send a msg, just return None
enos/sample/CommandSample.py
set_measurepoint_command_handler
charleshuangcai/enos-device-sdk-python
python
def set_measurepoint_command_handler(arrived_message, arg_list): ' message callback, handle the received downstream message and implement your logic\n\n :param arrived_message: the arrived msg instance , it may instanceof class <BaseCommand> or <BaseResponse>\n :param arg_list: the topic args extract from the arrived topic , including productKey , deviceKey ,etc\n :return: the msg you want to reply to the cloud , if you do NOT want send msg , just return None\n ' print('receive measurepoint set command, params: {}'.format(arrived_message.get_params())) print('product key = {}, device key= {}'.format(arg_list[0], arg_list[1])) return MeasurepointSetReply().builder().set_code(200).set_message('measurepoints set success').build()
def fit_ellipse(pty, ptx): 'Fit an\n \n inspired from \n http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html\n \n :param pty: point coordinates in the slow dimension (y)\n :param ptx: point coordinates in the fast dimension (x)\n ' x = ptx[:, numpy.newaxis] y = pty[:, numpy.newaxis] D = numpy.hstack(((x * x), (x * y), (y * y), x, y, numpy.ones_like(x))) S = numpy.dot(D.T, D) C = numpy.zeros([6, 6]) C[(0, 2)] = C[(2, 0)] = 2 C[(1, 1)] = (- 1) (E, V) = numpy.linalg.eig(numpy.dot(numpy.linalg.inv(S), C)) n = numpy.argmax(numpy.abs(E)) res = V[:, n] (b, c, d, f, g, a) = ((res[1] / 2), res[2], (res[3] / 2), (res[4] / 2), res[5], res[0]) num = ((b * b) - (a * c)) x0 = (((c * d) - (b * f)) / num) y0 = (((a * f) - (b * d)) / num) if (b == 0): if (a > c): angle = 0 else: angle = (numpy.pi / 2) elif (a > c): angle = (numpy.arctan2((2 * b), (a - c)) / 2) else: angle = ((numpy.pi / 2) + (numpy.arctan2((2 * b), (a - c)) / 2)) up = (2 * ((((((a * f) * f) + ((c * d) * d)) + ((g * b) * b)) - (((2 * b) * d) * f)) - ((a * c) * g))) down1 = (((b * b) - (a * c)) * (((c - a) * numpy.sqrt((1 + (((4 * b) * b) / ((a - c) * (a - c)))))) - (c + a))) down2 = (((b * b) - (a * c)) * (((a - c) * numpy.sqrt((1 + (((4 * b) * b) / ((a - c) * (a - c)))))) - (c + a))) res1 = numpy.sqrt((up / down1)) res2 = numpy.sqrt((up / down2)) return Ellipse(y0, x0, angle, max(res1, res2), min(res1, res2))
343,945,079,364,709,100
Fit an ellipse, inspired by http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html :param pty: point coordinates in the slow dimension (y) :param ptx: point coordinates in the fast dimension (x)
autoprocess/utils/ellipse.py
fit_ellipse
michel4j/auto-process
python
def fit_ellipse(pty, ptx): 'Fit an\n \n inspired from \n http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html\n \n :param pty: point coordinates in the slow dimension (y)\n :param ptx: point coordinates in the fast dimension (x)\n ' x = ptx[:, numpy.newaxis] y = pty[:, numpy.newaxis] D = numpy.hstack(((x * x), (x * y), (y * y), x, y, numpy.ones_like(x))) S = numpy.dot(D.T, D) C = numpy.zeros([6, 6]) C[(0, 2)] = C[(2, 0)] = 2 C[(1, 1)] = (- 1) (E, V) = numpy.linalg.eig(numpy.dot(numpy.linalg.inv(S), C)) n = numpy.argmax(numpy.abs(E)) res = V[:, n] (b, c, d, f, g, a) = ((res[1] / 2), res[2], (res[3] / 2), (res[4] / 2), res[5], res[0]) num = ((b * b) - (a * c)) x0 = (((c * d) - (b * f)) / num) y0 = (((a * f) - (b * d)) / num) if (b == 0): if (a > c): angle = 0 else: angle = (numpy.pi / 2) elif (a > c): angle = (numpy.arctan2((2 * b), (a - c)) / 2) else: angle = ((numpy.pi / 2) + (numpy.arctan2((2 * b), (a - c)) / 2)) up = (2 * ((((((a * f) * f) + ((c * d) * d)) + ((g * b) * b)) - (((2 * b) * d) * f)) - ((a * c) * g))) down1 = (((b * b) - (a * c)) * (((c - a) * numpy.sqrt((1 + (((4 * b) * b) / ((a - c) * (a - c)))))) - (c + a))) down2 = (((b * b) - (a * c)) * (((a - c) * numpy.sqrt((1 + (((4 * b) * b) / ((a - c) * (a - c)))))) - (c + a))) res1 = numpy.sqrt((up / down1)) res2 = numpy.sqrt((up / down2)) return Ellipse(y0, x0, angle, max(res1, res2), min(res1, res2))
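A small sanity check for the least-squares fit above (hypothetical usage; the Ellipse return type is whatever the module defines): sample points from a known axis-aligned ellipse and confirm the recovered parameters.

import numpy

t = numpy.linspace(0, 2 * numpy.pi, 100, endpoint=False)
ptx = 10.0 + 5.0 * numpy.cos(t)  # centre x0 = 10, semi-axis 5 along x
pty = 20.0 + 2.0 * numpy.sin(t)  # centre y0 = 20, semi-axis 2 along y
e = fit_ellipse(pty, ptx)
# expect roughly: centre (20, 10) in (y, x) order, angle ~0, axes ~(5, 2)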
def __init__(self, theta, label=None): 'Create new RX gate.' super().__init__('rx', 1, [theta], label=label)
7,545,432,243,902,676,000
Create new RX gate.
qiskit/circuit/library/standard_gates/rx.py
__init__
CatalinaAlbornoz/qiskit-terra
python
def __init__(self, theta, label=None): super().__init__('rx', 1, [theta], label=label)
def _define(self): '\n gate rx(theta) a {r(theta, 0) a;}\n ' from qiskit.circuit.quantumcircuit import QuantumCircuit from .r import RGate q = QuantumRegister(1, 'q') qc = QuantumCircuit(q, name=self.name) rules = [(RGate(self.params[0], 0), [q[0]], [])] qc._data = rules self.definition = qc
-1,717,500,108,928,348,000
gate rx(theta) a {r(theta, 0) a;}
qiskit/circuit/library/standard_gates/rx.py
_define
CatalinaAlbornoz/qiskit-terra
python
def _define(self): '\n \n ' from qiskit.circuit.quantumcircuit import QuantumCircuit from .r import RGate q = QuantumRegister(1, 'q') qc = QuantumCircuit(q, name=self.name) rules = [(RGate(self.params[0], 0), [q[0]], [])] qc._data = rules self.definition = qc
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None): "Return a (mutli-)controlled-RX gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n " if (num_ctrl_qubits == 1): gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state) gate.base_gate.label = self.label return gate return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
-8,959,485,569,521,171,000
Return a (multi-)controlled-RX gate. Args: num_ctrl_qubits (int): number of control qubits. label (str or None): An optional label for the gate [Default: None] ctrl_state (int or str or None): control state expressed as integer, string (e.g. '110'), or None. If None, use all 1s. Returns: ControlledGate: controlled version of this gate.
qiskit/circuit/library/standard_gates/rx.py
control
CatalinaAlbornoz/qiskit-terra
python
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None): "Return a (mutli-)controlled-RX gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n " if (num_ctrl_qubits == 1): gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state) gate.base_gate.label = self.label return gate return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
def inverse(self): 'Return inverted RX gate.\n\n :math:`RX(\\lambda)^{\\dagger} = RX(-\\lambda)`\n ' return RXGate((- self.params[0]))
-233,053,563,629,664,600
Return inverted RX gate. :math:`RX(\lambda)^{\dagger} = RX(-\lambda)`
qiskit/circuit/library/standard_gates/rx.py
inverse
CatalinaAlbornoz/qiskit-terra
python
def inverse(self): 'Return inverted RX gate.\n\n :math:`RX(\\lambda)^{\\dagger} = RX(-\\lambda)`\n ' return RXGate((- self.params[0]))
def to_matrix(self): 'Return a numpy.array for the RX gate.' cos = math.cos((self.params[0] / 2)) sin = math.sin((self.params[0] / 2)) return numpy.array([[cos, ((- 1j) * sin)], [((- 1j) * sin), cos]], dtype=complex)
3,745,042,401,274,416,600
Return a numpy.array for the RX gate.
qiskit/circuit/library/standard_gates/rx.py
to_matrix
CatalinaAlbornoz/qiskit-terra
python
def to_matrix(self): cos = math.cos((self.params[0] / 2)) sin = math.sin((self.params[0] / 2)) return numpy.array([[cos, ((- 1j) * sin)], [((- 1j) * sin), cos]], dtype=complex)
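A worked numeric check of the matrix above (a sketch, assuming RXGate is importable from this module): at theta = pi, RX reduces to -i times Pauli-X, and inverse() simply negates the angle.

import numpy
from math import pi

assert numpy.allclose(RXGate(pi).to_matrix(), numpy.array([[0, -1j], [-1j, 0]]))  # RX(pi) == -i * X
assert numpy.allclose(RXGate(pi / 2).to_matrix() @ RXGate(-pi / 2).to_matrix(), numpy.eye(2))  # RX(t) @ RX(-t) == I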
def __init__(self, theta, label=None, ctrl_state=None): 'Create new CRX gate.' super().__init__('crx', 2, [theta], num_ctrl_qubits=1, label=label, ctrl_state=ctrl_state) self.base_gate = RXGate(theta)
2,648,166,993,281,835,500
Create new CRX gate.
qiskit/circuit/library/standard_gates/rx.py
__init__
CatalinaAlbornoz/qiskit-terra
python
def __init__(self, theta, label=None, ctrl_state=None): super().__init__('crx', 2, [theta], num_ctrl_qubits=1, label=label, ctrl_state=ctrl_state) self.base_gate = RXGate(theta)
def _define(self): '\n gate cu3(theta,phi,lambda) c, t\n { u1(pi/2) t;\n cx c,t;\n u3(-theta/2,0,0) t;\n cx c,t;\n u3(theta/2,-pi/2,0) t;\n }\n ' from qiskit.circuit.quantumcircuit import QuantumCircuit from .u1 import U1Gate from .u3 import U3Gate from .x import CXGate q = QuantumRegister(2, 'q') qc = QuantumCircuit(q, name=self.name) rules = [(U1Gate((pi / 2)), [q[1]], []), (CXGate(), [q[0], q[1]], []), (U3Gate(((- self.params[0]) / 2), 0, 0), [q[1]], []), (CXGate(), [q[0], q[1]], []), (U3Gate((self.params[0] / 2), ((- pi) / 2), 0), [q[1]], [])] qc._data = rules self.definition = qc
655,310,419,067,584,000
gate cu3(theta,phi,lambda) c, t { u1(pi/2) t; cx c,t; u3(-theta/2,0,0) t; cx c,t; u3(theta/2,-pi/2,0) t; }
qiskit/circuit/library/standard_gates/rx.py
_define
CatalinaAlbornoz/qiskit-terra
python
def _define(self): '\n gate cu3(theta,phi,lambda) c, t\n { u1(pi/2) t;\n cx c,t;\n u3(-theta/2,0,0) t;\n cx c,t;\n u3(theta/2,-pi/2,0) t;\n }\n ' from qiskit.circuit.quantumcircuit import QuantumCircuit from .u1 import U1Gate from .u3 import U3Gate from .x import CXGate q = QuantumRegister(2, 'q') qc = QuantumCircuit(q, name=self.name) rules = [(U1Gate((pi / 2)), [q[1]], []), (CXGate(), [q[0], q[1]], []), (U3Gate(((- self.params[0]) / 2), 0, 0), [q[1]], []), (CXGate(), [q[0], q[1]], []), (U3Gate((self.params[0] / 2), ((- pi) / 2), 0), [q[1]], [])] qc._data = rules self.definition = qc
def inverse(self): 'Return inverse RX gate (i.e. with the negative rotation angle).' return CRXGate((- self.params[0]))
8,956,026,514,540,424,000
Return inverse RX gate (i.e. with the negative rotation angle).
qiskit/circuit/library/standard_gates/rx.py
inverse
CatalinaAlbornoz/qiskit-terra
python
def inverse(self): return CRXGate((- self.params[0]))
def to_matrix(self): 'Return a numpy.array for the CRX gate.' half_theta = (float(self.params[0]) / 2) cos = numpy.cos(half_theta) isin = (1j * numpy.sin(half_theta)) if self.ctrl_state: return numpy.array([[1, 0, 0, 0], [0, cos, 0, (- isin)], [0, 0, 1, 0], [0, (- isin), 0, cos]], dtype=complex) else: return numpy.array([[cos, 0, (- isin), 0], [0, 1, 0, 0], [(- isin), 0, cos, 0], [0, 0, 0, 1]], dtype=complex)
-4,680,870,059,257,651,000
Return a numpy.array for the CRX gate.
qiskit/circuit/library/standard_gates/rx.py
to_matrix
CatalinaAlbornoz/qiskit-terra
python
def to_matrix(self): half_theta = (float(self.params[0]) / 2) cos = numpy.cos(half_theta) isin = (1j * numpy.sin(half_theta)) if self.ctrl_state: return numpy.array([[1, 0, 0, 0], [0, cos, 0, (- isin)], [0, 0, 1, 0], [0, (- isin), 0, cos]], dtype=complex) else: return numpy.array([[cos, 0, (- isin), 0], [0, 1, 0, 0], [(- isin), 0, cos, 0], [0, 0, 0, 1]], dtype=complex)
def setup(self, **kwargs): 'Setup required parameters.\n\n :param dict kwargs: input args\n\n :return: void\n :rtype: void\n ' for (key, value) in kwargs.items(): setattr(self, key, value)
4,428,399,285,892,407,000
Setup required parameters. :param dict kwargs: input args :return: void :rtype: void
ibsng/handler/online_payment/get_all_gateway_names.py
setup
ParspooyeshFanavar/pyibsng
python
def setup(self, **kwargs): 'Setup required parameters.\n\n :param dict kwargs: input args\n\n :return: void\n :rtype: void\n ' for (key, value) in kwargs.items(): setattr(self, key, value)
@distributed_trace_async async def test_four(self, input: Optional[Union[(IO, '_models.SourcePath')]]=None, **kwargs: Any) -> None: 'TestFour should be in OperationGroupTwoOperations.\n\n :param input: Input parameter.\n :type input: IO or ~multiapi.v3.models.SourcePath\n :keyword str content_type: Media type of the body sent to the API. Default value is\n "application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",\n "image/tiff", "application/json."\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') _json = None _content = None if (content_type.split(';')[0] in ['application/json']): if (input is not None): _json = self._serialize.body(input, 'SourcePath') elif (content_type.split(';')[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']): _content = input else: raise ValueError("The content_type '{}' is not one of the allowed values: ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)) request = build_test_four_request(content_type=content_type, json=_json, content=_content, template_url=self.test_four.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
8,420,078,726,562,085,000
TestFour should be in OperationGroupTwoOperations. :param input: Input parameter. :type input: IO or ~multiapi.v3.models.SourcePath :keyword str content_type: Media type of the body sent to the API. Default value is "application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png", "image/tiff", "application/json." :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v3/aio/operations/_operation_group_two_operations.py
test_four
Sneezry/autorest.python
python
@distributed_trace_async async def test_four(self, input: Optional[Union[(IO, '_models.SourcePath')]]=None, **kwargs: Any) -> None: 'TestFour should be in OperationGroupTwoOperations.\n\n :param input: Input parameter.\n :type input: IO or ~multiapi.v3.models.SourcePath\n :keyword str content_type: Media type of the body sent to the API. Default value is\n "application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",\n "image/tiff", "application/json."\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') _json = None _content = None if (content_type.split(';')[0] in ['application/json']): if (input is not None): _json = self._serialize.body(input, 'SourcePath') elif (content_type.split(';')[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']): _content = input else: raise ValueError("The content_type '{}' is not one of the allowed values: ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)) request = build_test_four_request(content_type=content_type, json=_json, content=_content, template_url=self.test_four.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
@distributed_trace_async async def test_five(self, **kwargs: Any) -> None: 'TestFive should be in OperationGroupTwoOperations.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_test_five_request(template_url=self.test_five.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
9,097,854,441,601,655,000
TestFive should be in OperationGroupTwoOperations. :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v3/aio/operations/_operation_group_two_operations.py
test_five
Sneezry/autorest.python
python
@distributed_trace_async async def test_five(self, **kwargs: Any) -> None: 'TestFive should be in OperationGroupTwoOperations.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_test_five_request(template_url=self.test_five.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = (await self._client._pipeline.run(request, stream=False, **kwargs)) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
def take_by(iterable, count): "\n Returns elements from the input iterable by batches of N items.\n ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']\n " it = iter(iterable) while True: batch = list(islice(it, count)) if (len(batch) == 0): break (yield batch)
-649,869,201,443,456,800
Returns elements from the input iterable by batches of N items. ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
datumaro/util/__init__.py
take_by
AdaptiveCity/datumaro
python
def take_by(iterable, count): "\n Returns elements from the input iterable by batches of N items.\n ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']\n " it = iter(iterable) while True: batch = list(islice(it, count)) if (len(batch) == 0): break (yield batch)
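A usage sketch confirming the batching behaviour described in the docstring above (not from the source file):

assert list(take_by('abcdefg', 3)) == [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]
assert list(take_by([], 3)) == []  # an empty iterable yields no batches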
def escape(s: str, escapes: Iterable[Tuple[(str, str)]]) -> str: "\n 'escapes' is an iterable of (pattern, substitute) pairs\n " for (pattern, sub) in escapes: s = s.replace(pattern, sub) return s
-909,467,583,857,605,200
'escapes' is an iterable of (pattern, substitute) pairs
datumaro/util/__init__.py
escape
AdaptiveCity/datumaro
python
def escape(s: str, escapes: Iterable[Tuple[(str, str)]]) -> str: "\n \n " for (pattern, sub) in escapes: s = s.replace(pattern, sub) return s
def unescape(s: str, escapes: Iterable[Tuple[(str, str)]]) -> str: "\n 'escapes' is an iterable of (pattern, substitute) pairs\n " for (pattern, sub) in escapes: s = s.replace(sub, pattern) return s
-3,851,960,089,878,061,000
'escapes' is an iterable of (pattern, substitute) pairs
datumaro/util/__init__.py
unescape
AdaptiveCity/datumaro
python
def unescape(s: str, escapes: Iterable[Tuple[(str, str)]]) -> str: "\n \n " for (pattern, sub) in escapes: s = s.replace(sub, pattern) return s
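A round-trip sketch for the escape/unescape pair above (illustrative pairs, not from the source); note that escape applies the pairs in order, so the ampersand rule must come first to avoid double-escaping:

escapes = [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')]
s = 'a < b & c'
assert escape(s, escapes) == 'a &lt; b &amp; c'
assert unescape(escape(s, escapes), escapes) == s  # unescape reverses each substitution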
async def async_setup_entry(hass, config_entry, async_add_entities): 'Set up Homekit numbers.' hkid = config_entry.data['AccessoryPairingID'] conn = hass.data[KNOWN_DEVICES][hkid] @callback def async_add_characteristic(char: Characteristic): kwargs = NUMBER_ENTITIES.get(char.type) if (not kwargs): return False info = {'aid': char.service.accessory.aid, 'iid': char.service.iid} async_add_entities([HomeKitNumber(conn, info, char, **kwargs)], True) return True conn.add_char_factory(async_add_characteristic)
6,154,837,550,089,510,000
Set up Homekit numbers.
homeassistant/components/homekit_controller/number.py
async_setup_entry
0xFEEDC0DE64/homeassistant-core
python
async def async_setup_entry(hass, config_entry, async_add_entities): hkid = config_entry.data['AccessoryPairingID'] conn = hass.data[KNOWN_DEVICES][hkid] @callback def async_add_characteristic(char: Characteristic): kwargs = NUMBER_ENTITIES.get(char.type) if (not kwargs): return False info = {'aid': char.service.accessory.aid, 'iid': char.service.iid} async_add_entities([HomeKitNumber(conn, info, char, **kwargs)], True) return True conn.add_char_factory(async_add_characteristic)
def __init__(self, conn, info, char, device_class=None, icon=None, name=None, **kwargs): 'Initialise a HomeKit number control.' self._device_class = device_class self._icon = icon self._name = name super().__init__(conn, info, char)
1,418,250,424,926,336,800
Initialise a HomeKit number control.
homeassistant/components/homekit_controller/number.py
__init__
0xFEEDC0DE64/homeassistant-core
python
def __init__(self, conn, info, char, device_class=None, icon=None, name=None, **kwargs): self._device_class = device_class self._icon = icon self._name = name super().__init__(conn, info, char)
def get_characteristic_types(self): 'Define the homekit characteristics the entity is tracking.' return [self._char.type]
-2,544,382,386,653,076,500
Define the homekit characteristics the entity is tracking.
homeassistant/components/homekit_controller/number.py
get_characteristic_types
0xFEEDC0DE64/homeassistant-core
python
def get_characteristic_types(self): return [self._char.type]
@property def device_class(self): 'Return type of sensor.' return self._device_class
2,750,407,145,566,773,000
Return type of sensor.
homeassistant/components/homekit_controller/number.py
device_class
0xFEEDC0DE64/homeassistant-core
python
@property def device_class(self): return self._device_class
@property def icon(self): 'Return the sensor icon.' return self._icon
6,366,013,988,344,084,000
Return the sensor icon.
homeassistant/components/homekit_controller/number.py
icon
0xFEEDC0DE64/homeassistant-core
python
@property def icon(self): return self._icon
@property def min_value(self) -> float: 'Return the minimum value.' return self._char.minValue
1,605,403,851,298,037,200
Return the minimum value.
homeassistant/components/homekit_controller/number.py
min_value
0xFEEDC0DE64/homeassistant-core
python
@property def min_value(self) -> float: return self._char.minValue
@property def max_value(self) -> float: 'Return the maximum value.' return self._char.maxValue
3,902,711,280,717,481,000
Return the maximum value.
homeassistant/components/homekit_controller/number.py
max_value
0xFEEDC0DE64/homeassistant-core
python
@property def max_value(self) -> float: return self._char.maxValue
@property def step(self) -> float: 'Return the increment/decrement step.' return self._char.minStep
-2,828,572,041,455,336,400
Return the increment/decrement step.
homeassistant/components/homekit_controller/number.py
step
0xFEEDC0DE64/homeassistant-core
python
@property def step(self) -> float: return self._char.minStep
@property def value(self) -> float: 'Return the current characteristic value.' return self._char.value
-6,657,676,111,223,585,000
Return the current characteristic value.
homeassistant/components/homekit_controller/number.py
value
0xFEEDC0DE64/homeassistant-core
python
@property def value(self) -> float: return self._char.value
async def async_set_value(self, value: float): 'Set the characteristic to this value.' (await self.async_put_characteristics({self._char.type: value}))
-685,266,159,138,981,200
Set the characteristic to this value.
homeassistant/components/homekit_controller/number.py
async_set_value
0xFEEDC0DE64/homeassistant-core
python
async def async_set_value(self, value: float): (await self.async_put_characteristics({self._char.type: value}))
def _create_user(self, username, password, is_staff, is_superuser, **extra_fields): 'Create and save a CustomUser with the given username and password. ' now = timezone.now() if (not username): raise ValueError('The given username must be set') is_active = extra_fields.pop('is_active', True) user = self.model(username=username, is_staff=is_staff, is_active=is_active, is_superuser=is_superuser, last_login=now, date_joined=now, **extra_fields) user.set_password(password) user.save(using=self._db) return user
-4,101,493,751,555,540,500
Create and save a CustomUser with the given username and password.
LW-UI/authosm/models.py
_create_user
5g-media/OIDC_ON_OSMr5
python
def _create_user(self, username, password, is_staff, is_superuser, **extra_fields): ' ' now = timezone.now() if (not username): raise ValueError('The given username must be set') is_active = extra_fields.pop('is_active', True) user = self.model(username=username, is_staff=is_staff, is_active=is_active, is_superuser=is_superuser, last_login=now, date_joined=now, **extra_fields) user.set_password(password) user.save(using=self._db) return user
@property def is_authenticated(self): 'Checks for a valid authentication.' if ((self.token is not None) and utils.is_token_valid({'expires': self.token_expires})): return True else: return False
3,942,811,889,618,474,500
Checks for a valid authentication.
LW-UI/authosm/models.py
is_authenticated
5g-media/OIDC_ON_OSMr5
python
@property def is_authenticated(self): if ((self.token is not None) and utils.is_token_valid({'expires': self.token_expires})): return True else: return False
def construct(self, *inputs): 'fc network' x = inputs[0] out = self.fc1(x) out = self.activation(out) out = self.fc2(out) out = self.activation(out) out = self.fc3(out) out = self.activation(out) out = self.fc4(out) out = self.activation(out) out = self.fc5(out) return out
988,839,359,339,869,200
fc network
MindElec/examples/physics_driven/frequency_domain_maxwell/src/model.py
construct
mindspore-ai/mindscience
python
def construct(self, *inputs): x = inputs[0] out = self.fc1(x) out = self.activation(out) out = self.fc2(out) out = self.activation(out) out = self.fc3(out) out = self.activation(out) out = self.fc4(out) out = self.activation(out) out = self.fc5(out) return out
def create_workplace(): '\n :return: void\n Creates folder\n ' for directory in [APP_FOLDER, API_FOLDER, DATA_FOLDER]: if (not os.path.exists(directory)): os.makedirs(directory)
1,398,987,062,235,902,000
:return: void Creates folder
pyhodl/app.py
create_workplace
sirfoga/pyhodl
python
def create_workplace(): '\n :return: void\n Creates folder\n ' for directory in [APP_FOLDER, API_FOLDER, DATA_FOLDER]: if (not os.path.exists(directory)): os.makedirs(directory)
def get_coin(symbol): '\n :param symbol: str\n Symbol of coin\n :return: CryptoCoin\n Coin if a crypto-coin exists with that name\n ' candidate = CryptoCoin(symbol, symbol) for coin in CRYPTO_COINS: if (coin.symbol == candidate): return coin
1,010,828,971,042,119,300
:param symbol: str Symbol of coin :return: CryptoCoin Coin if a crypto-coin exists with that name
pyhodl/app.py
get_coin
sirfoga/pyhodl
python
def get_coin(symbol): '\n :param symbol: str\n Symbol of coin\n :return: CryptoCoin\n Coin if a crypto-coin exists with that name\n ' candidate = CryptoCoin(symbol, symbol) for coin in CRYPTO_COINS: if (coin.symbol == candidate): return coin
def _read_config(self): '\n :return: {}\n Config data\n ' self.raw = JSONParser(self.config_file).get_content() for (key, value) in self.raw.items(): self.data[key] = value
-7,357,402,466,340,129,000
:return: void Loads config data from the config file into self.data
pyhodl/app.py
_read_config
sirfoga/pyhodl
python
def _read_config(self): '\n :return: {}\n Config data\n ' self.raw = JSONParser(self.config_file).get_content() for (key, value) in self.raw.items(): self.data[key] = value
def create_config(self): '\n :return: void\n Creates config file\n ' if os.path.exists(self.config_file): raise ValueError('Creating new config will erase previous data!') write_dicts_to_json({}, self.config_file)
-5,348,249,475,872,377,000
:return: void Creates config file
pyhodl/app.py
create_config
sirfoga/pyhodl
python
def create_config(self): '\n :return: void\n Creates config file\n ' if os.path.exists(self.config_file): raise ValueError('Creating new config will erase previous data!') write_dicts_to_json({}, self.config_file)
def get(self, key): '\n :param key: str\n What you want\n :return: {}\n Item you want\n ' return self.data[key]
-2,567,232,002,435,149,300
:param key: str What you want :return: {} Item you want
pyhodl/app.py
get
sirfoga/pyhodl
python
def get(self, key): '\n :param key: str\n What you want\n :return: {}\n Item you want\n ' return self.data[key]
def save(self): '\n :return: void\n Saves app data to local config file\n ' write_dicts_to_json(self.data, self.config_file)
-5,262,518,568,008,340,000
:return: void Saves app data to local config file
pyhodl/app.py
save
sirfoga/pyhodl
python
def save(self): '\n :return: void\n Saves app data to local config file\n ' write_dicts_to_json(self.data, self.config_file)
def __init__(self, pairs=[], default_fields=None): '\n Constructs a mapping of information about a model.\n :class:`~revscoring.scoring.ModelInfo` objects are usually nested\n within each other to provide a convenient tree structure for\n :func:`~revscoring.scoring.ModelInfo.lookup` and\n :func:`~revscoring.scoring.ModelInfo.format`.\n ' self._data = OrderedDict(pairs) self._default_fields = (set(default_fields) if (default_fields is not None) else None)
4,558,162,301,263,171,000
Constructs a mapping of information about a model. :class:`~revscoring.scoring.ModelInfo` objects are usually nested within each other to provide a convenient tree structure for :func:`~revscoring.scoring.ModelInfo.lookup` and :func:`~revscoring.scoring.ModelInfo.format`.
revscoring/scoring/model_info.py
__init__
leojoubert/revscoring
python
def __init__(self, pairs=[], default_fields=None): '\n Constructs a mapping of information about a model.\n :class:`~revscoring.scoring.ModelInfo` objects are usually nested\n within each other to provide a convenient tree structure for\n :func:`~revscoring.scoring.ModelInfo.lookup` and\n :func:`~revscoring.scoring.ModelInfo.format`.\n ' self._data = OrderedDict(pairs) self._default_fields = (set(default_fields) if (default_fields is not None) else None)
def lookup(self, path=None): '\n Looks up a specific information value based on either a string pattern\n or a path.\n\n For example, the pattern "stats.roc_auc.labels.true" is the same as\n the path ``[\'stats\', \'roc_auc\', \'labels\', True]``.\n\n :Parameters:\n path : `str` | `list`\n The location of the information to lookup.\n ' if isinstance(path, str): path = util.parse_pattern(path) elif (path is None): path = [] d = self remaining_path = list(path) while (len(path) > 0): key = path.pop(0) d = try_key(key, d) if hasattr(d, 'lookup'): return d.lookup(remaining_path) else: continue return d
-878,813,525,819,076,000
Looks up a specific information value based on either a string pattern or a path. For example, the pattern "stats.roc_auc.labels.true" is the same as the path ``['stats', 'roc_auc', 'labels', True]``. :Parameters: path : `str` | `list` The location of the information to lookup.
revscoring/scoring/model_info.py
lookup
leojoubert/revscoring
python
def lookup(self, path=None): '\n Looks up a specific information value based on either a string pattern\n or a path.\n\n For example, the pattern "stats.roc_auc.labels.true" is the same as\n the path ``[\'stats\', \'roc_auc\', \'labels\', True]``.\n\n :Parameters:\n path : `str` | `list`\n The location of the information to lookup.\n ' if isinstance(path, str): path = util.parse_pattern(path) elif (path is None): path = [] d = self remaining_path = list(path) while (len(path) > 0): key = path.pop(0) d = try_key(key, d) if hasattr(d, 'lookup'): return d.lookup(remaining_path) else: continue return d
def format(self, paths=None, formatting='str', **kwargs): '\n Format a representation of the model information in a useful way.\n\n :Parameters:\n paths : `iterable` ( `str` | [`str`] )\n A set of paths to use when selecting which information should\n formatted. Everything beneath a provided path in the tree\n will be formatted. E.g. `statistics.roc_auc` and `statistics`\n will format redundantly because `roc_auc` is already within\n `statistics`. Alternatively `statistics.roc_auc` and\n `statistics.pr_auc` will format only those two specific\n bits of information.\n formatting : "json" or "str"\n Which output formatting do you want? "str" returns something\n nice to show on the command-line. "json" returns something\n that will pass through :func:`json.dump` without error.\n ' paths = (paths or []) _paths = [(util.parse_pattern(path) if isinstance(path, str) else path) for path in paths] path_tree = util.treeify(_paths) if (formatting == 'str'): return self.format_str(path_tree, **kwargs) elif (formatting == 'json'): return self.format_json(path_tree, **kwargs) else: raise ValueError('Formatting {0} is not available for {1}.'.format(formatting, self.__class__.__name__))
-6,262,320,247,850,609,000
Format a representation of the model information in a useful way. :Parameters: paths : `iterable` ( `str` | [`str`] ) A set of paths to use when selecting which information should be formatted. Everything beneath a provided path in the tree will be formatted. E.g. `statistics.roc_auc` and `statistics` will format redundantly because `roc_auc` is already within `statistics`. Alternatively `statistics.roc_auc` and `statistics.pr_auc` will format only those two specific bits of information. formatting : "json" or "str" Which output formatting do you want? "str" returns something nice to show on the command-line. "json" returns something that will pass through :func:`json.dump` without error.
revscoring/scoring/model_info.py
format
leojoubert/revscoring
python
def format(self, paths=None, formatting='str', **kwargs): '\n Format a representation of the model information in a useful way.\n\n :Parameters:\n paths : `iterable` ( `str` | [`str`] )\n A set of paths to use when selecting which information should\n formatted. Everything beneath a provided path in the tree\n will be formatted. E.g. `statistics.roc_auc` and `statistics`\n will format redundantly because `roc_auc` is already within\n `statistics`. Alternatively `statistics.roc_auc` and\n `statistics.pr_auc` will format only those two specific\n bits of information.\n formatting : "json" or "str"\n Which output formatting do you want? "str" returns something\n nice to show on the command-line. "json" returns something\n that will pass through :func:`json.dump` without error.\n ' paths = (paths or []) _paths = [(util.parse_pattern(path) if isinstance(path, str) else path) for path in paths] path_tree = util.treeify(_paths) if (formatting == 'str'): return self.format_str(path_tree, **kwargs) elif (formatting == 'json'): return self.format_json(path_tree, **kwargs) else: raise ValueError('Formatting {0} is not available for {1}.'.format(formatting, self.__class__.__name__))
def measure_curvature_real(self, ploty, x, y): '\n Calculates the curvature of polynomial functions in meters.\n ' ym_per_pix = (30 / 720) xm_per_pix = (3.7 / 700) fit_cr = np.polyfit((y * ym_per_pix), (x * xm_per_pix), 2) y_eval = np.max(ploty) curverad = (((1 + (((((2 * fit_cr[0]) * y_eval) * ym_per_pix) + fit_cr[1]) ** 2)) ** 1.5) / np.absolute((2 * fit_cr[0]))) self.radius_of_curvature = curverad return curverad
-4,170,597,890,059,986,000
Calculates the curvature of polynomial functions in meters.
lane/Lane.py
measure_curvature_real
jo-ny/CarND-Advanced-Lane-Lines
python
def measure_curvature_real(self, ploty, x, y): '\n \n ' ym_per_pix = (30 / 720) xm_per_pix = (3.7 / 700) fit_cr = np.polyfit((y * ym_per_pix), (x * xm_per_pix), 2) y_eval = np.max(ploty) curverad = (((1 + (((((2 * fit_cr[0]) * y_eval) * ym_per_pix) + fit_cr[1]) ** 2)) ** 1.5) / np.absolute((2 * fit_cr[0]))) self.radius_of_curvature = curverad return curverad
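The method above implements the standard radius-of-curvature formula for a quadratic x = A*y**2 + B*y + C, namely R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated at the bottom of the image after rescaling pixels to metres. A standalone numeric sketch (metre-per-pixel constants copied from the method, lane pixels synthetic):

import numpy as np

ym_per_pix, xm_per_pix = 30 / 720, 3.7 / 700
ploty = np.linspace(0, 719, 720)
x = 1e-4 * ploty ** 2 + 0.1 * ploty + 200  # synthetic lane-pixel x positions
fit = np.polyfit(ploty * ym_per_pix, x * xm_per_pix, 2)  # refit in metres
R = (1 + (2 * fit[0] * np.max(ploty) * ym_per_pix + fit[1]) ** 2) ** 1.5 / np.absolute(2 * fit[0])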
def build_untouched_content(self): " Builds a string with the contents of the file that must be left as is,\n and replaces the wxGlade blocks with tags that in turn will be replaced\n by the new wxGlade blocks\n\n WARNING: NOT YET COMPLETE -- crazyinsomniac\n\n alb - almost done :)\n WARNING: There is *NO* support for here documents: if you put wxGlade\n blocks inside a here document, you're likely going into troubles...\n " BaseSourceFileContent.build_untouched_content(self) inside_block = False inside_pod = False tmp_in = self._load_file(self.name) out_lines = [] check_old_methods = [] for line in tmp_in: result = self.rec_pod.match(line) if result: inside_pod = True if inside_pod: out_lines.append(line) if line.startswith('=cut'): inside_pod = False continue result = self.rec_class_decl.match(line) if result: if (not self.class_name): out_lines.append(('<%swxGlade insert new_classes>' % self.nonce)) self.new_classes_inserted = True self.class_name = result.group(1) self.class_name = self.format_classname(self.class_name) self.classes.add(self.class_name) out_lines.append(line) elif (not inside_block): result = self.rec_block_start.match(line) if result: spaces = result.group('spaces') which_class = result.group('classname') which_block = result.group('block') if (not which_class): which_class = self.class_name else: which_class = self.format_classname(which_class) self.spaces[which_class] = spaces inside_block = True if (not self.class_name): out_lines.append(('<%swxGlade replace %s>' % (self.nonce, which_block))) else: if (which_block in ('__do_layout', '__set_properties')): check_old_methods.append(len(out_lines)) out_lines.append(('<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block))) else: result = self.rec_event_handler.match(line) if result: which_handler = result.group('handler') which_class = self.format_classname(result.group('class')) self.event_handlers.setdefault(which_class, set()).add(which_handler) if (self.class_name and self.is_end_of_class(line)): out_lines.append(('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name))) out_lines.append(line) elif self.rec_block_end.match(line): inside_block = False if (not self.new_classes_inserted): out_lines.append(('<%swxGlade insert new_classes>' % self.nonce)) while check_old_methods: i = check_old_methods.pop((- 1)) if (out_lines[(i + 1)].strip() == '}'): self._remove_method(out_lines, (i - 2), (i + 1)) self.content = out_lines
191,555,267,265,504,800
Builds a string with the contents of the file that must be left as is, and replaces the wxGlade blocks with tags that in turn will be replaced by the new wxGlade blocks WARNING: NOT YET COMPLETE -- crazyinsomniac alb - almost done :) WARNING: There is *NO* support for here documents: if you put wxGlade blocks inside a here document, you're likely to run into trouble...
codegen/perl_codegen.py
build_untouched_content
ardovm/wxGlade
python
def build_untouched_content(self): " Builds a string with the contents of the file that must be left as is,\n and replaces the wxGlade blocks with tags that in turn will be replaced\n by the new wxGlade blocks\n\n WARNING: NOT YET COMPLETE -- crazyinsomniac\n\n alb - almost done :)\n WARNING: There is *NO* support for here documents: if you put wxGlade\n blocks inside a here document, you're likely going into troubles...\n " BaseSourceFileContent.build_untouched_content(self) inside_block = False inside_pod = False tmp_in = self._load_file(self.name) out_lines = [] check_old_methods = [] for line in tmp_in: result = self.rec_pod.match(line) if result: inside_pod = True if inside_pod: out_lines.append(line) if line.startswith('=cut'): inside_pod = False continue result = self.rec_class_decl.match(line) if result: if (not self.class_name): out_lines.append(('<%swxGlade insert new_classes>' % self.nonce)) self.new_classes_inserted = True self.class_name = result.group(1) self.class_name = self.format_classname(self.class_name) self.classes.add(self.class_name) out_lines.append(line) elif (not inside_block): result = self.rec_block_start.match(line) if result: spaces = result.group('spaces') which_class = result.group('classname') which_block = result.group('block') if (not which_class): which_class = self.class_name else: which_class = self.format_classname(which_class) self.spaces[which_class] = spaces inside_block = True if (not self.class_name): out_lines.append(('<%swxGlade replace %s>' % (self.nonce, which_block))) else: if (which_block in ('__do_layout', '__set_properties')): check_old_methods.append(len(out_lines)) out_lines.append(('<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block))) else: result = self.rec_event_handler.match(line) if result: which_handler = result.group('handler') which_class = self.format_classname(result.group('class')) self.event_handlers.setdefault(which_class, set()).add(which_handler) if (self.class_name and self.is_end_of_class(line)): out_lines.append(('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name))) out_lines.append(line) elif self.rec_block_end.match(line): inside_block = False if (not self.new_classes_inserted): out_lines.append(('<%swxGlade insert new_classes>' % self.nonce)) while check_old_methods: i = check_old_methods.pop((- 1)) if (out_lines[(i + 1)].strip() == '}'): self._remove_method(out_lines, (i - 2), (i + 1)) self.content = out_lines
def _get_app_template(self, app, top_win): 'build template string for application' if (not self.app_name): return None klass = app.klass if self._use_gettext: gettext1 = ['%(tab)smy $local = Wx::Locale->new("English", "en", "en"); # replace with ??', '%(tab)s$local->AddCatalog("%(textdomain)s"); # replace with the appropriate catalog name\n'] else: gettext1 = [] if klass: ret = ['package %(klass)s;', '', 'use base qw(Wx::App);', 'use strict;', '%(pl_import)s', 'sub OnInit {', '%(tab)smy( $self ) = shift;', '', '%(tab)sWx::InitAllImageHandlers();', '', '%(tab)smy $%(top_win)s = %(top_win_class)s->new();', '', '%(tab)s$self->SetTopWindow($%(top_win)s);', '%(tab)s$%(top_win)s->Show(1);', '', '%(tab)sreturn 1;', '}'] if self._mark_blocks: ret.append('# end of class %(klass)s') ret += ((['', 'package main;', '', 'unless(caller){'] + gettext1) + ['%(tab)smy $%(name)s = %(klass)s->new();', '%(tab)s$%(name)s->MainLoop();', '}', '']) else: ret = ((['1;', '', 'package main;', '%(pl_import)s', 'unless(caller){'] + gettext1) + ['%(tab)slocal *Wx::App::OnInit = sub{1};', '%(tab)smy $%(name)s = Wx::App->new();', '%(tab)sWx::InitAllImageHandlers();', '', '%(tab)smy $%(top_win)s = %(top_win_class)s->new();', '', '%(tab)s$%(name)s->SetTopWindow($%(top_win)s);', '%(tab)s$%(top_win)s->Show(1);', '%(tab)s$%(name)s->MainLoop();', '}', '']) return '\n'.join(ret)
-6,812,698,104,415,558,000
build template string for application
codegen/perl_codegen.py
_get_app_template
ardovm/wxGlade
python
def _get_app_template(self, app, top_win): if (not self.app_name): return None klass = app.klass if self._use_gettext: gettext1 = ['%(tab)smy $local = Wx::Locale->new("English", "en", "en"); # replace with ??', '%(tab)s$local->AddCatalog("%(textdomain)s"); # replace with the appropriate catalog name\n'] else: gettext1 = [] if klass: ret = ['package %(klass)s;', '', 'use base qw(Wx::App);', 'use strict;', '%(pl_import)s', 'sub OnInit {', '%(tab)smy( $self ) = shift;', '', '%(tab)sWx::InitAllImageHandlers();', '', '%(tab)smy $%(top_win)s = %(top_win_class)s->new();', '', '%(tab)s$self->SetTopWindow($%(top_win)s);', '%(tab)s$%(top_win)s->Show(1);', '', '%(tab)sreturn 1;', '}'] if self._mark_blocks: ret.append('# end of class %(klass)s') ret += ((['', 'package main;', '', 'unless(caller){'] + gettext1) + ['%(tab)smy $%(name)s = %(klass)s->new();', '%(tab)s$%(name)s->MainLoop();', '}', '']) else: ret = ((['1;', '', 'package main;', '%(pl_import)s', 'unless(caller){'] + gettext1) + ['%(tab)slocal *Wx::App::OnInit = sub{1};', '%(tab)smy $%(name)s = Wx::App->new();', '%(tab)sWx::InitAllImageHandlers();', '', '%(tab)smy $%(top_win)s = %(top_win_class)s->new();', '', '%(tab)s$%(name)s->SetTopWindow($%(top_win)s);', '%(tab)s$%(top_win)s->Show(1);', '%(tab)s$%(name)s->MainLoop();', '}', '']) return '\n'.join(ret)
def _quote_str(self, s): "Escape all unicode characters to there unicode code points in form of \\uxxxx.\n The returned string is a pure ascii string.\n Normal ascii characters like \\n or \\t won't be escaped.\n\n note: wxGlade don't handles file encoding well currently. Thereby\n we escape all unicode characters.\n\n note: The string 's' is encoded with self.app_encoding already.\n\n see: BaseLangCodeWriter._quote_str for additional details\n see: _recode_x80_xff()" s = s.replace('$', '\\$') s = s.replace('@', '\\@') if (not isinstance(s, compat.unicode)): s = s.decode(self.app_encoding) try: dummy = s.encode('ascii') if self._use_gettext: return ('_T("%s")' % s) else: return ('"%s"' % s) except UnicodeError: pass s = s.encode('raw-unicode-escape') s = self._recode_x80_xff(s) if compat.PYTHON3: s = s.decode('ASCII') s = re.sub('\\\\u([0-9a-f]{4})', '\\\\N{U+\\1}', s) if self._use_gettext: return ('_T("%s")' % s) else: return ('"%s"' % s)
-2,497,606,974,384,824,000
Escape all unicode characters to their unicode code points in the form of \uxxxx. The returned string is a pure ascii string. Normal ascii characters like \n or \t won't be escaped. note: wxGlade doesn't handle file encoding well currently. Therefore we escape all unicode characters. note: The string 's' is encoded with self.app_encoding already. see: BaseLangCodeWriter._quote_str for additional details see: _recode_x80_xff()
codegen/perl_codegen.py
_quote_str
ardovm/wxGlade
python
def _quote_str(self, s): "Escape all unicode characters to there unicode code points in form of \\uxxxx.\n The returned string is a pure ascii string.\n Normal ascii characters like \\n or \\t won't be escaped.\n\n note: wxGlade don't handles file encoding well currently. Thereby\n we escape all unicode characters.\n\n note: The string 's' is encoded with self.app_encoding already.\n\n see: BaseLangCodeWriter._quote_str for additional details\n see: _recode_x80_xff()" s = s.replace('$', '\\$') s = s.replace('@', '\\@') if (not isinstance(s, compat.unicode)): s = s.decode(self.app_encoding) try: dummy = s.encode('ascii') if self._use_gettext: return ('_T("%s")' % s) else: return ('"%s"' % s) except UnicodeError: pass s = s.encode('raw-unicode-escape') s = self._recode_x80_xff(s) if compat.PYTHON3: s = s.decode('ASCII') s = re.sub('\\\\u([0-9a-f]{4})', '\\\\N{U+\\1}', s) if self._use_gettext: return ('_T("%s")' % s) else: return ('"%s"' % s)
def _get_class_filename(self, klass): 'Returns the name for a Perl module (.pm) to store a single class in multi file projects' return os.path.join(self.out_dir, (klass.replace('::', os.sep) + '.pm'))
2,473,388,349,519,991,000
Returns the name for a Perl module (.pm) to store a single class in multi file projects
codegen/perl_codegen.py
_get_class_filename
ardovm/wxGlade
python
def _get_class_filename(self, klass): return os.path.join(self.out_dir, (klass.replace('::', os.sep) + '.pm'))
def object_id(identifier, keep_version=False) -> str: '\n Returns the core object_id of a CURIE, with or without the version suffix.\n Note: not designed to be used with a URI (will give an invalid outcome)\n :param identifier: candidate CURIE identifier for processing\n :param keep_version: True if the version string suffix is to be retained in the identifier\n :return:\n ' if (not identifier): return identifier if (':' in identifier): identifier = identifier.split(':')[1] if ((not keep_version) and ('.' in identifier)): identifier = identifier.split('.')[0] return identifier
-7,393,254,020,889,099,000
Returns the core object_id of a CURIE, with or without the version suffix. Note: not designed to be used with a URI (will give an invalid outcome) :param identifier: candidate CURIE identifier for processing :param keep_version: True if the version string suffix is to be retained in the identifier :return:
tests/test_scripts/output/gennamespace/meta_namespaces.py
object_id
hsolbrig/biolinkml
python
def object_id(identifier, keep_version=False) -> str: '\n Returns the core object_id of a CURIE, with or without the version suffix.\n Note: not designed to be used with a URI (will give an invalid outcome)\n :param identifier: candidate CURIE identifier for processing\n :param keep_version: True if the version string suffix is to be retained in the identifier\n :return:\n ' if (not identifier): return identifier if (':' in identifier): identifier = identifier.split(':')[1] if ((not keep_version) and ('.' in identifier)): identifier = identifier.split('.')[0] return identifier
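Quick examples of the CURIE-trimming behaviour described above (illustrative identifiers, not from the source):

assert object_id('GO:0008150') == '0008150'  # prefix stripped
assert object_id('NCBIGene:84570.2') == '84570'  # version suffix dropped by default
assert object_id('NCBIGene:84570.2', keep_version=True) == '84570.2'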
def fix_curies(identifiers, prefix=''): '\n Applies the specified XMLNS prefix to (an) identifier(s) known\n to be "raw" IDs as keys in a dictionary or elements in a list (or a simple string)\n :param identifiers:\n :param prefix:\n :return:\n ' if (not prefix): return identifiers if isinstance(identifiers, dict): curie_dict = defaultdict(dict) for key in identifiers.keys(): curie_dict[((prefix + ':') + object_id(key, keep_version=True))] = identifiers[key] return curie_dict elif isinstance(identifiers, str): return ((prefix + ':') + object_id(identifiers, keep_version=True)) elif isinstance(identifiers, Iterable): return [((prefix + ':') + object_id(x, keep_version=True)) for x in identifiers] else: raise RuntimeError("fix_curie() is not sure how to fix an instance of data type '", type(identifiers))
-171,307,059,022,725,060
Applies the specified XMLNS prefix to (an) identifier(s) known to be "raw" IDs as keys in a dictionary or elements in a list (or a simple string) :param identifiers: :param prefix: :return:
tests/test_scripts/output/gennamespace/meta_namespaces.py
fix_curies
hsolbrig/biolinkml
python
def fix_curies(identifiers, prefix=''): '\n Applies the specified XMLNS prefix to (an) identifier(s) known\n to be "raw" IDs as keys in a dictionary or elements in a list (or a simple string)\n :param identifiers:\n :param prefix:\n :return:\n ' if (not prefix): return identifiers if isinstance(identifiers, dict): curie_dict = defaultdict(dict) for key in identifiers.keys(): curie_dict[((prefix + ':') + object_id(key, keep_version=True))] = identifiers[key] return curie_dict elif isinstance(identifiers, str): return ((prefix + ':') + object_id(identifiers, keep_version=True)) elif isinstance(identifiers, Iterable): return [((prefix + ':') + object_id(x, keep_version=True)) for x in identifiers] else: raise RuntimeError("fix_curie() is not sure how to fix an instance of data type '", type(identifiers))
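A companion sketch for fix_curies, which applies a prefix across a dict, a plain string, or any other iterable (illustrative identifiers):

assert fix_curies('84570', prefix='NCBIGene') == 'NCBIGene:84570'
assert fix_curies(['1', '2'], prefix='HGNC') == ['HGNC:1', 'HGNC:2']
assert fix_curies({'1': 'x'}, prefix='HGNC') == {'HGNC:1': 'x'}  # keys rewritten, values kept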
@classmethod def parse_curie(cls, curie: str) -> Tuple[(CurieNamespace, str)]: '\n Parse a candidate CURIE\n :param curie: candidate curie string\n :return: CURIE namespace and object_id\n ' found = (CurieNamespace('', ''), curie) if (':' in curie): part = curie.split(':') prefix = part[0].upper() if (prefix in cls._get_prefix_map()): found = (cls._prefix_map[prefix], part[1]) return found
5,256,064,347,305,491,000
Parse a candidate CURIE :param curie: candidate curie string :return: CURIE namespace and object_id
tests/test_scripts/output/gennamespace/meta_namespaces.py
parse_curie
hsolbrig/biolinkml
python
@classmethod def parse_curie(cls, curie: str) -> Tuple[(CurieNamespace, str)]: '\n Parse a candidate CURIE\n :param curie: candidate curie string\n :return: CURIE namespace and object_id\n ' found = (CurieNamespace('', ''), curie) if (':' in curie): part = curie.split(':') prefix = part[0].upper() if (prefix in cls._get_prefix_map()): found = (cls._prefix_map[prefix], part[1]) return found
@classmethod def parse_uri(cls, uri: str) -> Tuple[(CurieNamespace, str)]: '\n Parse a candidate URI\n :param uri: candidate URI string\n :return: namespace and object_id\n ' found = (CurieNamespace('', ''), uri) for ns in cls._namespaces: base_uri = str(ns) if uri.startswith(base_uri): object_id = uri.replace(base_uri, '') found = (ns, object_id) break return found
-3,759,787,749,700,318,700
Parse a candidate URI :param uri: candidate URI string :return: namespace and object_id
tests/test_scripts/output/gennamespace/meta_namespaces.py
parse_uri
hsolbrig/biolinkml
python
@classmethod def parse_uri(cls, uri: str) -> Tuple[(CurieNamespace, str)]: '\n Parse a candidate URI\n :param uri: candidate URI string\n :return: namespace and object_id\n ' found = (CurieNamespace('', ''), uri) for ns in cls._namespaces: base_uri = str(ns) if uri.startswith(base_uri): object_id = uri.replace(base_uri, '') found = (ns, object_id) break return found
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=(_N_CPU + 1), drop_remainder=True, filter=None, map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=(- 1)): 'Disk image batch dataset.\n\n This function is suitable for jpg and png files\n\n Arguments:\n img_paths : String list or 1-D tensor, each of which is an iamge path\n labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label\n ' if (labels is None): dataset = tf.data.Dataset.from_tensor_slices(img_paths) elif isinstance(labels, tuple): dataset = tf.data.Dataset.from_tensor_slices(((img_paths,) + tuple(labels))) else: dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels)) def parse_func(path, *label): img = tf.read_file(path) img = tf.image.decode_png(img, 1) return ((img,) + label) if map_func: def map_func_(*args): return map_func(*parse_func(*args)) else: map_func_ = parse_func dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter, map_func_, num_threads, shuffle, buffer_size, repeat) return dataset
-6,331,786,808,887,097,000
Disk image batch dataset. This function is suitable for jpg and png files Arguments: img_paths : String list or 1-D tensor, each of which is an image path labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
general/utilTF1/dataset.py
disk_image_batch_dataset
duennbart/masterthesis_VAE
python
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=(_N_CPU + 1), drop_remainder=True, filter=None, map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=(- 1)): 'Disk image batch dataset.\n\n    This function is suitable for jpg and png files\n\n    Arguments:\n        img_paths : String list or 1-D tensor, each of which is an image path\n        labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label\n    ' if (labels is None): dataset = tf.data.Dataset.from_tensor_slices(img_paths) elif isinstance(labels, tuple): dataset = tf.data.Dataset.from_tensor_slices(((img_paths,) + tuple(labels))) else: dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels)) def parse_func(path, *label): img = tf.read_file(path) img = tf.image.decode_png(img, 1) return ((img,) + label) if map_func: def map_func_(*args): return map_func(*parse_func(*args)) else: map_func_ = parse_func dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter, map_func_, num_threads, shuffle, buffer_size, repeat) return dataset
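A hedged TF1-style usage sketch; the file names are placeholders and batch_dataset (called internally) is assumed to come from the same module:

import tensorflow as tf  # TF1 API, matching tf.read_file / tf.image.decode_png above

paths = ['img0.png', 'img1.png', 'img2.png']  # hypothetical single-channel PNGs
def to_float(img, *labels):
    # scale uint8 pixels to [-1, 1] before batching
    return ((tf.cast(img, tf.float32) / 127.5) - 1.0,) + labels

ds = disk_image_batch_dataset(paths, batch_size=2, map_func=to_float, repeat=1)
batch = ds.make_one_shot_iterator().get_next()  # evaluate inside a tf.Session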
@option(Configs.model) def feedback_transformer(c: Configs): '\n Create [original feedback transformer](index.html).\n ' from labml_nn.transformers.feedback import FeedbackTransformer, FeedbackTransformerLayer, FeedbackAttention, FeedForward return AutoregressiveModel(c.n_tokens, c.d_model, FeedbackTransformer(FeedbackTransformerLayer(d_model=c.d_model, attn=FeedbackAttention(c.heads, c.d_model, c.dropout), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout), c.n_layers)).to(c.device)
-7,193,151,971,068,809,000
Create [original feedback transformer](index.html).
labml_nn/transformers/feedback/experiment.py
feedback_transformer
drpraneshkrishnan/nn
python
@option(Configs.model) def feedback_transformer(c: Configs): '\n \n ' from labml_nn.transformers.feedback import FeedbackTransformer, FeedbackTransformerLayer, FeedbackAttention, FeedForward return AutoregressiveModel(c.n_tokens, c.d_model, FeedbackTransformer(FeedbackTransformerLayer(d_model=c.d_model, attn=FeedbackAttention(c.heads, c.d_model, c.dropout), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout), c.n_layers)).to(c.device)
@option(Configs.model) def feedback_transformer_kv(c: Configs): '\n Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.\n ' from labml_nn.transformers.feedback import FeedbackTransformerKV, FeedbackTransformerLayer, FeedbackAttention, FeedForward return AutoregressiveModel(c.n_tokens, c.d_model, FeedbackTransformerKV(FeedbackTransformerLayer(d_model=c.d_model, attn=FeedbackAttention(c.heads, c.d_model, c.dropout, is_kv_precomputed=True), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout), c.n_layers, c.d_model, c.heads)).to(c.device)
4,076,765,918,716,722,000
Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.
labml_nn/transformers/feedback/experiment.py
feedback_transformer_kv
drpraneshkrishnan/nn
python
@option(Configs.model) def feedback_transformer_kv(c: Configs): '\n \n ' from labml_nn.transformers.feedback import FeedbackTransformerKV, FeedbackTransformerLayer, FeedbackAttention, FeedForward return AutoregressiveModel(c.n_tokens, c.d_model, FeedbackTransformerKV(FeedbackTransformerLayer(d_model=c.d_model, attn=FeedbackAttention(c.heads, c.d_model, c.dropout, is_kv_precomputed=True), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout), c.n_layers, c.d_model, c.heads)).to(c.device)
def get_version() -> str: '\n Get the library version from pyproject.toml\n ' path = (Path(__file__).resolve().parents[2] / 'pyproject.toml') pyproject = toml.loads(open(str(path)).read()) return cast(str, pyproject['tool']['poetry']['version'])
8,767,237,860,203,322,000
Get the library version from pyproject.toml
docs/source/conf.py
get_version
Sanjana12111994/dataprep
python
def get_version() -> str: '\n \n ' path = (Path(__file__).resolve().parents[2] / 'pyproject.toml') pyproject = toml.loads(open(str(path)).read()) return cast(str, pyproject['tool']['poetry']['version'])
def is_arithmetic_type(arrow_dtype: pa.DataType) -> bool: 'Check whether this is a type that supports arithmetic.' return (pa.types.is_integer(arrow_dtype) or pa.types.is_floating(arrow_dtype) or pa.types.is_decimal(arrow_dtype))
-8,424,246,547,091,056,000
Check whether this is a type that supports arithmetic.
tests/test_pandas_extension.py
is_arithmetic_type
artemru/fletcher
python
def is_arithmetic_type(arrow_dtype: pa.DataType) -> bool: return (pa.types.is_integer(arrow_dtype) or pa.types.is_floating(arrow_dtype) or pa.types.is_decimal(arrow_dtype))
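is_arithmetic_type can be sanity-checked directly against pyarrow:

import pyarrow as pa

assert is_arithmetic_type(pa.int64())
assert is_arithmetic_type(pa.float32())
assert is_arithmetic_type(pa.decimal128(10, 2))
assert not is_arithmetic_type(pa.string())
assert not is_arithmetic_type(pa.bool_())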
@pytest.fixture(params=[True, False]) def box_in_series(request): 'Whether to box the data in a Series.' return request.param
1,452,088,377,719,459,800
Whether to box the data in a Series.
tests/test_pandas_extension.py
box_in_series
artemru/fletcher
python
@pytest.fixture(params=[True, False]) def box_in_series(request): return request.param
@pytest.fixture def data_repeated(fletcher_type, fletcher_array): 'Return different versions of data for count times.' pass def gen(count): for _ in range(count): (yield fletcher_array(fletcher_type.data_repeated(), dtype=fletcher_type.dtype)) (yield gen)
8,710,905,210,859,659,000
Return different versions of data for count times.
tests/test_pandas_extension.py
data_repeated
artemru/fletcher
python
@pytest.fixture def data_repeated(fletcher_type, fletcher_array): pass def gen(count): for _ in range(count): (yield fletcher_array(fletcher_type.data_repeated(), dtype=fletcher_type.dtype)) (yield gen)
@pytest.fixture def data_for_grouping(fletcher_type, fletcher_array): 'Fixture with data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n ' return fletcher_array(fletcher_type.data_for_grouping, dtype=fletcher_type.dtype)
-8,333,951,993,508,084,000
Fixture with data for factorization, grouping, and unique tests. Expected to be like [B, B, NA, NA, A, A, B, C] Where A < B < C and NA is missing
tests/test_pandas_extension.py
data_for_grouping
artemru/fletcher
python
@pytest.fixture def data_for_grouping(fletcher_type, fletcher_array): 'Fixture with data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n ' return fletcher_array(fletcher_type.data_for_grouping, dtype=fletcher_type.dtype)
@pytest.fixture def data_for_sorting(fletcher_type, fletcher_array): 'Length-3 array with a known sort order.\n\n This should be three items [B, C, A] with\n A < B < C\n ' return fletcher_array(fletcher_type.data_for_sorting, dtype=fletcher_type.dtype)
-7,914,527,018,796,880,000
Length-3 array with a known sort order. This should be three items [B, C, A] with A < B < C
tests/test_pandas_extension.py
data_for_sorting
artemru/fletcher
python
@pytest.fixture def data_for_sorting(fletcher_type, fletcher_array): 'Length-3 array with a known sort order.\n\n This should be three items [B, C, A] with\n A < B < C\n ' return fletcher_array(fletcher_type.data_for_sorting, dtype=fletcher_type.dtype)
@pytest.fixture def data_missing_for_sorting(fletcher_type, fletcher_array): 'Length-3 array with a known sort order.\n\n This should be three items [B, NA, A] with\n A < B and NA missing.\n ' return fletcher_array(fletcher_type.data_missing_for_sorting, dtype=fletcher_type.dtype)
5,135,210,073,270,731,000
Length-3 array with a known sort order. This should be three items [B, NA, A] with A < B and NA missing.
tests/test_pandas_extension.py
data_missing_for_sorting
artemru/fletcher
python
@pytest.fixture def data_missing_for_sorting(fletcher_type, fletcher_array): 'Length-3 array with a known sort order.\n\n This should be three items [B, NA, A] with\n A < B and NA missing.\n ' return fletcher_array(fletcher_type.data_missing_for_sorting, dtype=fletcher_type.dtype)
@pytest.fixture(params=[None, (lambda x: x)]) def sort_by_key(request): '\n    Return a simple fixture for testing keys in sorting methods.\n\n    Tests None (no key) and the identity key.\n    ' return request.param
5,786,248,049,942,007,000
Return a simple fixture for testing keys in sorting methods. Tests None (no key) and the identity key.
tests/test_pandas_extension.py
sort_by_key
artemru/fletcher
python
@pytest.fixture(params=[None, (lambda x: x)]) def sort_by_key(request): '\n    Return a simple fixture for testing keys in sorting methods.\n\n    Tests None (no key) and the identity key.\n    ' return request.param
def normalize_answer(s): '\n Lower text and remove punctuation, articles and extra whitespace.\n ' s = s.lower() s = re_punc.sub(' ', s) s = re_art.sub(' ', s) s = ' '.join(s.split()) return s
3,494,321,799,881,313,000
Lower text and remove punctuation, articles and extra whitespace.
parlai/core/metrics.py
normalize_answer
Totoola-Kehinde/ParlAI
python
def normalize_answer(s): '\n \n ' s = s.lower() s = re_punc.sub(' ', s) s = re_art.sub(' ', s) s = ' '.join(s.split()) return s
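With simplified stand-ins for the module-level regexes (ParlAI's real re_punc uses an explicit punctuation set; the character class below is an assumption), normalize_answer behaves like:

import re
re_art = re.compile(r'\b(a|an|the)\b')  # articles
re_punc = re.compile(r'[^\w\s]')        # simplified punctuation class (assumption)

normalize_answer('The quick, brown fox!')  # -> 'quick brown fox'
normalize_answer('An  answer...  HERE')    # -> 'answer here'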
def aggregate_named_reports(named_reports: Dict[(str, Dict[(str, Metric)])], micro_average: bool=False) -> Dict[(str, Metric)]: '\n Aggregate metrics from multiple reports.\n\n :param reports:\n Dict of tasks -> metrics.\n :param micro_average:\n If true, top level metrics will be the micro average. By default, we\n use macro average.\n :return:\n The aggregated report\n ' if (len(named_reports) == 0): raise ValueError('Cannot aggregate empty reports.') if (len(named_reports) == 1): return next(iter(named_reports.values())) m: Dict[(str, Metric)] = {} macro_averages: Dict[(str, Dict[(str, Metric)])] = {} for (task_id, task_report) in named_reports.items(): for (each_metric, value) in task_report.items(): if value.is_global: if (each_metric not in m): m[each_metric] = value else: task_metric = f'{task_id}/{each_metric}' m[task_metric] = (m.get(task_metric) + value) if (micro_average or (not value.macro_average)): m[each_metric] = (m.get(each_metric) + value) else: if (each_metric not in macro_averages): macro_averages[each_metric] = {} macro_averages[each_metric][task_id] = value for (key, values) in macro_averages.items(): m[key] = MacroAverageMetric(values) return m
-4,107,256,599,449,128,400
Aggregate metrics from multiple reports. :param reports: Dict of tasks -> metrics. :param micro_average: If true, top level metrics will be the micro average. By default, we use macro average. :return: The aggregated report
parlai/core/metrics.py
aggregate_named_reports
Totoola-Kehinde/ParlAI
python
def aggregate_named_reports(named_reports: Dict[(str, Dict[(str, Metric)])], micro_average: bool=False) -> Dict[(str, Metric)]: '\n Aggregate metrics from multiple reports.\n\n :param reports:\n Dict of tasks -> metrics.\n :param micro_average:\n If true, top level metrics will be the micro average. By default, we\n use macro average.\n :return:\n The aggregated report\n ' if (len(named_reports) == 0): raise ValueError('Cannot aggregate empty reports.') if (len(named_reports) == 1): return next(iter(named_reports.values())) m: Dict[(str, Metric)] = {} macro_averages: Dict[(str, Dict[(str, Metric)])] = {} for (task_id, task_report) in named_reports.items(): for (each_metric, value) in task_report.items(): if value.is_global: if (each_metric not in m): m[each_metric] = value else: task_metric = f'{task_id}/{each_metric}' m[task_metric] = (m.get(task_metric) + value) if (micro_average or (not value.macro_average)): m[each_metric] = (m.get(each_metric) + value) else: if (each_metric not in macro_averages): macro_averages[each_metric] = {} macro_averages[each_metric][task_id] = value for (key, values) in macro_averages.items(): m[key] = MacroAverageMetric(values) return m
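A hedged illustration with ParlAI-style AverageMetric values (a ratio metric whose macro_average flag is assumed True, as for accuracy-like averages); the task names are hypothetical:

reports = {
    'taskA': {'accuracy': AverageMetric(1, 2)},  # 0.5
    'taskB': {'accuracy': AverageMetric(3, 4)},  # 0.75
}
agg = aggregate_named_reports(reports)
# per-task keys survive: agg['taskA/accuracy'] and agg['taskB/accuracy'];
# top-level agg['accuracy'] is the macro average (0.5 + 0.75) / 2 = 0.625,
# whereas micro_average=True would pool the counts instead: 4 / 6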
def aggregate_unnamed_reports(reports: List[Dict[(str, Metric)]]) -> Dict[(str, Metric)]: '\n    Combines metrics without regard for tracking provenance.\n    ' m: Dict[(str, Metric)] = {} for task_report in reports: for (each_metric, value) in task_report.items(): m[each_metric] = (m.get(each_metric) + value) return m
1,361,825,693,686,155,000
Combines metrics without regard for tracking provenance.
parlai/core/metrics.py
aggregate_unnamed_reports
Totoola-Kehinde/ParlAI
python
def aggregate_unnamed_reports(reports: List[Dict[(str, Metric)]]) -> Dict[(str, Metric)]: '\n \n ' m: Dict[(str, Metric)] = {} for task_report in reports: for (each_metric, value) in task_report.items(): m[each_metric] = (m.get(each_metric) + value) return m
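By contrast, aggregate_unnamed_reports pools everything into a single sum per key; m.get(each_metric) is None the first time a key is seen, which works on the assumption (as in ParlAI) that Metric.__radd__ treats None as zero:

agg = aggregate_unnamed_reports([
    {'accuracy': AverageMetric(1, 2)},
    {'accuracy': AverageMetric(3, 4)},
])
# agg['accuracy'] == AverageMetric(4, 6), i.e. the micro average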