body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
---|---|---|---|---|---|---|---|
def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward an ICMP packet on source IP.'
src_ip = ('0.0.0.0' if (ip_version == 'ipv4') else '0000:0000:0000:0000:0000:0000:0000:0000')
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12) | 7,093,914,005,455,209,000 | Verify that we can match and forward an ICMP packet on source IP. | tests/acl/test_acl.py | test_icmp_source_ip_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
src_ip = ('0.0.0.0' if (ip_version == 'ipv4') else '0000:0000:0000:0000:0000:0000:0000:0000')
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(12) |
def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on L4 destination port.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4631)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5) | 2,258,876,962,748,210,000 | Verify that we can match and forward on L4 destination port. | tests/acl/test_acl.py | test_l4_dport_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4631)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5) |
def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on L4 source port.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4621)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4) | -8,989,014,313,405,647,000 | Verify that we can match and forward on L4 source port. | tests/acl/test_acl.py | test_l4_sport_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4621)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(4) |
def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on a range of L4 destination ports.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4667)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11) | -3,062,690,915,067,301,000 | Verify that we can match and forward on a range of L4 destination ports. | tests/acl/test_acl.py | test_l4_dport_range_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4667)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(11) |
def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on a range of L4 source ports.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4666)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10) | 824,063,328,700,767,600 | Verify that we can match and forward on a range of L4 source ports. | tests/acl/test_acl.py | test_l4_sport_range_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4666)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(10) |
def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on a range of L4 destination ports.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4731)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22) | 7,259,250,634,837,906,000 | Verify that we can match and drop on a range of L4 destination ports. | tests/acl/test_acl.py | test_l4_dport_range_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4731)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22) |
def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on a range of L4 source ports.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4721)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17) | -2,294,721,210,880,217,900 | Verify that we can match and drop on a range of L4 source ports. | tests/acl/test_acl.py | test_l4_sport_range_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4721)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(17) |
def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on the IP protocol.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=126)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5) | -8,920,681,152,454,606,000 | Verify that we can match and forward on the IP protocol. | tests/acl/test_acl.py | test_ip_proto_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=126)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(5) |
def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward on the TCP flags.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=27)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6) | -4,084,106,370,377,682,400 | Verify that we can match and forward on the TCP flags. | tests/acl/test_acl.py | test_tcp_flags_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=27)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(6) |
def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on L4 destination port.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4731)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22) | 1,433,740,755,603,244,300 | Verify that we can match and drop on L4 destination port. | tests/acl/test_acl.py | test_l4_dport_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=4731)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(22) |
def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on L4 source port.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4721)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10) | -3,961,363,212,683,512,300 | Verify that we can match and drop on L4 source port. | tests/acl/test_acl.py | test_l4_sport_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=4721)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(10) |
def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on the IP protocol.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=127)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18) | 7,359,134,178,617,844,000 | Verify that we can match and drop on the IP protocol. | tests/acl/test_acl.py | test_ip_proto_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=127)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(18) |
def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and drop on the TCP flags.'
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=36)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5) | -7,891,738,788,045,145,000 | Verify that we can match and drop on the TCP flags. | tests/acl/test_acl.py | test_tcp_flags_match_dropped | KostiantynYarovyiBf/sonic-mgmt | python | def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=36)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
counters_sanity_check.append(5) |
def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
'Verify that we can match and forward an ICMP packet on ICMP type and code.'
src_ip = ('0.0.0.0' if (ip_version == 'ipv4') else '0000:0000:0000:0000:0000:0000:0000:0000')
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29) | -4,987,940,843,696,471,000 | Verify that we can match and forward an ICMP packet on ICMP type and code. | tests/acl/test_acl.py | test_icmp_match_forwarded | KostiantynYarovyiBf/sonic-mgmt | python | def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
src_ip = ('0.0.0.0' if (ip_version == 'ipv4') else '0000:0000:0000:0000:0000:0000:0000:0000')
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)
self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
counters_sanity_check.append(29) |
def setup_rules(self, dut, acl_table, ip_version):
'Setup ACL rules for testing.\n\n Args:\n dut: The DUT having ACLs applied.\n acl_table: Configuration info for the ACL table.\n\n '
table_name = acl_table['table_name']
dut.host.options['variable_manager'].extra_vars.update({'acl_table_name': table_name})
logger.info('Generating basic ACL rules config for ACL table "{}" on {}'.format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, 'acl_rules_{}.json'.format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]), dest=dut_conf_file_path)
logger.info('Applying ACL rules config "{}"'.format(dut_conf_file_path))
dut.command('config acl update full {}'.format(dut_conf_file_path)) | -9,207,895,307,831,951,000 | Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table. | tests/acl/test_acl.py | setup_rules | KostiantynYarovyiBf/sonic-mgmt | python | def setup_rules(self, dut, acl_table, ip_version):
'Setup ACL rules for testing.\n\n Args:\n dut: The DUT having ACLs applied.\n acl_table: Configuration info for the ACL table.\n\n '
table_name = acl_table['table_name']
dut.host.options['variable_manager'].extra_vars.update({'acl_table_name': table_name})
logger.info('Generating basic ACL rules config for ACL table "{}" on {}'.format(table_name, dut))
dut_conf_file_path = os.path.join(DUT_TMP_DIR, 'acl_rules_{}.json'.format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]), dest=dut_conf_file_path)
logger.info('Applying ACL rules config "{}"'.format(dut_conf_file_path))
dut.command('config acl update full {}'.format(dut_conf_file_path)) |
def setup_rules(self, dut, acl_table, ip_version):
'Setup ACL rules for testing.\n\n Args:\n dut: The DUT having ACLs applied.\n acl_table: Configuration info for the ACL table.\n\n '
table_name = acl_table['table_name']
dut.host.options['variable_manager'].extra_vars.update({'acl_table_name': table_name})
logger.info('Generating incremental ACL rules config for ACL table "{}"'.format(table_name))
for (part, config_file) in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, 'acl_rules_{}_part_{}.json'.format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info('Applying ACL rules config "{}"'.format(dut_conf_file_path))
dut.command('config acl update incremental {}'.format(dut_conf_file_path)) | 187,118,581,047,614,400 | Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table. | tests/acl/test_acl.py | setup_rules | KostiantynYarovyiBf/sonic-mgmt | python | def setup_rules(self, dut, acl_table, ip_version):
'Setup ACL rules for testing.\n\n Args:\n dut: The DUT having ACLs applied.\n acl_table: Configuration info for the ACL table.\n\n '
table_name = acl_table['table_name']
dut.host.options['variable_manager'].extra_vars.update({'acl_table_name': table_name})
logger.info('Generating incremental ACL rules config for ACL table "{}"'.format(table_name))
for (part, config_file) in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
dut_conf_file_path = os.path.join(DUT_TMP_DIR, 'acl_rules_{}_part_{}.json'.format(table_name, part))
dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=dut_conf_file_path)
logger.info('Applying ACL rules config "{}"'.format(dut_conf_file_path))
dut.command('config acl update incremental {}'.format(dut_conf_file_path)) |
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
'Save configuration and reboot after rules are applied.\n\n Args:\n dut: The DUT having ACLs applied.\n localhost: The host from which tests are run.\n populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.\n\n '
dut.command('config save -y')
reboot(dut, localhost, wait=240)
if (dut.facts['platform'] == 'x86_64-cel_e1031-r0'):
time.sleep(240)
populate_vlan_arp_entries() | -1,384,489,803,225,975,800 | Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces. | tests/acl/test_acl.py | post_setup_hook | KostiantynYarovyiBf/sonic-mgmt | python | def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
'Save configuration and reboot after rules are applied.\n\n Args:\n dut: The DUT having ACLs applied.\n localhost: The host from which tests are run.\n populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.\n\n '
dut.command('config save -y')
reboot(dut, localhost, wait=240)
if (dut.facts['platform'] == 'x86_64-cel_e1031-r0'):
time.sleep(240)
populate_vlan_arp_entries() |
def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
'Toggle ports after rules are applied.\n\n Args:\n dut: The DUT having ACLs applied.\n localhost: The host from which tests are run.\n populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.\n\n '
port_toggle(dut, tbinfo)
populate_vlan_arp_entries() | 1,083,809,853,983,803,800 | Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces. | tests/acl/test_acl.py | post_setup_hook | KostiantynYarovyiBf/sonic-mgmt | python | def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
'Toggle ports after rules are applied.\n\n Args:\n dut: The DUT having ACLs applied.\n localhost: The host from which tests are run.\n populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.\n\n '
port_toggle(dut, tbinfo)
populate_vlan_arp_entries() |
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
'Compute the average displacement error for a set of K predicted trajectories (for the same actor).\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory.\n\n Returns:\n (K,) Average displacement error for each of the predicted trajectories.\n '
displacement_errors = np.linalg.norm((forecasted_trajectories - gt_trajectory), axis=2)
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade | -3,052,068,636,033,702,000 | Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories. | src/av2/datasets/motion_forecasting/eval/metrics.py | compute_ade | johnwlambert/argoverse2-api | python | def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
'Compute the average displacement error for a set of K predicted trajectories (for the same actor).\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory.\n\n Returns:\n (K,) Average displacement error for each of the predicted trajectories.\n '
displacement_errors = np.linalg.norm((forecasted_trajectories - gt_trajectory), axis=2)
ade: NDArrayFloat = np.mean(displacement_errors, axis=1)
return ade |
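A minimal usage sketch for `compute_ade` as defined above, on toy arrays shaped as the docstring describes (values are illustrative):

```python
import numpy as np

# K=2 candidate trajectories over N=3 timestamps in 2-D.
forecasts = np.array([[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]],
                      [[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]]])
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])

# First candidate matches exactly; the second is offset by 1 m at every step.
ade = compute_ade(forecasts, gt)  # -> array([0., 1.])
```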
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
'Compute the final displacement error for a set of K predicted trajectories (for the same actor).\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.\n\n Returns:\n (K,) Final displacement error for each of the predicted trajectories.\n '
fde_vector = (forecasted_trajectories - gt_trajectory)[:, (- 1)]
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=(- 1))
return fde | -7,312,789,622,416,517,000 | Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories. | src/av2/datasets/motion_forecasting/eval/metrics.py | compute_fde | johnwlambert/argoverse2-api | python | def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
'Compute the final displacement error for a set of K predicted trajectories (for the same actor).\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.\n\n Returns:\n (K,) Final displacement error for each of the predicted trajectories.\n '
fde_vector = (forecasted_trajectories - gt_trajectory)[:, (- 1)]
fde: NDArrayFloat = np.linalg.norm(fde_vector, axis=(- 1))
return fde |
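A companion sketch for `compute_fde`, again with toy data; only the endpoint at index `N-1` contributes to the result:

```python
import numpy as np

forecasts = np.array([[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]],
                      [[0.0, 0.0], [1.0, 0.0], [5.0, 0.0]]])
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])

# Endpoint errors: 0 m for the first candidate, 3 m for the second.
fde = compute_fde(forecasts, gt)  # -> array([0., 3.])
```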
def compute_is_missed_prediction(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber, miss_threshold_m: float=2.0) -> NDArrayBool:
'Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.\n miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.\n\n Returns:\n (K,) Bools indicating whether prediction missed by more than specified threshold.\n '
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = (fde > miss_threshold_m)
return is_missed_prediction | 3,236,725,978,363,458,000 | Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold. | src/av2/datasets/motion_forecasting/eval/metrics.py | compute_is_missed_prediction | johnwlambert/argoverse2-api | python | def compute_is_missed_prediction(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber, miss_threshold_m: float=2.0) -> NDArrayBool:
'Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.\n\n Args:\n forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.\n gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.\n miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.\n\n Returns:\n (K,) Bools indicating whether prediction missed by more than specified threshold.\n '
fde = compute_fde(forecasted_trajectories, gt_trajectory)
is_missed_prediction = (fde > miss_threshold_m)
return is_missed_prediction |
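Continuing the toy data from the FDE sketch, a call to `compute_is_missed_prediction` with the default 2.0 m threshold flags only the second candidate:

```python
import numpy as np

forecasts = np.array([[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]],
                      [[0.0, 0.0], [1.0, 0.0], [5.0, 0.0]]])
gt = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])

# Endpoint errors are [0.0, 3.0]; only the second exceeds the threshold.
missed = compute_is_missed_prediction(forecasts, gt, miss_threshold_m=2.0)
# -> array([False,  True])
```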
def get_ts_pipeline(window_size):
' Function returns a pipeline with a lagged transformation in it '
node_lagged = PrimaryNode('lagged')
node_lagged.custom_params = {'window_size': window_size}
node_final = SecondaryNode('ridge', nodes_from=[node_lagged])
pipeline = Pipeline(node_final)
return pipeline | -2,480,786,781,826,258,400 | Function returns a pipeline with a lagged transformation in it | test/unit/data_operations/test_data_operation_params.py | get_ts_pipeline | vkirilenko/FEDOT | python | def get_ts_pipeline(window_size):
' '
node_lagged = PrimaryNode('lagged')
node_lagged.custom_params = {'window_size': window_size}
node_final = SecondaryNode('ridge', nodes_from=[node_lagged])
pipeline = Pipeline(node_final)
return pipeline |
def get_ransac_pipeline():
' Function returns a pipeline with a RANSAC operation in it '
node_ransac = PrimaryNode('ransac_lin_reg')
node_final = SecondaryNode('linear', nodes_from=[node_ransac])
pipeline = Pipeline(node_final)
return pipeline | 5,672,901,399,378,419,000 | Function returns a pipeline with a RANSAC operation in it | test/unit/data_operations/test_data_operation_params.py | get_ransac_pipeline | vkirilenko/FEDOT | python | def get_ransac_pipeline():
' '
node_ransac = PrimaryNode('ransac_lin_reg')
node_final = SecondaryNode('linear', nodes_from=[node_ransac])
pipeline = Pipeline(node_final)
return pipeline |
def test_lagged_with_invalid_params_fit_correctly():
" The function define a pipeline with incorrect parameters in the lagged\n transformation. During the training of the pipeline, the parameter 'window_size'\n is corrected\n "
window_size = 600
len_forecast = 50
project_root_path = str(fedot_project_root())
file_path = os.path.join(project_root_path, 'test/data/short_time_series.csv')
df = pd.read_csv(file_path)
time_series = np.array(df['sea_height'])
task = Task(TaskTypesEnum.ts_forecasting, TsForecastingParams(forecast_length=len_forecast))
ts_input = InputData(idx=np.arange(0, len(time_series)), features=time_series, target=time_series, task=task, data_type=DataTypesEnum.ts)
pipeline = get_ts_pipeline(window_size)
pipeline.fit(ts_input)
lagged_node = pipeline.nodes[1]
fixed_params = lagged_node.custom_params
assert pipeline.is_fitted
assert (fixed_params['window_size'] == 439) | -847,786,012,287,908,600 | The function defines a pipeline with an incorrect parameter in the lagged
transformation. During the training of the pipeline, the parameter 'window_size'
is corrected.
" The function define a pipeline with incorrect parameters in the lagged\n transformation. During the training of the pipeline, the parameter 'window_size'\n is corrected\n "
window_size = 600
len_forecast = 50
project_root_path = str(fedot_project_root())
file_path = os.path.join(project_root_path, 'test/data/short_time_series.csv')
df = pd.read_csv(file_path)
time_series = np.array(df['sea_height'])
task = Task(TaskTypesEnum.ts_forecasting, TsForecastingParams(forecast_length=len_forecast))
ts_input = InputData(idx=np.arange(0, len(time_series)), features=time_series, target=time_series, task=task, data_type=DataTypesEnum.ts)
pipeline = get_ts_pipeline(window_size)
pipeline.fit(ts_input)
lagged_node = pipeline.nodes[1]
fixed_params = lagged_node.custom_params
assert pipeline.is_fitted
assert (fixed_params['window_size'] == 439) |
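A hypothetical sketch of the kind of correction the test exercises. FEDOT's actual heuristic (which yields 439 for this series) is not shown in the snippet above, so the rule below is only illustrative, not the library's code:

```python
def fix_window_size(window_size: int, ts_len: int, forecast_length: int) -> int:
    # Illustrative rule only: the lagged window cannot exceed what the series
    # can support once the forecast horizon is held out. FEDOT's internal
    # correction differs in detail, but the effect is the same: an oversized
    # window (600 on a short series) is clipped to a feasible value.
    max_window = max(1, ts_len - forecast_length - 1)
    return min(window_size, max_window)
```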
def test_ransac_with_invalid_params_fit_correctly():
' Check that on a small dataset the RANSAC anomaly search algorithm can\n adjust the values of hyperparameters\n\n As stated in the sklearn documentation, min_samples is determined by default\n based on how many features are in the dataset\n Therefore, problems can arise when there are more attributes in a dataset\n than the number of objects\n '
input_regression = get_synthetic_regression_data(n_samples=20, n_features=23)
ransac_pipeline = get_ransac_pipeline()
ransac_pipeline.fit(input_regression)
predicted = ransac_pipeline.predict(input_regression)
assert ransac_pipeline.is_fitted
assert (predicted is not None) | -6,346,821,675,250,235,000 | Check that on a small dataset the RANSAC anomaly search algorithm can
adjust the values of hyperparameters
As stated in the sklearn documentation, min_samples is determined by default
based on how many features are in the dataset
Therefore, problems can arise when there are more attributes in a dataset
than the number of objects | test/unit/data_operations/test_data_operation_params.py | test_ransac_with_invalid_params_fit_correctly | vkirilenko/FEDOT | python | def test_ransac_with_invalid_params_fit_correctly():
' Check that on a small dataset the RANSAC anomaly search algorithm can\n adjust the values of hyperparameters\n\n As stated in the sklearn documentation, min_samples is determined by default\n based on how many features are in the dataset\n Therefore, problems can arise when there are more attributes in a dataset\n than the number of objects\n '
input_regression = get_synthetic_regression_data(n_samples=20, n_features=23)
ransac_pipeline = get_ransac_pipeline()
ransac_pipeline.fit(input_regression)
predicted = ransac_pipeline.predict(input_regression)
assert ransac_pipeline.is_fitted
assert (predicted is not None) |
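The failure mode the docstring describes can be reproduced directly with scikit-learn, which defaults `min_samples` to `n_features + 1` for a linear base estimator. The cap below is an illustration of one possible fix, not FEDOT's actual implementation:

```python
import numpy as np
from sklearn.linear_model import RANSACRegressor

X = np.random.rand(20, 23)  # more features (23) than objects (20)
y = np.random.rand(20)

# The default min_samples would be 24 > 20 and fitting would fail, so cap
# it at the number of available samples before fitting.
ransac = RANSACRegressor(min_samples=min(X.shape[0], X.shape[1] + 1))
ransac.fit(X, y)
```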
def undo_logger_setup():
'Undoes the automatic logging setup done by OpenAI Gym. You should call\n this function if you want to manually configure logging\n yourself. Typical usage would involve putting something like the\n following at the top of your script:\n\n gym.undo_logger_setup()\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler(sys.stderr))\n '
root_logger.removeHandler(handler)
gym.logger.setLevel(logging.NOTSET)
requests_logger.setLevel(logging.NOTSET) | 5,744,155,521,019,760,000 | Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr)) | gym/configuration.py | undo_logger_setup | HosseynGT/GYM | python | def undo_logger_setup():
'Undoes the automatic logging setup done by OpenAI Gym. You should call\n this function if you want to manually configure logging\n yourself. Typical usage would involve putting something like the\n following at the top of your script:\n\n gym.undo_logger_setup()\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler(sys.stderr))\n '
root_logger.removeHandler(handler)
gym.logger.setLevel(logging.NOTSET)
requests_logger.setLevel(logging.NOTSET) |
def parse_cmdline():
'Child worker command line parsing'
parser = argparse.ArgumentParser(description='Remote runner parser')
parser.add_argument('--address', action='store')
parser.add_argument('--index', action='store')
parser.add_argument('--wd', action='store')
parser.add_argument('--runpath', action='store', default=None)
parser.add_argument('--type', action='store')
parser.add_argument('--log-level', action='store', default=0, type=int)
parser.add_argument('--remote-pool-type', action='store', default='thread')
parser.add_argument('--remote-pool-size', action='store', default=1)
parser.add_argument('--sys-path-file', action='store')
return parser.parse_args() | 1,849,809,748,026,798,000 | Child worker command line parsing | testplan/runners/pools/child.py | parse_cmdline | kn-ms/testplan | python | def parse_cmdline():
parser = argparse.ArgumentParser(description='Remote runner parser')
parser.add_argument('--address', action='store')
parser.add_argument('--index', action='store')
parser.add_argument('--wd', action='store')
parser.add_argument('--runpath', action='store', default=None)
parser.add_argument('--type', action='store')
parser.add_argument('--log-level', action='store', default=0, type=int)
parser.add_argument('--remote-pool-type', action='store', default='thread')
parser.add_argument('--remote-pool-size', action='store', default=1)
parser.add_argument('--sys-path-file', action='store')
return parser.parse_args() |
def child_logic(args):
'Child logic that can be imported.'
import psutil
from testplan.runners.pools.base import Pool, Worker
from testplan.runners.pools.process import ProcessPool, ProcessWorker
from testplan.runners.pools.connection import ZMQClient
if args.log_level:
from testplan.common.utils.logger import TESTPLAN_LOGGER, STDOUT_HANDLER
TESTPLAN_LOGGER.setLevel(args.log_level)
TESTPLAN_LOGGER.removeHandler(STDOUT_HANDLER)
print('Starting child process worker on {}, {} with parent {}'.format(socket.gethostname(), os.getpid(), psutil.Process(os.getpid()).ppid()))
if args.runpath:
print('Removing old runpath: {}'.format(args.runpath))
shutil.rmtree(args.runpath, ignore_errors=True)
class NoRunpathPool(Pool):
'\n Pool that creates no runpath directory.\n Has only one worker.\n Will use the one already created by parent process.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
class NoRunpathThreadPool(Pool):
'\n Pool that creates no runpath directory.\n Will use the one already created by parent process.\n Supports multiple thread workers.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
class NoRunpathProcessPool(ProcessPool):
'\n Pool that creates no runpath directory.\n Will use the one already created by parent process.\n Supports multiple process workers.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
transport = ZMQClient(address=args.address, recv_timeout=30)
if (args.type == 'process_worker'):
loop = ChildLoop(args.index, transport, NoRunpathPool, 1, Worker, TESTPLAN_LOGGER)
loop.worker_loop()
elif (args.type == 'remote_worker'):
if (args.remote_pool_type == 'process'):
pool_type = NoRunpathProcessPool
worker_type = ProcessWorker
else:
pool_type = NoRunpathThreadPool
worker_type = Worker
loop = RemoteChildLoop(args.index, transport, pool_type, args.remote_pool_size, worker_type, TESTPLAN_LOGGER, runpath=args.runpath)
loop.worker_loop() | 6,453,819,297,988,365,000 | Child logic that can be imported. | testplan/runners/pools/child.py | child_logic | kn-ms/testplan | python | def child_logic(args):
import psutil
from testplan.runners.pools.base import Pool, Worker
from testplan.runners.pools.process import ProcessPool, ProcessWorker
from testplan.runners.pools.connection import ZMQClient
if args.log_level:
from testplan.common.utils.logger import TESTPLAN_LOGGER, STDOUT_HANDLER
TESTPLAN_LOGGER.setLevel(args.log_level)
TESTPLAN_LOGGER.removeHandler(STDOUT_HANDLER)
print('Starting child process worker on {}, {} with parent {}'.format(socket.gethostname(), os.getpid(), psutil.Process(os.getpid()).ppid()))
if args.runpath:
print('Removing old runpath: {}'.format(args.runpath))
shutil.rmtree(args.runpath, ignore_errors=True)
class NoRunpathPool(Pool):
'\n Pool that creates no runpath directory.\n Has only one worker.\n Will use the one already created by parent process.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
class NoRunpathThreadPool(Pool):
'\n Pool that creates no runpath directory.\n Will use the one already created by parent process.\n Supports multiple thread workers.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
class NoRunpathProcessPool(ProcessPool):
'\n Pool that creates no runpath directory.\n Will use the one already created by parent process.\n Supports multiple process workers.\n '
def make_runpath_dirs(self):
self._runpath = self.cfg.runpath
transport = ZMQClient(address=args.address, recv_timeout=30)
if (args.type == 'process_worker'):
loop = ChildLoop(args.index, transport, NoRunpathPool, 1, Worker, TESTPLAN_LOGGER)
loop.worker_loop()
elif (args.type == 'remote_worker'):
if (args.remote_pool_type == 'process'):
pool_type = NoRunpathProcessPool
worker_type = ProcessWorker
else:
pool_type = NoRunpathThreadPool
worker_type = Worker
loop = RemoteChildLoop(args.index, transport, pool_type, args.remote_pool_size, worker_type, TESTPLAN_LOGGER, runpath=args.runpath)
loop.worker_loop() |
def parse_syspath_file(filename):
'\n Read and parse the syspath file, which should contain each sys.path entry\n on a separate line.\n '
with open(filename) as f:
new_syspath = f.read().split('\n')
return new_syspath | -1,310,237,507,953,183,500 | Read and parse the syspath file, which should contain each sys.path entry
on a separate line. | testplan/runners/pools/child.py | parse_syspath_file | kn-ms/testplan | python | def parse_syspath_file(filename):
'\n Read and parse the syspath file, which should contain each sys.path entry\n on a separate line.\n '
with open(filename) as f:
new_syspath = f.read().split('\n')
return new_syspath |
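A small round-trip sketch of the expected file format (one `sys.path` entry per line), using `parse_syspath_file` as defined above; the paths are hypothetical:

```python
import os
import tempfile

entries = ['/opt/testplan', '/opt/deps/lib']
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fp:
    fp.write('\n'.join(entries))

assert parse_syspath_file(fp.name) == entries
os.unlink(fp.name)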
@property
def metadata(self):
'Metadata information.'
return self._metadata | 4,025,160,648,801,091,000 | Metadata information. | testplan/runners/pools/child.py | metadata | kn-ms/testplan | python | @property
def metadata(self):
return self._metadata |
def worker_loop(self):
'\n Child process worker loop. Manages an underlying thread pool, pulls and\n sends back results to the main pool.\n '
from testplan.runners.pools.communication import Message
message = Message(**self.metadata)
try:
self._pre_loop_setup(message)
except Exception:
print('_pre_loop_setup failed')
self._transport.send_and_receive(message.make(message.SetupFailed, data=traceback.format_exc()), expect=message.Ack)
return
with self._child_pool():
message = Message(**self.metadata)
next_possible_request = time.time()
next_heartbeat = time.time()
request_delay = self._pool_cfg.active_loop_sleep
while True:
now = time.time()
if (self._pool_cfg.worker_heartbeat and (now > next_heartbeat)):
hb_resp = self._transport.send_and_receive(message.make(message.Heartbeat, data=time.time()))
if (hb_resp is None):
self.logger.critical('Pool seems dead, child exits.')
self.exit_loop()
break
else:
self.logger.debug('Pool heartbeat response: {} at {} before {}s.'.format(hb_resp.cmd, hb_resp.data, (time.time() - hb_resp.data)))
next_heartbeat = (now + self._pool_cfg.worker_heartbeat)
if self._pool.results:
task_results = []
for uid in list(self._pool.results.keys()):
task_results.append(self._pool.results[uid])
self.logger.debug('Sending back result for {}'.format(self._pool.results[uid].task))
del self._pool.results[uid]
self._transport.send_and_receive(message.make(message.TaskResults, data=task_results), expect=message.Ack)
demand = (self._pool.workers_requests() - self._pool.unassigned.qsize())
if ((demand > 0) and (time.time() > next_possible_request)):
received = self._transport.send_and_receive(message.make(message.TaskPullRequest, data=demand))
if ((received is None) or (received.cmd == Message.Stop)):
self.logger.critical('Pool seems dead or stopping, child exits.')
self.exit_loop()
break
elif (received.cmd == Message.TaskSending):
next_possible_request = time.time()
request_delay = 0
for task in received.data:
self.logger.debug('Added {} to local pool'.format(task))
self._pool.add(task, task.uid())
for worker in self._pool._workers:
worker.requesting = 0
elif (received.cmd == Message.Ack):
request_delay = min(((request_delay + 0.2) * 1.5), self._pool_cfg.max_active_loop_sleep)
next_possible_request = (time.time() + request_delay)
pass
time.sleep(self._pool_cfg.active_loop_sleep)
self.logger.info('Local pool {} stopped.'.format(self._pool)) | 5,512,556,476,780,485,000 | Child process worker loop. Manages an underlying thread pool, pulls and
sends back results to the main pool. | testplan/runners/pools/child.py | worker_loop | kn-ms/testplan | python | def worker_loop(self):
'\n Child process worker loop. Manages an underlying thread pool, pulls and\n sends back results to the main pool.\n '
from testplan.runners.pools.communication import Message
message = Message(**self.metadata)
try:
self._pre_loop_setup(message)
except Exception:
print('_pre_loop_setup failed')
self._transport.send_and_receive(message.make(message.SetupFailed, data=traceback.format_exc()), expect=message.Ack)
return
with self._child_pool():
message = Message(**self.metadata)
next_possible_request = time.time()
next_heartbeat = time.time()
request_delay = self._pool_cfg.active_loop_sleep
while True:
now = time.time()
if (self._pool_cfg.worker_heartbeat and (now > next_heartbeat)):
hb_resp = self._transport.send_and_receive(message.make(message.Heartbeat, data=time.time()))
if (hb_resp is None):
self.logger.critical('Pool seems dead, child exits.')
self.exit_loop()
break
else:
self.logger.debug('Pool heartbeat response: {} at {} before {}s.'.format(hb_resp.cmd, hb_resp.data, (time.time() - hb_resp.data)))
next_heartbeat = (now + self._pool_cfg.worker_heartbeat)
if self._pool.results:
task_results = []
for uid in list(self._pool.results.keys()):
task_results.append(self._pool.results[uid])
self.logger.debug('Sending back result for {}'.format(self._pool.results[uid].task))
del self._pool.results[uid]
self._transport.send_and_receive(message.make(message.TaskResults, data=task_results), expect=message.Ack)
demand = (self._pool.workers_requests() - self._pool.unassigned.qsize())
if ((demand > 0) and (time.time() > next_possible_request)):
received = self._transport.send_and_receive(message.make(message.TaskPullRequest, data=demand))
if ((received is None) or (received.cmd == Message.Stop)):
self.logger.critical('Pool seems dead or stopping, child exits.')
self.exit_loop()
break
elif (received.cmd == Message.TaskSending):
next_possible_request = time.time()
request_delay = 0
for task in received.data:
self.logger.debug('Added {} to local pool'.format(task))
self._pool.add(task, task.uid())
for worker in self._pool._workers:
worker.requesting = 0
elif (received.cmd == Message.Ack):
request_delay = min(((request_delay + 0.2) * 1.5), self._pool_cfg.max_active_loop_sleep)
next_possible_request = (time.time() + request_delay)
pass
time.sleep(self._pool_cfg.active_loop_sleep)
self.logger.info('Local pool {} stopped.'.format(self._pool)) |
@property
def contours(self):
'\n :obj:`ipywidgets.Text`: String defining sets of contours.\n Contours can be defined over an interval `50:200:10` and/or at a fixed value `215`.\n Any combination of the above can be used:\n 50:200:10, 215 => Contours between values 50 and 200 every 10, with a contour at 215.\n '
return self._contours | 7,233,617,114,043,155,000 | :obj:`ipywidgets.Text`: String defining sets of contours.
Contours can be defined over an interval `50:200:10` and/or at a fixed value `215`.
Any combination of the above can be used:
50:200:10, 215 => Contours between values 50 and 200 every 10, with a contour at 215. | geoapps/contours/application.py | contours | MiraGeoscience/mirageoscience-apps | python | @property
def contours(self):
'\n :obj:`ipywidgets.Text`: String defining sets of contours.\n Contours can be defined over an interval `50:200:10` and/or at a fixed value `215`.\n Any combination of the above can be used:\n 50:200:10, 215 => Contours between values 50 and 200 every 10, with a contour at 215.\n '
return self._contours |
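A hypothetical parser for the contour syntax documented above; `parse_contours` is not part of the application, and the real code may treat interval endpoints differently:

```python
import numpy as np

def parse_contours(spec: str) -> list:
    # '50:200:10, 215' -> [50.0, 60.0, ..., 190.0, 215.0]
    values = []
    for part in (p.strip() for p in spec.split(',')):
        if ':' in part:
            start, stop, step = (float(v) for v in part.split(':'))
            values.extend(np.arange(start, stop, step))  # endpoint excluded here
        elif part:
            values.append(float(part))
    return sorted(values)
```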
@property
def export(self):
'\n :obj:`ipywidgets.ToggleButton`: Write contours to the target geoh5\n '
return self._export | 4,496,233,297,618,209,000 | :obj:`ipywidgets.ToggleButton`: Write contours to the target geoh5 | geoapps/contours/application.py | export | MiraGeoscience/mirageoscience-apps | python | @property
def export(self):
'\n \n '
return self._export |
@property
def export_as(self):
'\n :obj:`ipywidgets.Text`: Name given to the Curve object\n '
return self._export_as | 6,315,409,722,409,907,000 | :obj:`ipywidgets.Text`: Name given to the Curve object | geoapps/contours/application.py | export_as | MiraGeoscience/mirageoscience-apps | python | @property
def export_as(self):
'\n \n '
return self._export_as |
@property
def z_value(self):
'\n :obj:`ipywidgets.Checkbox`: Assign z-coordinate based on contour values\n '
return self._z_value | 2,001,201,542,359,263,200 | :obj:`ipywidgets.Checkbox`: Assign z-coordinate based on contour values | geoapps/contours/application.py | z_value | MiraGeoscience/mirageoscience-apps | python | @property
def z_value(self):
'\n \n '
return self._z_value |
@property
def main(self):
'\n :obj:`ipywidgets.VBox`: A box containing all widgets forming the application.\n '
if (self._main is None):
self._main = VBox([self.project_panel, HBox([VBox([Label('Input options:'), self.data_panel, self.contours, self.window_selection]), VBox([Label('Save as:'), self.export_as, self.z_value, self.output_panel], layout=Layout(width='50%'))]), self.selection])
return self._main | 8,673,694,421,341,043,000 | :obj:`ipywidgets.VBox`: A box containing all widgets forming the application. | geoapps/contours/application.py | main | MiraGeoscience/mirageoscience-apps | python | @property
def main(self):
'\n \n '
if (self._main is None):
self._main = VBox([self.project_panel, HBox([VBox([Label('Input options:'), self.data_panel, self.contours, self.window_selection]), VBox([Label('Save as:'), self.export_as, self.z_value, self.output_panel], layout=Layout(width='50%'))]), self.selection])
return self._main |
def compute_plot(self, contour_values):
'\n Get current selection and trigger update\n '
(entity, data) = self.get_selected_entities()
if (data is None):
return
if (contour_values is not None):
self.contours.value = contour_values | -7,983,605,261,530,699,000 | Get current selection and trigger update | geoapps/contours/application.py | compute_plot | MiraGeoscience/mirageoscience-apps | python | def compute_plot(self, contour_values):
'\n \n '
(entity, data) = self.get_selected_entities()
if (data is None):
return
if (contour_values is not None):
self.contours.value = contour_values |
def update_contours(self):
'\n Assign a default export name from the selected data and contour values.\n '
if (self.data.value is not None):
self.export_as.value = ((self.data.uid_name_map[self.data.value] + '_') + self.contours.value) | 6,880,116,803,218,988,000 | Assign a default export name from the selected data and contour values. | geoapps/contours/application.py | update_contours | MiraGeoscience/mirageoscience-apps | python | def update_contours(self):
'\n \n '
if (self.data.value is not None):
self.export_as.value = ((self.data.uid_name_map[self.data.value] + '_') + self.contours.value) |
def clean(text: str) -> list:
'A simple function to clean up text data'
wnl = nltk.stem.WordNetLemmatizer()
stopwords = nltk.corpus.stopwords.words('english')
text = text.encode('ascii', 'ignore').decode('utf-8', 'ignore').lower()
words = re.sub('[^\\w\\s]', '', text).split()
return [wnl.lemmatize(word) for word in words if (word not in stopwords)] | 3,515,189,398,334,247,400 | A simple function to clean up text data | fake_news_nlp_detection/fake_news_nlp_detection_2.py | clean | bflaven/BlogArticlesExamples | python | def clean(text: str) -> list:
wnl = nltk.stem.WordNetLemmatizer()
stopwords = nltk.corpus.stopwords.words('english')
text = text.encode('ascii', 'ignore').decode('utf-8', 'ignore').lower()
words = re.sub('[^\\w\\s]', '', text).split()
return [wnl.lemmatize(word) for word in words if (word not in stopwords)] |
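A usage sketch for `clean` as defined above. The NLTK corpora must be downloaded once, and the exact output may vary slightly across NLTK versions:

```python
import nltk

nltk.download('wordnet')    # data for WordNetLemmatizer
nltk.download('stopwords')  # English stopword list

clean('The bats were hanging on their feet!')
# -> ['bat', 'hanging', 'foot']  (stopwords removed, plurals lemmatized)
```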
def example_data_binomial():
'\n Returns an output dataframe with categorical\n features (country and test variation), and ordinal features (date),\n as well as number of successes and total observations for each combination\n '
countries = ['ca', 'us']
dates = pd.date_range('2018-01-01', '2018-02-01')
variation_names = ['test', 'control', 'test2']
success_rates = [0.3, 0.32, 0.24, 0.22, 0.25, 0.42]
n_observations = [50, 80, 30, 50, 40, 50]
return_df = pd.DataFrame()
for (i, (country, variation)) in enumerate(product(countries, variation_names)):
df = pd.DataFrame({'date': dates})
df['country'] = country
df['variation_name'] = variation
df['total'] = np.random.poisson(n_observations[i], size=len(dates))
df['success'] = df['total'].apply((lambda x: np.random.binomial(x, success_rates[i])))
return_df = pd.concat([return_df, df], axis=0)
return return_df | -2,332,233,384,005,863,000 | Returns an output dataframe with categorical
features (country and test variation), and orginal features (date),
as well as number of successes and total observations for each combination | spotify_confidence/examples.py | example_data_binomial | MSchultzberg/confidence | python | def example_data_binomial():
'\n Returns an output dataframe with categorical\n features (country and test variation), and orginal features (date),\n as well as number of successes and total observations for each combination\n '
countries = ['ca', 'us']
dates = pd.date_range('2018-01-01', '2018-02-01')
variation_names = ['test', 'control', 'test2']
success_rates = [0.3, 0.32, 0.24, 0.22, 0.25, 0.42]
n_observations = [50, 80, 30, 50, 40, 50]
return_df = pd.DataFrame()
for (i, (country, variation)) in enumerate(product(countries, variation_names)):
df = pd.DataFrame({'date': dates})
df['country'] = country
df['variation_name'] = variation
df['total'] = np.random.poisson(n_observations[i], size=len(dates))
df['success'] = df['total'].apply((lambda x: np.random.binomial(x, success_rates[i])))
return_df = pd.concat([return_df, df], axis=0)
return return_df |
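A short sketch of consuming the generated frame, aggregating the simulated data to overall success rates per country and variation:

```python
df = example_data_binomial()

rates = (df.groupby(['country', 'variation_name'])[['success', 'total']]
           .sum()
           .assign(rate=lambda g: g['success'] / g['total']))
print(rates)  # six rows, one per (country, variation) combination
```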
def _get_exogs(self):
'list of exogs, for internal use in post-estimation\n '
return (self.exog, self.exog_infl) | -4,565,227,752,000,230,000 | list of exogs, for internal use in post-estimation | statsmodels/discrete/count_model.py | _get_exogs | CCHiggins/statsmodels | python | def _get_exogs(self):
'\n '
return (self.exog, self.exog_infl) |
def loglike(self, params):
'\n Loglikelihood of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n -----\n .. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+\n \\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})\n where P - pdf of main model, L - loglike function of main model.\n '
return np.sum(self.loglikeobs(params)) | 417,392,512,980,127,400 | Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \ln L=\sum_{y_{i}=0}\ln(w_{i}+(1-w_{i})*P_{main\_model})+
\sum_{y_{i}>0}(\ln(1-w_{i})+L_{main\_model})
where P - pdf of main model, L - loglike function of main model. | statsmodels/discrete/count_model.py | loglike | CCHiggins/statsmodels | python | def loglike(self, params):
'\n Loglikelihood of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n -----\n .. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+\n \\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})\n where P - pdf of main model, L - loglike function of main model.\n '
return np.sum(self.loglikeobs(params)) |
def loglikeobs(self, params):
'\n Loglikelihood for observations of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes for definition.\n\n Notes\n -----\n .. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+\n \\ln(1-w_{i})+L_{main\\_model}\n where P - pdf of main model, L - loglike function of main model.\n\n for observations :math:`i=1,...,n`\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero((y == 0))[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = np.log((w[zero_idx] + ((1 - w[zero_idx]) * np.exp(llf_main[zero_idx]))))
llf[nonzero_idx] = (np.log((1 - w[nonzero_idx])) + llf_main[nonzero_idx])
return llf | -223,660,727,974,160,800 | Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
.. math:: \ln L=\ln(w_{i}+(1-w_{i})*P_{main\_model})+
\ln(1-w_{i})+L_{main\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n` | statsmodels/discrete/count_model.py | loglikeobs | CCHiggins/statsmodels | python | def loglikeobs(self, params):
'\n Loglikelihood for observations of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes for definition.\n\n Notes\n -----\n .. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+\n \\ln(1-w_{i})+L_{main\\_model}\n where P - pdf of main model, L - loglike function of main model.\n\n for observations :math:`i=1,...,n`\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero((y == 0))[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = np.log((w[zero_idx] + ((1 - w[zero_idx]) * np.exp(llf_main[zero_idx]))))
llf[nonzero_idx] = (np.log((1 - w[nonzero_idx])) + llf_main[nonzero_idx])
return llf |
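A small numeric illustration of the mixture in the docstring formula; the inflation probability `w` and the main model's probabilities below are hypothetical values, not model output:

```python
import numpy as np

w = 0.3                    # P(inflated zero)
p0_main = 0.05             # main count model: P(y=0)
# Zero observation: P(y=0) = w + (1 - w) * P_main(y=0)
llf_zero = np.log(w + (1 - w) * p0_main)

p_pos_main = 0.10          # main count model: P(y=k) for some k > 0
# Positive observation: the inflation component contributes only (1 - w)
llf_pos = np.log(1 - w) + np.log(p_pos_main)
```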
def score_obs(self, params):
'\n Generic Zero Inflated model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero((y == 0))[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx, :] = (score_main[zero_idx].T * (1 - (w[zero_idx] / np.exp(llf[zero_idx])))).T
dldp[nonzero_idx, :] = score_main[nonzero_idx]
if (self.inflation == 'logit'):
dldw[zero_idx, :] = ((((self.exog_infl[zero_idx].T * w[zero_idx]) * (1 - w[zero_idx])) * (1 - np.exp(llf_main[zero_idx]))) / np.exp(llf[zero_idx])).T
dldw[nonzero_idx, :] = (- (self.exog_infl[nonzero_idx].T * w[nonzero_idx]).T)
elif (self.inflation == 'probit'):
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp)) | 7,217,557,069,048,554,000 | Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params` | statsmodels/discrete/count_model.py | score_obs | CCHiggins/statsmodels | python | def score_obs(self, params):
'\n Generic Zero Inflated model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. the first derivative of the\n loglikelihood function, evaluated at `params`\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero((y == 0))[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx, :] = (score_main[zero_idx].T * (1 - (w[zero_idx] / np.exp(llf[zero_idx])))).T
dldp[nonzero_idx, :] = score_main[nonzero_idx]
if (self.inflation == 'logit'):
dldw[zero_idx, :] = ((((self.exog_infl[zero_idx].T * w[zero_idx]) * (1 - w[zero_idx])) * (1 - np.exp(llf_main[zero_idx]))) / np.exp(llf[zero_idx])).T
dldw[nonzero_idx, :] = (- (self.exog_infl[nonzero_idx].T * w[nonzero_idx]).T)
elif (self.inflation == 'probit'):
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp)) |
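One way to sanity-check the analytic score above is a numerical gradient; this is an illustrative sketch on synthetic data (the parameter vector is arbitrary, not a fit result):
import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedPoisson
from statsmodels.tools.numdiff import approx_fprime

rng = np.random.default_rng(0)
exog = np.column_stack([np.ones(200), rng.normal(size=200)])
endog = rng.poisson(1.0, size=200) * (rng.random(200) > 0.3)

model = ZeroInflatedPoisson(endog, exog)   # default constant-only inflation
params = np.array([-0.5, 0.1, 0.2])        # [inflate const, const, slope]
diff = (model.score_obs(params).sum(0)
        - approx_fprime(params, model.loglike, centered=True))
print(np.abs(diff).max())                  # should be near zero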
def hessian(self, params):
'\n Generic Zero Inflated model Hessian matrix of the loglikelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n '
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if ((hess_arr_main is None) or (hess_arr_infl is None)):
return approx_hess(params, self.loglike)
dim = (self.k_exog + self.k_inflate)
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate, :] = hess_arr_infl
hess_arr[self.k_inflate:, self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices((self.k_exog + self.k_inflate), k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr | 4,171,844,446,509,990,000 | Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
----- | statsmodels/discrete/count_model.py | hessian | CCHiggins/statsmodels | python | def hessian(self, params):
'\n Generic Zero Inflated model Hessian matrix of the loglikelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n '
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if ((hess_arr_main is None) or (hess_arr_infl is None)):
return approx_hess(params, self.loglike)
dim = (self.k_exog + self.k_inflate)
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate, :] = hess_arr_infl
hess_arr[self.k_inflate:, self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices((self.k_exog + self.k_inflate), k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr |
def predict(self, params, exog=None, exog_infl=None, exposure=None, offset=None, which='mean', y_values=None):
'\n        Predict response variable or other statistic given exogenous variables.\n\n        Parameters\n        ----------\n        params : array_like\n            The parameters of the model.\n        exog : ndarray, optional\n            Explanatory variables for the main count model.\n            If ``exog`` is None, then the data from the model will be used.\n        exog_infl : ndarray, optional\n            Explanatory variables for the zero-inflation model.\n            ``exog_infl`` has to be provided if ``exog`` was provided unless\n            ``exog_infl`` in the model is only a constant.\n        offset : ndarray, optional\n            Offset is added to the linear predictor of the mean function with\n            coefficient equal to 1.\n            Default is zero if exog is not None, and the model offset if exog\n            is None.\n        exposure : ndarray, optional\n            Log(exposure) is added to the linear predictor with coefficient\n            equal to 1. If exposure is specified, then it will be logged by\n            the method. The user does not need to log it first.\n            Default is one if exog is not None, and it is the model exposure\n            if exog is None.\n        which : str (optional)\n            Statistic to predict. Default is \'mean\'.\n\n            - \'mean\' : the conditional expectation of endog E(y | x),\n              i.e. exp of linear predictor.\n            - \'linear\' : the linear predictor of the mean function.\n            - \'var\' : returns the estimated variance of endog implied by the\n              model.\n            - \'mean-main\' : mean of the main count model\n            - \'prob-main\' : probability of selecting the main model.\n              The probability of zero inflation is ``1 - prob-main``.\n            - \'mean-nonzero\' : expected value conditional on having observation\n              larger than zero, E(y | X, y>0)\n            - \'prob-zero\' : probability of observing a zero count. P(y=0 | x)\n            - \'prob\' : probabilities of each count from 0 to max(endog), or\n              for y_values if those are provided. This is a multivariate\n              return (2-dim when predicting for several observations).\n\n        y_values : array_like\n            Values of the random variable endog at which pmf is evaluated.\n            Only used if ``which="prob"``\n        '
no_exog = False
if (exog is None):
no_exog = True
exog = self.exog
if (exog_infl is None):
if no_exog:
exog_infl = self.exog_infl
elif self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if ((exog_infl.ndim == 1) and (self.k_inflate == 1)):
exog_infl = exog_infl[:, None]
if (exposure is None):
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if (offset is None):
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = (1 - self.model_infl.predict(params_infl, exog_infl))
lin_pred = ((np.dot(exog, params_main[:self.exog.shape[1]]) + exposure) + offset)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros(exog.shape[0])
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
if (tmp_offset is False):
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
if (tmp_exposure is False):
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
prob_zero = ((1 - prob_main) + (prob_main * np.exp(llf)))
if (which == 'mean'):
return (prob_main * np.exp(lin_pred))
elif (which == 'mean-main'):
return np.exp(lin_pred)
elif (which == 'linear'):
return lin_pred
elif (which == 'mean-nonzero'):
return ((prob_main * np.exp(lin_pred)) / (1 - prob_zero))
elif (which == 'prob-zero'):
return prob_zero
elif (which == 'prob-main'):
return prob_main
elif (which == 'var'):
mu = np.exp(lin_pred)
return self._predict_var(params, mu, (1 - prob_main))
elif (which == 'prob'):
return self._predict_prob(params, exog, exog_infl, exposure, offset, y_values=y_values)
else:
raise ValueError(('which = %s is not available' % which)) | 3,388,781,732,946,094,600 | Predict response variable or other statistic given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor with coefficient
equal to 1. If exposure is specified, then it will be logged by
the method. The user does not need to log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
which : str (optional)
Statistic to predict. Default is 'mean'.
- 'mean' : the conditional expectation of endog E(y | x),
i.e. exp of linear predictor.
- 'linear' : the linear predictor of the mean function.
- 'var' : returns the estimated variance of endog implied by the
model.
- 'mean-main' : mean of the main count model
- 'prob-main' : probability of selecting the main model.
The probability of zero inflation is ``1 - prob-main``.
- 'mean-nonzero' : expected value conditional on having observation
larger than zero, E(y | X, y>0)
- 'prob-zero' : probability of observing a zero count. P(y=0 | x)
- 'prob' : probabilities of each count from 0 to max(endog), or
for y_values if those are provided. This is a multivariate
return (2-dim when predicting for several observations).
y_values : array_like
Values of the random variable endog at which pmf is evaluated.
Only used if ``which="prob"`` | statsmodels/discrete/count_model.py | predict | CCHiggins/statsmodels | python | def predict(self, params, exog=None, exog_infl=None, exposure=None, offset=None, which='mean', y_values=None):
'\n        Predict response variable or other statistic given exogenous variables.\n\n        Parameters\n        ----------\n        params : array_like\n            The parameters of the model.\n        exog : ndarray, optional\n            Explanatory variables for the main count model.\n            If ``exog`` is None, then the data from the model will be used.\n        exog_infl : ndarray, optional\n            Explanatory variables for the zero-inflation model.\n            ``exog_infl`` has to be provided if ``exog`` was provided unless\n            ``exog_infl`` in the model is only a constant.\n        offset : ndarray, optional\n            Offset is added to the linear predictor of the mean function with\n            coefficient equal to 1.\n            Default is zero if exog is not None, and the model offset if exog\n            is None.\n        exposure : ndarray, optional\n            Log(exposure) is added to the linear predictor with coefficient\n            equal to 1. If exposure is specified, then it will be logged by\n            the method. The user does not need to log it first.\n            Default is one if exog is not None, and it is the model exposure\n            if exog is None.\n        which : str (optional)\n            Statistic to predict. Default is \'mean\'.\n\n            - \'mean\' : the conditional expectation of endog E(y | x),\n              i.e. exp of linear predictor.\n            - \'linear\' : the linear predictor of the mean function.\n            - \'var\' : returns the estimated variance of endog implied by the\n              model.\n            - \'mean-main\' : mean of the main count model\n            - \'prob-main\' : probability of selecting the main model.\n              The probability of zero inflation is ``1 - prob-main``.\n            - \'mean-nonzero\' : expected value conditional on having observation\n              larger than zero, E(y | X, y>0)\n            - \'prob-zero\' : probability of observing a zero count. P(y=0 | x)\n            - \'prob\' : probabilities of each count from 0 to max(endog), or\n              for y_values if those are provided. This is a multivariate\n              return (2-dim when predicting for several observations).\n\n        y_values : array_like\n            Values of the random variable endog at which pmf is evaluated.\n            Only used if ``which="prob"``\n        '
no_exog = False
if (exog is None):
no_exog = True
exog = self.exog
if (exog_infl is None):
if no_exog:
exog_infl = self.exog_infl
elif self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if ((exog_infl.ndim == 1) and (self.k_inflate == 1)):
exog_infl = exog_infl[:, None]
if (exposure is None):
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if (offset is None):
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = (1 - self.model_infl.predict(params_infl, exog_infl))
lin_pred = ((np.dot(exog, params_main[:self.exog.shape[1]]) + exposure) + offset)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', False)
tmp_exposure = getattr(self.model_main, 'exposure', False)
self.model_main.exog = exog
self.model_main.endog = np.zeros(exog.shape[0])
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
if (tmp_offset is False):
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
if (tmp_exposure is False):
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
prob_zero = ((1 - prob_main) + (prob_main * np.exp(llf)))
if (which == 'mean'):
return (prob_main * np.exp(lin_pred))
elif (which == 'mean-main'):
return np.exp(lin_pred)
elif (which == 'linear'):
return lin_pred
elif (which == 'mean-nonzero'):
return ((prob_main * np.exp(lin_pred)) / (1 - prob_zero))
elif (which == 'prob-zero'):
return prob_zero
elif (which == 'prob-main'):
return prob_main
elif (which == 'var'):
mu = np.exp(lin_pred)
return self._predict_var(params, mu, (1 - prob_main))
elif (which == 'prob'):
return self._predict_prob(params, exog, exog_infl, exposure, offset, y_values=y_values)
else:
raise ValueError(('which = %s is not available' % which)) |
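A short usage sketch of the `which` options documented above; `res` is an assumed fitted zero-inflated results instance (e.g. from ZeroInflatedPoisson(...).fit()):
import numpy as np

mean = res.predict(which='mean')                         # E(y | x)
mean_main = res.predict(which='mean-main')               # mean of the count part
p_zero = res.predict(which='prob-zero')                  # P(y = 0 | x)
pmf = res.predict(which='prob', y_values=np.arange(5))   # P(y = k | x), k = 0..4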
def _derivative_predict(self, params, exog=None, transform='dydx'):
'NotImplemented\n '
raise NotImplementedError | -5,167,511,772,959,260,000 | NotImplemented | statsmodels/discrete/count_model.py | _derivative_predict | CCHiggins/statsmodels | python | def _derivative_predict(self, params, exog=None, transform='dydx'):
'\n '
raise NotImplementedError
def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None):
'NotImplemented\n '
raise NotImplementedError | -8,214,879,038,659,339,000 | NotImplemented | statsmodels/discrete/count_model.py | _derivative_exog | CCHiggins/statsmodels | python | def _derivative_exog(self, params, exog=None, transform='dydx', dummy_idx=None, count_idx=None):
'\n '
raise NotImplementedError
def _deriv_mean_dparams(self, params):
'\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = ((- mu[:, None]) * score_infl)
dmat_main = ((1 - w[:, None]) * score_main)
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat | 402,093,562,052,171,800 | Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector. | statsmodels/discrete/count_model.py | _deriv_mean_dparams | CCHiggins/statsmodels | python | def _deriv_mean_dparams(self, params):
'\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n '
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, (1 - np.finfo(float).eps))
mu = self.model_main.predict(params_main)
score_infl = self.model_infl._deriv_mean_dparams(params_infl)
score_main = self.model_main._deriv_mean_dparams(params_main)
dmat_infl = ((- mu[:, None]) * score_infl)
dmat_main = ((1 - w[:, None]) * score_main)
dmat = np.column_stack((dmat_infl, dmat_main))
return dmat |
def _deriv_score_obs_dendog(self, params):
'derivative of score_obs w.r.t. endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog.\n '
raise NotImplementedError
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if ((y.ndim == 2) and (y.shape[1] == 1)):
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=0.01)
return ds | 2,757,558,158,860,996,000 | derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. | statsmodels/discrete/count_model.py | _deriv_score_obs_dendog | CCHiggins/statsmodels | python | def _deriv_score_obs_dendog(self, params):
'derivative of score_obs w.r.t. endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog.\n '
raise NotImplementedError
from statsmodels.tools.numdiff import _approx_fprime_scalar
endog_original = self.endog
def f(y):
if ((y.ndim == 2) and (y.shape[1] == 1)):
y = y[:, 0]
self.endog = y
self.model_main.endog = y
sf = self.score_obs(params)
self.endog = endog_original
self.model_main.endog = endog_original
return sf
ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=0.01)
return ds |
def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
w = prob_infl
var_ = (((1 - w) * mu) * (1 + (w * mu)))
return var_ | -6,191,421,135,787,848,000 | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | statsmodels/discrete/count_model.py | _predict_var | CCHiggins/statsmodels | python | def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
w = prob_infl
var_ = (((1 - w) * mu) * (1 + (w * mu)))
return var_ |
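The formula above, Var(y | x) = (1 - w) * mu * (1 + w * mu), can be checked against the explicit zero-inflated Poisson mixture; a standalone illustration with made-up scalars:
import numpy as np
from scipy import stats

mu, w = 2.0, 0.25
k = np.arange(200)                                  # effectively the full support
pmf = w * (k == 0) + (1 - w) * stats.poisson.pmf(k, mu)
mean = (k * pmf).sum()
var = ((k - mean) ** 2 * pmf).sum()
print(var, (1 - w) * mu * (1 + w * mu))             # both evaluate to 2.25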
def get_distribution(self, params, exog=None, exog_infl=None, exposure=None, offset=None):
'Get frozen instance of distribution based on predicted parameters.\n\n        Parameters\n        ----------\n        params : array_like\n            The parameters of the model.\n        exog : ndarray, optional\n            Explanatory variables for the main count model.\n            If ``exog`` is None, then the data from the model will be used.\n        exog_infl : ndarray, optional\n            Explanatory variables for the zero-inflation model.\n            ``exog_infl`` has to be provided if ``exog`` was provided unless\n            ``exog_infl`` in the model is only a constant.\n        offset : ndarray, optional\n            Offset is added to the linear predictor of the mean function with\n            coefficient equal to 1.\n            Default is zero if exog is not None, and the model offset if exog\n            is None.\n        exposure : ndarray, optional\n            Log(exposure) is added to the linear predictor of the mean\n            function with coefficient equal to 1. If exposure is specified,\n            then it will be logged by the method. The user does not need to\n            log it first.\n            Default is one if exog is not None, and it is the model exposure\n            if exog is None.\n\n        Returns\n        -------\n        Instance of frozen scipy distribution subclass.\n        '
mu = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which='mean-main')
w = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which='prob-main')
distr = self.distribution(mu[:, None], (1 - w[:, None]))
return distr | -7,243,537,759,134,085,000 | Get frozen instance of distribution based on predicted parameters.
Parameters
----------
params : array_like
The parameters of the model.
exog : ndarray, optional
Explanatory variables for the main count model.
If ``exog`` is None, then the data from the model will be used.
exog_infl : ndarray, optional
Explanatory variables for the zero-inflation model.
``exog_infl`` has to be provided if ``exog`` was provided unless
``exog_infl`` in the model is only a constant.
offset : ndarray, optional
Offset is added to the linear predictor of the mean function with
coefficient equal to 1.
Default is zero if exog is not None, and the model offset if exog
is None.
exposure : ndarray, optional
Log(exposure) is added to the linear predictor of the mean
function with coefficient equal to 1. If exposure is specified,
then it will be logged by the method. The user does not need to
log it first.
Default is one if exog is not None, and it is the model exposure
if exog is None.
Returns
-------
Instance of frozen scipy distribution subclass. | statsmodels/discrete/count_model.py | get_distribution | CCHiggins/statsmodels | python | def get_distribution(self, params, exog=None, exog_infl=None, exposure=None, offset=None):
'Get frozen instance of distribution based on predicted parameters.\n\n        Parameters\n        ----------\n        params : array_like\n            The parameters of the model.\n        exog : ndarray, optional\n            Explanatory variables for the main count model.\n            If ``exog`` is None, then the data from the model will be used.\n        exog_infl : ndarray, optional\n            Explanatory variables for the zero-inflation model.\n            ``exog_infl`` has to be provided if ``exog`` was provided unless\n            ``exog_infl`` in the model is only a constant.\n        offset : ndarray, optional\n            Offset is added to the linear predictor of the mean function with\n            coefficient equal to 1.\n            Default is zero if exog is not None, and the model offset if exog\n            is None.\n        exposure : ndarray, optional\n            Log(exposure) is added to the linear predictor of the mean\n            function with coefficient equal to 1. If exposure is specified,\n            then it will be logged by the method. The user does not need to\n            log it first.\n            Default is one if exog is not None, and it is the model exposure\n            if exog is None.\n\n        Returns\n        -------\n        Instance of frozen scipy distribution subclass.\n        '
mu = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which='mean-main')
w = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which='prob-main')
distr = self.distribution(mu[:, None], (1 - w[:, None]))
return distr |
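A hedged usage sketch, assuming a fitted results instance `res`; the frozen distribution carries one row per observation:
distr = res.model.get_distribution(res.params)
simulated = distr.rvs()          # one simulated count per observation
p_zero = distr.pmf(0)            # per-observation probability of a zero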
def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
alpha = params[(- 1)]
w = prob_infl
p = self.model_main.parameterization
var_ = (((1 - w) * mu) * (((1 + (alpha * (mu ** p))) ** 2) + (w * mu)))
return var_ | -991,814,010,985,005,300 | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | statsmodels/discrete/count_model.py | _predict_var | CCHiggins/statsmodels | python | def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
alpha = params[(- 1)]
w = prob_infl
p = self.model_main.parameterization
var_ = (((1 - w) * mu) * (((1 + (alpha * (mu ** p))) ** 2) + (w * mu)))
return var_ |
def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
alpha = params[(- 1)]
w = prob_infl
p = self.model_main.parameterization
var_ = (((1 - w) * mu) * ((1 + (alpha * (mu ** (p - 1)))) + (w * mu)))
return var_ | -6,530,486,237,952,336,000 | predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters. This is only used to extract extra params
like dispersion parameter.
mu : array_like
Array of mean predictions for main model.
prob_infl : array_like
Array of predicted probabilities of zero-inflation `w`.
Returns
-------
Predicted conditional variance. | statsmodels/discrete/count_model.py | _predict_var | CCHiggins/statsmodels | python | def _predict_var(self, params, mu, prob_infl):
'predict values for conditional variance V(endog | exog)\n\n        Parameters\n        ----------\n        params : array_like\n            The model parameters. This is only used to extract extra params\n            like dispersion parameter.\n        mu : array_like\n            Array of mean predictions for main model.\n        prob_infl : array_like\n            Array of predicted probabilities of zero-inflation `w`.\n\n        Returns\n        -------\n        Predicted conditional variance.\n        '
alpha = params[(- 1)]
w = prob_infl
p = self.model_main.parameterization
var_ = (((1 - w) * mu) * ((1 + (alpha * (mu ** (p - 1)))) + (w * mu)))
return var_ |
def get_influence(self):
'\n        Influence and outlier measures\n\n        See notes section for influence measures that do not apply for\n        zero inflated models.\n\n        Returns\n        -------\n        MLEInfluence\n            The instance has methods to calculate the main influence and\n            outlier measures as attributes.\n\n        See Also\n        --------\n        statsmodels.stats.outliers_influence.MLEInfluence\n\n        Notes\n        -----\n        ZeroInflated models have functions that are not differentiable\n        with respect to sample endog if endog=0. This means that generalized\n        leverage cannot be computed in the usual definition.\n\n        Currently, both the generalized leverage, in `hat_matrix_diag`\n        attribute and studentized residuals are not available. In the influence\n        plot generalized leverage is replaced by a hat matrix diagonal that\n        only takes combined exog into account, computed in the same way as\n        for OLS. This is a measure for exog outliers but does not take\n        specific features of the model into account.\n        '
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) | -3,719,969,506,475,364,000 | Influence and outlier measures
See notes section for influence measures that do not apply for
zero inflated models.
Returns
-------
MLEInfluence
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.MLEInfluence
Notes
-----
ZeroInflated models have functions that are not differentiable
with respect to sample endog if endog=0. This means that generalized
leverage cannot be computed in the usual definition.
Currently, both the generalized leverage, in `hat_matrix_diag`
attribute and studentized residuals are not available. In the influence
plot generalized leverage is replaced by a hat matrix diagonal that
only takes combined exog into account, computed in the same way as
for OLS. This is a measure for exog outliers but does not take
specific features of the model into account. | statsmodels/discrete/count_model.py | get_influence | CCHiggins/statsmodels | python | def get_influence(self):
'\n        Influence and outlier measures\n\n        See notes section for influence measures that do not apply for\n        zero inflated models.\n\n        Returns\n        -------\n        MLEInfluence\n            The instance has methods to calculate the main influence and\n            outlier measures as attributes.\n\n        See Also\n        --------\n        statsmodels.stats.outliers_influence.MLEInfluence\n\n        Notes\n        -----\n        ZeroInflated models have functions that are not differentiable\n        with respect to sample endog if endog=0. This means that generalized\n        leverage cannot be computed in the usual definition.\n\n        Currently, both the generalized leverage, in `hat_matrix_diag`\n        attribute and studentized residuals are not available. In the influence\n        plot generalized leverage is replaced by a hat matrix diagonal that\n        only takes combined exog into account, computed in the same way as\n        for OLS. This is a measure for exog outliers but does not take\n        specific features of the model into account.\n        '
from statsmodels.stats.outliers_influence import MLEInfluence
return MLEInfluence(self) |
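A minimal usage sketch, again assuming a fitted results instance `res`:
infl = res.get_influence()
frame = infl.summary_frame()     # Cook's distance, hat values, etc.
print(frame.head())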
def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') | 2,202,878,501,041,609,000 | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | statsmodels/discrete/count_model.py | get_margeff | CCHiggins/statsmodels | python | def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') |
def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') | 2,202,878,501,041,609,000 | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | statsmodels/discrete/count_model.py | get_margeff | CCHiggins/statsmodels | python | def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') |
def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') | 2,202,878,501,041,609,000 | Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models | statsmodels/discrete/count_model.py | get_margeff | CCHiggins/statsmodels | python | def get_margeff(self, at='overall', method='dydx', atexog=None, dummy=False, count=False):
'Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n '
raise NotImplementedError('not yet implemented for zero inflation') |
def __init__(self, x, y, p):
'\n :param x: The x-coordinate of a chip, between 0 and 255\n :type x: int\n :param y: The y-coordinate of a chip, between 0 and 255\n :type y: int\n :param p: The processor running the dropped packet reinjector, between 0 and 17\n :type p: int\n '
AbstractSCPRequest.__init__(self, SDPHeader(flags=SDPFlag.REPLY_EXPECTED, destination_port=0, destination_cpu=p, destination_chip_x=x, destination_chip_y=y), SCPRequestHeader(command=SCPCommand.CMD_DPRI), argument_1=SCPDPRICommand.GET_STATUS.value) | 52,052,433,146,354,320 | :param x: The x-coordinate of a chip, between 0 and 255
:type x: int
:param y: The y-coordinate of a chip, between 0 and 255
:type y: int
:param p: The processor running the dropped packet reinjector, between 0 and 17
:type p: int | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_dpri_get_status_request.py | __init__ | Roboy/LSM_SpiNNaker_MyoArm | python | def __init__(self, x, y, p):
'\n :param x: The x-coordinate of a chip, between 0 and 255\n :type x: int\n :param y: The y-coordinate of a chip, between 0 and 255\n :type y: int\n :param p: The processor running the dropped packet reinjector, between 0 and 17\n :type p: int\n '
AbstractSCPRequest.__init__(self, SDPHeader(flags=SDPFlag.REPLY_EXPECTED, destination_port=0, destination_cpu=p, destination_chip_x=x, destination_chip_y=y), SCPRequestHeader(command=SCPCommand.CMD_DPRI), argument_1=SCPDPRICommand.GET_STATUS.value) |
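A construction sketch for this request; the class name is assumed from the module path (scp_dpri_get_status_request.py) and the coordinates are illustrative:
# query the dropped packet reinjector status on chip (0, 0), processor 1
request = SCPDPRIGetStatusRequest(0, 0, 1)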
def __init__(self, *args):
' x.__init__(...) initializes x; see x.__class__.__doc__ for signature '
pass | -90,002,593,062,007,400 | x.__init__(...) initializes x; see x.__class__.__doc__ for signature | release/stubs.min/System/Runtime/InteropServices/__init___parts/ComConversionLossAttribute.py | __init__ | YKato521/ironpython-stubs | python | def __init__(self, *args):
' '
pass |
@property
def compressed(self):
' Is this section compressed?\n '
return self._compressed | -6,363,948,235,129,757,000 | Is this section compressed? | elftools/elf/sections.py | compressed | ClangBuiltLinux/pyelftools | python | @property
def compressed(self):
' \n '
return self._compressed |
@property
def data_size(self):
" Return the logical size for this section's data.\n\n This can be different from the .sh_size header field when the section\n is compressed.\n "
return self._decompressed_size | 4,456,647,565,215,163,400 | Return the logical size for this section's data.
This can be different from the .sh_size header field when the section
is compressed. | elftools/elf/sections.py | data_size | ClangBuiltLinux/pyelftools | python | @property
def data_size(self):
" Return the logical size for this section's data.\n\n This can be different from the .sh_size header field when the section\n is compressed.\n "
return self._decompressed_size |
@property
def data_alignment(self):
" Return the logical alignment for this section's data.\n\n This can be different from the .sh_addralign header field when the\n section is compressed.\n "
return self._decompressed_align | -6,104,594,075,586,130,000 | Return the logical alignment for this section's data.
This can be different from the .sh_addralign header field when the
section is compressed. | elftools/elf/sections.py | data_alignment | ClangBuiltLinux/pyelftools | python | @property
def data_alignment(self):
" Return the logical alignment for this section's data.\n\n This can be different from the .sh_addralign header field when the\n section is compressed.\n "
return self._decompressed_align |
def data(self):
' The section data from the file.\n\n Note that data is decompressed if the stored section data is\n compressed.\n '
if (self.header['sh_type'] == 'SHT_NOBITS'):
return (b'\x00' * self.data_size)
if self.compressed:
c_type = self._compression_type
if (c_type == 'ELFCOMPRESS_ZLIB'):
hdr_size = self.structs.Elf_Chdr.sizeof()
self.stream.seek((self['sh_offset'] + hdr_size))
compressed = self.stream.read((self['sh_size'] - hdr_size))
decomp = zlib.decompressobj()
result = decomp.decompress(compressed, self.data_size)
else:
raise ELFCompressionError('Unknown compression type: {:#0x}'.format(c_type))
if (len(result) != self._decompressed_size):
raise ELFCompressionError('Decompressed data is {} bytes long, should be {} bytes long'.format(len(result), self._decompressed_size))
else:
self.stream.seek(self['sh_offset'])
result = self.stream.read(self._decompressed_size)
return result | -3,319,943,269,107,768,000 | The section data from the file.
Note that data is decompressed if the stored section data is
compressed. | elftools/elf/sections.py | data | ClangBuiltLinux/pyelftools | python | def data(self):
' The section data from the file.\n\n Note that data is decompressed if the stored section data is\n compressed.\n '
if (self.header['sh_type'] == 'SHT_NOBITS'):
return (b'\x00' * self.data_size)
if self.compressed:
c_type = self._compression_type
if (c_type == 'ELFCOMPRESS_ZLIB'):
hdr_size = self.structs.Elf_Chdr.sizeof()
self.stream.seek((self['sh_offset'] + hdr_size))
compressed = self.stream.read((self['sh_size'] - hdr_size))
decomp = zlib.decompressobj()
result = decomp.decompress(compressed, self.data_size)
else:
raise ELFCompressionError('Unknown compression type: {:#0x}'.format(c_type))
if (len(result) != self._decompressed_size):
raise ELFCompressionError('Decompressed data is {} bytes long, should be {} bytes long'.format(len(result), self._decompressed_size))
else:
self.stream.seek(self['sh_offset'])
result = self.stream.read(self._decompressed_size)
return result |
def is_null(self):
' Is this a null section?\n '
return False | -1,337,224,107,243,885,000 | Is this a null section? | elftools/elf/sections.py | is_null | ClangBuiltLinux/pyelftools | python | def is_null(self):
' \n '
return False |
def __getitem__(self, name):
' Implement dict-like access to header entries\n '
return self.header[name] | 4,776,510,660,069,088,000 | Implement dict-like access to header entries | elftools/elf/sections.py | __getitem__ | ClangBuiltLinux/pyelftools | python | def __getitem__(self, name):
' \n '
return self.header[name] |
def get_string(self, offset):
' Get the string stored at the given offset in this string table.\n '
table_offset = self['sh_offset']
s = parse_cstring_from_stream(self.stream, (table_offset + offset))
return (s.decode('utf-8', errors='replace') if s else '') | -2,138,778,314,726,144,300 | Get the string stored at the given offset in this string table. | elftools/elf/sections.py | get_string | ClangBuiltLinux/pyelftools | python | def get_string(self, offset):
' \n '
table_offset = self['sh_offset']
s = parse_cstring_from_stream(self.stream, (table_offset + offset))
return (s.decode('utf-8', errors='replace') if s else '')
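A hedged usage sketch with pyelftools; 'example.elf' and the offset are placeholders:
from elftools.elf.elffile import ELFFile

with open('example.elf', 'rb') as f:
    strtab = ELFFile(f).get_section_by_name('.strtab')
    print(strtab.get_string(1))   # string stored at byte offset 1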
def num_symbols(self):
' Number of symbols in the table\n '
return (self['sh_size'] // self['sh_entsize']) | -2,688,675,508,393,189,400 | Number of symbols in the table | elftools/elf/sections.py | num_symbols | ClangBuiltLinux/pyelftools | python | def num_symbols(self):
' \n '
return (self['sh_size'] // self['sh_entsize']) |
def get_symbol(self, n):
' Get the symbol at index #n from the table (Symbol object)\n '
entry_offset = (self['sh_offset'] + (n * self['sh_entsize']))
entry = struct_parse(self.structs.Elf_Sym, self.stream, stream_pos=entry_offset)
name = self.stringtable.get_string(entry['st_name'])
return Symbol(entry, name) | -657,288,017,696,840,000 | Get the symbol at index #n from the table (Symbol object) | elftools/elf/sections.py | get_symbol | ClangBuiltLinux/pyelftools | python | def get_symbol(self, n):
' \n '
entry_offset = (self['sh_offset'] + (n * self['sh_entsize']))
entry = struct_parse(self.structs.Elf_Sym, self.stream, stream_pos=entry_offset)
name = self.stringtable.get_string(entry['st_name'])
return Symbol(entry, name) |
def get_symbol_by_name(self, name):
' Get a symbol(s) by name. Return None if no symbol by the given name\n exists.\n '
if (self._symbol_name_map is None):
self._symbol_name_map = defaultdict(list)
for (i, sym) in enumerate(self.iter_symbols()):
self._symbol_name_map[sym.name].append(i)
symnums = self._symbol_name_map.get(name)
return ([self.get_symbol(i) for i in symnums] if symnums else None) | 1,906,267,310,430,917,000 | Get a symbol(s) by name. Return None if no symbol by the given name
exists. | elftools/elf/sections.py | get_symbol_by_name | ClangBuiltLinux/pyelftools | python | def get_symbol_by_name(self, name):
' Get a symbol(s) by name. Return None if no symbol by the given name\n exists.\n '
if (self._symbol_name_map is None):
self._symbol_name_map = defaultdict(list)
for (i, sym) in enumerate(self.iter_symbols()):
self._symbol_name_map[sym.name].append(i)
symnums = self._symbol_name_map.get(name)
return ([self.get_symbol(i) for i in symnums] if symnums else None) |
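A hedged lookup sketch; 'example.elf' and the symbol name are placeholders:
from elftools.elf.elffile import ELFFile

with open('example.elf', 'rb') as f:
    symtab = ELFFile(f).get_section_by_name('.symtab')
    syms = symtab.get_symbol_by_name('main')   # None if absent
    if syms:
        print(hex(syms[0]['st_value']))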
def iter_symbols(self):
' Yield all the symbols in the table\n '
for i in range(self.num_symbols()):
(yield self.get_symbol(i)) | -7,099,017,408,749,516,000 | Yield all the symbols in the table | elftools/elf/sections.py | iter_symbols | ClangBuiltLinux/pyelftools | python | def iter_symbols(self):
' \n '
for i in range(self.num_symbols()):
(yield self.get_symbol(i)) |
def __getitem__(self, name):
' Implement dict-like access to entries\n '
return self.entry[name] | 2,554,947,221,678,688,000 | Implement dict-like access to entries | elftools/elf/sections.py | __getitem__ | ClangBuiltLinux/pyelftools | python | def __getitem__(self, name):
' \n '
return self.entry[name] |
def num_symbols(self):
' Number of symbols in the table\n '
return ((self['sh_size'] // self['sh_entsize']) - 1) | -3,125,342,592,382,193,700 | Number of symbols in the table | elftools/elf/sections.py | num_symbols | ClangBuiltLinux/pyelftools | python | def num_symbols(self):
' \n '
return ((self['sh_size'] // self['sh_entsize']) - 1) |
def get_symbol(self, n):
' Get the symbol at index #n from the table (Symbol object).\n It begins at 1 and not 0 since the first entry is used to\n store the current version of the syminfo table.\n '
entry_offset = (self['sh_offset'] + (n * self['sh_entsize']))
entry = struct_parse(self.structs.Elf_Sunw_Syminfo, self.stream, stream_pos=entry_offset)
name = self.symboltable.get_symbol(n).name
return Symbol(entry, name) | 2,648,948,556,581,666,000 | Get the symbol at index #n from the table (Symbol object).
It begins at 1 and not 0 since the first entry is used to
store the current version of the syminfo table. | elftools/elf/sections.py | get_symbol | ClangBuiltLinux/pyelftools | python | def get_symbol(self, n):
' Get the symbol at index #n from the table (Symbol object).\n It begins at 1 and not 0 since the first entry is used to\n store the current version of the syminfo table.\n '
entry_offset = (self['sh_offset'] + (n * self['sh_entsize']))
entry = struct_parse(self.structs.Elf_Sunw_Syminfo, self.stream, stream_pos=entry_offset)
name = self.symboltable.get_symbol(n).name
return Symbol(entry, name) |
def iter_symbols(self):
' Yield all the symbols in the table\n '
for i in range(1, (self.num_symbols() + 1)):
(yield self.get_symbol(i)) | -2,949,914,189,310,369,300 | Yield all the symbols in the table | elftools/elf/sections.py | iter_symbols | ClangBuiltLinux/pyelftools | python | def iter_symbols(self):
' \n '
for i in range(1, (self.num_symbols() + 1)):
(yield self.get_symbol(i)) |
def iter_notes(self):
' Yield all the notes in the section. Each result is a dictionary-\n like object with "n_name", "n_type", and "n_desc" fields, amongst\n others.\n '
return iter_notes(self.elffile, self['sh_offset'], self['sh_size']) | 2,130,948,451,512,502,000 | Yield all the notes in the section. Each result is a dictionary-
like object with "n_name", "n_type", and "n_desc" fields, amongst
others. | elftools/elf/sections.py | iter_notes | ClangBuiltLinux/pyelftools | python | def iter_notes(self):
' Yield all the notes in the section. Each result is a dictionary-\n like object with "n_name", "n_type", and "n_desc" fields, amongst\n others.\n '
return iter_notes(self.elffile, self['sh_offset'], self['sh_size']) |
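A hedged sketch that dumps every note in an ELF file; 'example.elf' is a placeholder:
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import NoteSection

with open('example.elf', 'rb') as f:
    for section in ELFFile(f).iter_sections():
        if isinstance(section, NoteSection):
            for note in section.iter_notes():
                print(section.name, note['n_name'], note['n_type'])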
def iter_stabs(self):
' Yield all stab entries. Result type is ELFStructs.Elf_Stabs.\n '
offset = self['sh_offset']
size = self['sh_size']
end = (offset + size)
while (offset < end):
stabs = struct_parse(self.structs.Elf_Stabs, self.stream, stream_pos=offset)
stabs['n_offset'] = offset
offset += self.structs.Elf_Stabs.sizeof()
self.stream.seek(offset)
(yield stabs) | -220,474,480,617,224,350 | Yield all stab entries. Result type is ELFStructs.Elf_Stabs. | elftools/elf/sections.py | iter_stabs | ClangBuiltLinux/pyelftools | python | def iter_stabs(self):
' \n '
offset = self['sh_offset']
size = self['sh_size']
end = (offset + size)
while (offset < end):
stabs = struct_parse(self.structs.Elf_Stabs, self.stream, stream_pos=offset)
stabs['n_offset'] = offset
offset += self.structs.Elf_Stabs.sizeof()
self.stream.seek(offset)
(yield stabs) |
def iter_attributes(self, tag=None):
' Yield all attributes (limit to |tag| if specified).\n '
for attribute in self._make_attributes():
if ((tag is None) or (attribute.tag == tag)):
(yield attribute) | 2,320,219,259,272,997,000 | Yield all attributes (limit to |tag| if specified). | elftools/elf/sections.py | iter_attributes | ClangBuiltLinux/pyelftools | python | def iter_attributes(self, tag=None):
' \n '
for attribute in self._make_attributes():
if ((tag is None) or (attribute.tag == tag)):
(yield attribute) |
@property
def num_attributes(self):
' Number of attributes in the subsubsection.\n '
return (sum((1 for _ in self.iter_attributes())) + 1) | -6,371,965,361,598,495,000 | Number of attributes in the subsubsection. | elftools/elf/sections.py | num_attributes | ClangBuiltLinux/pyelftools | python | @property
def num_attributes(self):
' \n '
return (sum((1 for _ in self.iter_attributes())) + 1) |
@property
def attributes(self):
' List of all attributes in the subsubsection.\n '
return ([self.header] + list(self.iter_attributes())) | -8,697,860,494,321,396,000 | List of all attributes in the subsubsection. | elftools/elf/sections.py | attributes | ClangBuiltLinux/pyelftools | python | @property
def attributes(self):
' \n '
return ([self.header] + list(self.iter_attributes())) |
def _make_attributes(self):
' Create all attributes for this subsubsection except the first one\n which is the header.\n '
end = (self.offset + self.header.value)
self.stream.seek(self.attr_start)
while (self.stream.tell() != end):
(yield ARMAttribute(self.structs, self.stream)) | -5,594,182,459,205,179,000 | Create all attributes for this subsubsection except the first one
which is the header. | elftools/elf/sections.py | _make_attributes | ClangBuiltLinux/pyelftools | python | def _make_attributes(self):
' Create all attributes for this subsubsection except the first one\n which is the header.\n '
end = (self.offset + self.header.value)
self.stream.seek(self.attr_start)
while (self.stream.tell() != end):
(yield ARMAttribute(self.structs, self.stream)) |
def iter_subsubsections(self, scope=None):
' Yield all subsubsections (limit to |scope| if specified).\n '
for subsubsec in self._make_subsubsections():
if ((scope is None) or (subsubsec.header.tag == scope)):
(yield subsubsec) | -1,735,162,264,016,125,000 | Yield all subsubsections (limit to |scope| if specified). | elftools/elf/sections.py | iter_subsubsections | ClangBuiltLinux/pyelftools | python | def iter_subsubsections(self, scope=None):
' \n '
for subsubsec in self._make_subsubsections():
if ((scope is None) or (subsubsec.header.tag == scope)):
(yield subsubsec) |
@property
def num_subsubsections(self):
' Number of subsubsections in the subsection.\n '
return sum((1 for _ in self.iter_subsubsections())) | 5,231,861,895,987,968,000 | Number of subsubsections in the subsection. | elftools/elf/sections.py | num_subsubsections | ClangBuiltLinux/pyelftools | python | @property
def num_subsubsections(self):
' \n '
return sum((1 for _ in self.iter_subsubsections())) |
@property
def subsubsections(self):
' List of all subsubsections in the subsection.\n '
return list(self.iter_subsubsections()) | -6,974,233,147,596,687,000 | List of all subsubsections in the subsection. | elftools/elf/sections.py | subsubsections | ClangBuiltLinux/pyelftools | python | @property
def subsubsections(self):
' \n '
return list(self.iter_subsubsections()) |
def _make_subsubsections(self):
' Create all subsubsections for this subsection.\n '
end = (self.offset + self['length'])
self.stream.seek(self.subsubsec_start)
while (self.stream.tell() != end):
subsubsec = ARMAttributesSubsubsection(self.stream, self.structs, self.stream.tell())
self.stream.seek((self.subsubsec_start + subsubsec.header.value))
(yield subsubsec) | 835,896,700,131,857,900 | Create all subsubsections for this subsection. | elftools/elf/sections.py | _make_subsubsections | ClangBuiltLinux/pyelftools | python | def _make_subsubsections(self):
' \n '
end = (self.offset + self['length'])
self.stream.seek(self.subsubsec_start)
while (self.stream.tell() != end):
subsubsec = ARMAttributesSubsubsection(self.stream, self.structs, self.stream.tell())
self.stream.seek((self.subsubsec_start + subsubsec.header.value))
(yield subsubsec) |
def __getitem__(self, name):
' Implement dict-like access to header entries.\n '
return self.header[name] | -1,310,959,397,063,310,300 | Implement dict-like access to header entries. | elftools/elf/sections.py | __getitem__ | ClangBuiltLinux/pyelftools | python | def __getitem__(self, name):
' \n '
return self.header[name] |
def iter_subsections(self, vendor_name=None):
' Yield all subsections (limit to |vendor_name| if specified).\n '
for subsec in self._make_subsections():
if ((vendor_name is None) or (subsec['vendor_name'] == vendor_name)):
(yield subsec) | 3,568,005,328,186,459,600 | Yield all subsections (limit to |vendor_name| if specified). | elftools/elf/sections.py | iter_subsections | ClangBuiltLinux/pyelftools | python | def iter_subsections(self, vendor_name=None):
' \n '
for subsec in self._make_subsections():
if ((vendor_name is None) or (subsec['vendor_name'] == vendor_name)):
(yield subsec) |
@property
def num_subsections(self):
' Number of subsections in the section.\n '
return sum((1 for _ in self.iter_subsections())) | 8,485,527,661,879,181,000 | Number of subsections in the section. | elftools/elf/sections.py | num_subsections | ClangBuiltLinux/pyelftools | python | @property
def num_subsections(self):
' \n '
return sum((1 for _ in self.iter_subsections())) |
@property
def subsections(self):
' List of all subsections in the section.\n '
return list(self.iter_subsections()) | -5,966,918,885,840,222,000 | List of all subsections in the section. | elftools/elf/sections.py | subsections | ClangBuiltLinux/pyelftools | python | @property
def subsections(self):
' \n '
return list(self.iter_subsections()) |
def _make_subsections(self):
' Create all subsections for this section.\n '
end = (self['sh_offset'] + self.data_size)
self.stream.seek(self.subsec_start)
while (self.stream.tell() != end):
subsec = ARMAttributesSubsection(self.stream, self.structs, self.stream.tell())
self.stream.seek((self.subsec_start + subsec['length']))
(yield subsec) | 5,842,532,078,691,184,000 | Create all subsections for this section. | elftools/elf/sections.py | _make_subsections | ClangBuiltLinux/pyelftools | python | def _make_subsections(self):
' \n '
end = (self['sh_offset'] + self.data_size)
self.stream.seek(self.subsec_start)
while (self.stream.tell() != end):
subsec = ARMAttributesSubsection(self.stream, self.structs, self.stream.tell())
self.stream.seek((self.subsec_start + subsec['length']))
(yield subsec) |
def register_task(self, task, callback_function):
'Register a function with this worker\n\n def function_callback(calling_gearman_worker, current_job):\n return current_job.data\n '
self.worker_abilities[task] = callback_function
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_abilities(self.handler_initial_state['abilities'])
return task | 4,803,041,758,010,575,000 | Register a function with this worker
def function_callback(calling_gearman_worker, current_job):
return current_job.data | client/python3_gearman/worker.py | register_task | aixiwang/gearman_test | python | def register_task(self, task, callback_function):
'Register a function with this worker\n\n def function_callback(calling_gearman_worker, current_job):\n return current_job.data\n '
self.worker_abilities[task] = callback_function
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_abilities(self.handler_initial_state['abilities'])
return task |
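A hedged end-to-end sketch of the worker API shown in this file; the import path is assumed from the repository layout, and the host and task names are illustrative:
from python3_gearman import GearmanWorker

def reverse_task(worker, job):
    return job.data[::-1]

worker = GearmanWorker(['localhost:4730'])   # default gearmand port
worker.set_client_id('example-worker')
worker.register_task('reverse', reverse_task)
worker.work()                                # blocks, polling for jobs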
def unregister_task(self, task):
'Unregister a function with this worker'
self.worker_abilities.pop(task, None)
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_abilities(self.handler_initial_state['abilities'])
return task | 5,574,631,156,250,707,000 | Unregister a function with this worker | client/python3_gearman/worker.py | unregister_task | aixiwang/gearman_test | python | def unregister_task(self, task):
self.worker_abilities.pop(task, None)
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_abilities(self.handler_initial_state['abilities'])
return task |
def set_client_id(self, client_id):
'Notify the server that we should be identified as this client ID'
self.worker_client_id = client_id
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_client_id(self.handler_initial_state['client_id'])
return client_id | -357,448,471,174,599,600 | Notify the server that we should be identified as this client ID | client/python3_gearman/worker.py | set_client_id | aixiwang/gearman_test | python | def set_client_id(self, client_id):
self.worker_client_id = client_id
self._update_initial_state()
for command_handler in self.handler_to_connection_map.keys():
command_handler.set_client_id(self.handler_initial_state['client_id'])
return client_id |
def work(self, poll_timeout=POLL_TIMEOUT_IN_SECONDS):
'Loop indefinitely, complete tasks from all connections.'
continue_working = True
worker_connections = []
def continue_while_connections_alive(any_activity):
return self.after_poll(any_activity)
while continue_working:
worker_connections = self.establish_worker_connections()
continue_working = self.poll_connections_until_stopped(worker_connections, continue_while_connections_alive, timeout=poll_timeout)
for current_connection in worker_connections:
current_connection.close() | -5,721,381,411,356,950,000 | Loop indefinitely, complete tasks from all connections. | client/python3_gearman/worker.py | work | aixiwang/gearman_test | python | def work(self, poll_timeout=POLL_TIMEOUT_IN_SECONDS):
continue_working = True
worker_connections = []
def continue_while_connections_alive(any_activity):
return self.after_poll(any_activity)
while continue_working:
worker_connections = self.establish_worker_connections()
continue_working = self.poll_connections_until_stopped(worker_connections, continue_while_connections_alive, timeout=poll_timeout)
for current_connection in worker_connections:
current_connection.close() |
def establish_worker_connections(self):
'Return a shuffled list of connections that are alive, and try to\n reconnect to dead connections if necessary.'
self.randomized_connections = list(self.connection_list)
random.shuffle(self.randomized_connections)
output_connections = []
for current_connection in self.randomized_connections:
try:
valid_connection = self.establish_connection(current_connection)
output_connections.append(valid_connection)
except ConnectionError:
pass
return output_connections | -699,241,287,962,732,400 | Return a shuffled list of connections that are alive, and try to
reconnect to dead connections if necessary. | client/python3_gearman/worker.py | establish_worker_connections | aixiwang/gearman_test | python | def establish_worker_connections(self):
'Return a shuffled list of connections that are alive, and try to\n reconnect to dead connections if necessary.'
self.randomized_connections = list(self.connection_list)
random.shuffle(self.randomized_connections)
output_connections = []
for current_connection in self.randomized_connections:
try:
valid_connection = self.establish_connection(current_connection)
output_connections.append(valid_connection)
except ConnectionError:
pass
return output_connections |
def after_poll(self, any_activity):
'Polling callback to notify any outside listeners what is going on\n with the GearmanWorker.\n\n Return True to continue polling, False to exit the work loop'
return True | -8,650,101,808,506,423,000 | Polling callback to notify any outside listeners what is going on
with the GearmanWorker.
Return True to continue polling, False to exit the work loop | client/python3_gearman/worker.py | after_poll | aixiwang/gearman_test | python | def after_poll(self, any_activity):
'Polling callback to notify any outside listeners what is going on\n with the GearmanWorker.\n\n Return True to continue polling, False to exit the work loop'
return True |
def handle_error(self, current_connection):
'If we discover that a connection has a problem, we better release\n the job lock'
current_handler = self.connection_to_handler_map.get(current_connection)
if current_handler:
self.set_job_lock(current_handler, lock=False)
super(GearmanWorker, self).handle_error(current_connection) | -3,679,210,255,511,415,300 | If we discover that a connection has a problem, we better release
the job lock | client/python3_gearman/worker.py | handle_error | aixiwang/gearman_test | python | def handle_error(self, current_connection):
'If we discover that a connection has a problem, we better release\n the job lock'
current_handler = self.connection_to_handler_map.get(current_connection)
if current_handler:
self.set_job_lock(current_handler, lock=False)
super(GearmanWorker, self).handle_error(current_connection) |
def send_job_status(self, current_job, numerator, denominator, poll_timeout=None):
'Send a Gearman JOB_STATUS update for an inflight job'
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_status(current_job, numerator=numerator, denominator=denominator)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) | -3,530,889,081,956,161,500 | Send a Gearman JOB_STATUS update for an inflight job | client/python3_gearman/worker.py | send_job_status | aixiwang/gearman_test | python | def send_job_status(self, current_job, numerator, denominator, poll_timeout=None):
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_status(current_job, numerator=numerator, denominator=denominator)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) |
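A hedged sketch of progress reporting from inside a task callback, using the method above; the work loop is illustrative:
def long_task(worker, job):
    total = 10
    for i in range(total):
        # ... perform one unit of work ...
        worker.send_job_status(job, numerator=i + 1, denominator=total)
    return b'done'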
def send_job_failure(self, current_job, poll_timeout=None):
'Removes a job from the queue if it is backgrounded'
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_failure(current_job)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) | -4,447,113,273,108,501,500 | Removes a job from the queue if it is backgrounded | client/python3_gearman/worker.py | send_job_failure | aixiwang/gearman_test | python | def send_job_failure(self, current_job, poll_timeout=None):
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_failure(current_job)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) |
def send_job_exception(self, current_job, data, poll_timeout=None):
'Removes a job from the queue if it is backgrounded'
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_exception(current_job, data=data)
current_handler.send_job_failure(current_job)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) | 732,896,769,008,873,300 | Removes a job from the queue if it is backgrounded | client/python3_gearman/worker.py | send_job_exception | aixiwang/gearman_test | python | def send_job_exception(self, current_job, data, poll_timeout=None):
current_handler = self._get_handler_for_job(current_job)
current_handler.send_job_exception(current_job, data=data)
current_handler.send_job_failure(current_job)
self.wait_until_updates_sent([current_job], poll_timeout=poll_timeout) |