Dataset schema: body (string, 26 to 98.2k chars) · body_hash (int64) · docstring (string, 1 to 16.8k chars) · path (string, 5 to 230 chars) · name (string, 1 to 96 chars) · repository_name (string, 7 to 89 chars) · lang (string, 1 class) · body_without_docstring (string, 20 to 98.2k chars)
@profiler
def create_routes_for_payment(
        self, *,
        amount_msat: int,
        final_total_msat: int,
        invoice_pubkey,
        min_cltv_expiry,
        r_tags,
        invoice_features: int,
        payment_hash,
        payment_secret,
        trampoline_fee_level: int,
        use_two_trampolines: bool,
        fwd_trampoline_onion=None,
        full_path: LNPaymentPath = None) -> Sequence[Tuple[LNPaymentRoute, int]]:
    """Creates multiple routes for splitting a payment over the available
    private channels.

    We first try to conduct the payment over a single channel. If that fails
    and mpp is supported by the receiver, we will split the payment."""
    invoice_features = LnFeatures(invoice_features)
    trampoline_features = LnFeatures.VAR_ONION_OPT
    local_height = self.network.get_local_height()
    active_channels = [chan for chan in self.channels.values()
                       if chan.is_active() and not chan.is_frozen_for_sending()]
    try:
        if not self.channel_db:
            # no gossip: route through a trampoline-capable peer
            for chan in active_channels:
                if not self.is_trampoline_peer(chan.node_id):
                    continue
                if chan.node_id == invoice_pubkey:
                    # we are directly connected to the payee; no trampoline onion needed
                    trampoline_onion = None
                    trampoline_payment_secret = payment_secret
                    trampoline_total_msat = final_total_msat
                    amount_with_fees = amount_msat
                    cltv_delta = min_cltv_expiry
                else:
                    trampoline_onion, amount_with_fees, cltv_delta = create_trampoline_route_and_onion(
                        amount_msat=amount_msat,
                        total_msat=final_total_msat,
                        min_cltv_expiry=min_cltv_expiry,
                        my_pubkey=self.node_keypair.pubkey,
                        invoice_pubkey=invoice_pubkey,
                        invoice_features=invoice_features,
                        node_id=chan.node_id,
                        r_tags=r_tags,
                        payment_hash=payment_hash,
                        payment_secret=payment_secret,
                        local_height=local_height,
                        trampoline_fee_level=trampoline_fee_level,
                        use_two_trampolines=use_two_trampolines)
                    trampoline_payment_secret = os.urandom(32)
                    trampoline_total_msat = amount_with_fees
                if chan.available_to_spend(LOCAL, strict=True) < amount_with_fees:
                    continue
                route = [RouteEdge(
                    start_node=self.node_keypair.pubkey,
                    end_node=chan.node_id,
                    short_channel_id=chan.short_channel_id,
                    fee_base_msat=0,
                    fee_proportional_millionths=0,
                    cltv_expiry_delta=0,
                    node_features=trampoline_features)]
                routes = [(route, amount_with_fees, trampoline_total_msat, amount_msat,
                           cltv_delta, trampoline_payment_secret, trampoline_onion)]
                break
            else:
                raise NoPathFound()
        else:
            route = self.create_route_for_payment(
                amount_msat=amount_msat,
                invoice_pubkey=invoice_pubkey,
                min_cltv_expiry=min_cltv_expiry,
                r_tags=r_tags,
                invoice_features=invoice_features,
                channels=active_channels,
                full_path=full_path)
            routes = [(route, amount_msat, final_total_msat, amount_msat,
                       min_cltv_expiry, payment_secret, fwd_trampoline_onion)]
    except NoPathFound:
        # single-channel payment failed; try to split it if the receiver supports MPP
        if not invoice_features.supports(LnFeatures.BASIC_MPP_OPT):
            raise
        channels_with_funds = {
            (chan.channel_id, chan.node_id): int(chan.available_to_spend(HTLCOwner.LOCAL))
            for chan in active_channels}
        self.logger.info(f'channels_with_funds: {channels_with_funds}')
        use_single_node = not self.channel_db and constants.net is constants.BitcoinMainnet
        split_configurations = suggest_splits(amount_msat, channels_with_funds,
                                              single_node=use_single_node)
        self.logger.info(f'suggest_split {amount_msat} returned {len(split_configurations)} configurations')
        for s in split_configurations:
            self.logger.info(f'trying split configuration: {s[0].values()} rating: {s[1]}')
            routes = []
            try:
                if not self.channel_db:
                    # trampoline: group the parts by trampoline node into buckets
                    buckets = defaultdict(list)
                    for (chan_id, _), part_amount_msat in s[0].items():
                        chan = self.channels[chan_id]
                        if part_amount_msat:
                            buckets[chan.node_id].append((chan_id, part_amount_msat))
                    for node_id, bucket in buckets.items():
                        bucket_amount_msat = sum(x[1] for x in bucket)
                        trampoline_onion, bucket_amount_with_fees, bucket_cltv_delta = create_trampoline_route_and_onion(
                            amount_msat=bucket_amount_msat,
                            total_msat=final_total_msat,
                            min_cltv_expiry=min_cltv_expiry,
                            my_pubkey=self.node_keypair.pubkey,
                            invoice_pubkey=invoice_pubkey,
                            invoice_features=invoice_features,
                            node_id=node_id,
                            r_tags=r_tags,
                            payment_hash=payment_hash,
                            payment_secret=payment_secret,
                            local_height=local_height,
                            trampoline_fee_level=trampoline_fee_level,
                            use_two_trampolines=use_two_trampolines)
                        bucket_payment_secret = os.urandom(32)
                        bucket_fees = bucket_amount_with_fees - bucket_amount_msat
                        self.logger.info(f'bucket_fees {bucket_fees}')
                        # spread the trampoline fee over the parts, limited by
                        # each channel's spendable margin
                        for chan_id, part_amount_msat in bucket:
                            chan = self.channels[chan_id]
                            margin = chan.available_to_spend(LOCAL, strict=True) - part_amount_msat
                            delta_fee = min(bucket_fees, margin)
                            part_amount_msat_with_fees = part_amount_msat + delta_fee
                            bucket_fees -= delta_fee
                            route = [RouteEdge(
                                start_node=self.node_keypair.pubkey,
                                end_node=node_id,
                                short_channel_id=chan.short_channel_id,
                                fee_base_msat=0,
                                fee_proportional_millionths=0,
                                cltv_expiry_delta=0,
                                node_features=trampoline_features)]
                            self.logger.info(f'adding route {part_amount_msat} {delta_fee} {margin}')
                            routes.append((route, part_amount_msat_with_fees, bucket_amount_with_fees,
                                           part_amount_msat, bucket_cltv_delta, bucket_payment_secret,
                                           trampoline_onion))
                        if bucket_fees != 0:
                            self.logger.info('not enough margin to pay trampoline fee')
                            raise NoPathFound()
                else:
                    for (chan_id, _), part_amount_msat in s[0].items():
                        if part_amount_msat:
                            channel = self.channels[chan_id]
                            route = self.create_route_for_payment(
                                amount_msat=part_amount_msat,
                                invoice_pubkey=invoice_pubkey,
                                min_cltv_expiry=min_cltv_expiry,
                                r_tags=r_tags,
                                invoice_features=invoice_features,
                                channels=[channel],
                                full_path=None)
                            routes.append((route, part_amount_msat, final_total_msat, part_amount_msat,
                                           min_cltv_expiry, payment_secret, fwd_trampoline_onion))
                self.logger.info(f'found acceptable split configuration: {list(s[0].values())} rating: {s[1]}')
                break
            except NoPathFound:
                continue
        else:
            raise NoPathFound()
    return routes
-5,895,710,188,087,822,000
Creates multiple routes for splitting a payment over the available private channels. We first try to conduct the payment over a single channel. If that fails and mpp is supported by the receiver, we will split the payment.
electrum/lnworker.py
create_routes_for_payment
jeroz1/electrum-ravencoin-utd
python
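A note on the trickiest step above: when a split configuration is routed through trampolines, the bucket's trampoline fee must be spread over the bucket's parts without exceeding any channel's spendable margin. A minimal standalone sketch of that loop, with hypothetical names and plain ints standing in for Channel objects:

```
# Standalone sketch of the fee-spreading loop from create_routes_for_payment.
# `parts` maps a hypothetical channel id to its part amount (msat);
# `spendable` maps the same ids to the channel's spendable balance (msat).
def spread_bucket_fee(parts, spendable, bucket_fees):
    parts_with_fees = {}
    for chan_id, part_amount_msat in parts.items():
        margin = spendable[chan_id] - part_amount_msat
        delta_fee = min(bucket_fees, margin)  # never exceed the channel's margin
        parts_with_fees[chan_id] = part_amount_msat + delta_fee
        bucket_fees -= delta_fee
    if bucket_fees != 0:
        raise ValueError('not enough margin to pay trampoline fee')
    return parts_with_fees

# Two parts, 1000 msat of fees: the first channel absorbs 800, the second 200.
print(spread_bucket_fee({'a': 5000, 'b': 3000}, {'a': 5800, 'b': 4000}, 1000))
# {'a': 5800, 'b': 3200}
```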
def get_payment_info(self, payment_hash: bytes) -> Optional[PaymentInfo]:
    """returns None if payment_hash is a payment we are forwarding"""
    key = payment_hash.hex()
    with self.lock:
        if key in self.payments:
            amount_msat, direction, status = self.payments[key]
            return PaymentInfo(payment_hash, amount_msat, direction, status)
8,287,857,047,201,814,000
returns None if payment_hash is a payment we are forwarding
electrum/lnworker.py
get_payment_info
jeroz1/electrum-ravencoin-utd
python
def check_received_mpp_htlc(self, payment_secret, short_channel_id, htlc: UpdateAddHtlc,
                            expected_msat: int) -> Optional[bool]:
    """return MPP status: True (accepted), False (expired) or None"""
    payment_hash = htlc.payment_hash
    is_expired, is_accepted, htlc_set = self.received_mpp_htlcs.get(
        payment_secret, (False, False, set()))
    if self.get_payment_status(payment_hash) == PR_PAID:
        # the payment is already marked as paid, so the set was accepted
        is_accepted = True
        is_expired = False
    key = (short_channel_id, htlc)
    if key not in htlc_set:
        htlc_set.add(key)
    if not is_accepted and not is_expired:
        total = sum(_htlc.amount_msat for scid, _htlc in htlc_set)
        first_timestamp = min(_htlc.timestamp for scid, _htlc in htlc_set)
        if self.stopping_soon:
            is_expired = True
        elif time.time() - first_timestamp > self.MPP_EXPIRY:
            is_expired = True
        elif total == expected_msat:
            is_accepted = True
    if is_accepted or is_expired:
        htlc_set.remove(key)
    if len(htlc_set) > 0:
        self.received_mpp_htlcs[payment_secret] = (is_expired, is_accepted, htlc_set)
    elif payment_secret in self.received_mpp_htlcs:
        self.received_mpp_htlcs.pop(payment_secret)
    return True if is_accepted else (False if is_expired else None)
-3,202,478,210,011,258,400
return MPP status: True (accepted), False (expired) or None
electrum/lnworker.py
check_received_mpp_htlc
jeroz1/electrum-ravencoin-utd
python
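The accumulation logic above is easier to follow in isolation: HTLC parts pile up under one payment secret until their sum matches the invoice amount, or the oldest part outlives MPP_EXPIRY. A simplified, self-contained model (hypothetical HtlcPart records; the real code keys the set on (short_channel_id, UpdateAddHtlc) and persists the state per payment secret):

```
import time
from collections import namedtuple
from typing import Optional

# Hypothetical simplified part record for illustration only.
HtlcPart = namedtuple('HtlcPart', 'amount_msat timestamp')

MPP_EXPIRY = 120  # seconds; plays the role of self.MPP_EXPIRY

def mpp_status(parts, expected_msat) -> Optional[bool]:
    """True (accepted), False (expired) or None (still waiting)."""
    total = sum(p.amount_msat for p in parts)
    first_timestamp = min(p.timestamp for p in parts)
    if time.time() - first_timestamp > MPP_EXPIRY:
        return False
    if total == expected_msat:
        return True
    return None

now = time.time()
parts = [HtlcPart(40_000, now), HtlcPart(60_000, now)]
print(mpp_status(parts, expected_msat=100_000))  # True: the set is complete
```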
async def _calc_routing_hints_for_invoice(self, amount_msat: Optional[int]):
    """calculate routing hints (BOLT-11 'r' field)"""
    routing_hints = []
    channels = list(self.channels.values())
    # only consider open channels that are not frozen for receiving
    channels = [chan for chan in channels
                if chan.is_open() and not chan.is_frozen_for_receiving()]
    # rank: active channels first, then by inbound capacity, descending
    channels = sorted(channels,
                      key=lambda chan: (not chan.is_active(), -chan.available_to_spend(REMOTE)))
    channels = channels[:15]
    random.shuffle(channels)
    scid_to_my_channels = {chan.short_channel_id: chan for chan in channels
                           if chan.short_channel_id is not None}
    for chan in channels:
        chan_id = chan.short_channel_id
        assert isinstance(chan_id, bytes), chan_id
        channel_info = get_mychannel_info(chan_id, scid_to_my_channels)
        # fall back to dummy values if we have no channel_update for our channel
        fee_base_msat = fee_proportional_millionths = 0
        cltv_expiry_delta = 1
        missing_info = True
        if channel_info:
            policy = get_mychannel_policy(channel_info.short_channel_id, chan.node_id,
                                          scid_to_my_channels)
            if policy:
                fee_base_msat = policy.fee_base_msat
                fee_proportional_millionths = policy.fee_proportional_millionths
                cltv_expiry_delta = policy.cltv_expiry_delta
                missing_info = False
        if missing_info:
            self.logger.info(f'Warning. Missing channel update for our channel {chan_id}; '
                             f'filling invoice with incorrect data.')
        routing_hints.append(('r', [(chan.node_id, chan_id, fee_base_msat,
                                     fee_proportional_millionths, cltv_expiry_delta)]))
    return routing_hints
-7,572,827,854,677,782,000
calculate routing hints (BOLT-11 'r' field)
electrum/lnworker.py
_calc_routing_hints_for_invoice
jeroz1/electrum-ravencoin-utd
python
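The channel-ranking key above reads backwards at first glance: sorting on (not is_active, -inbound) puts active channels first and, within each group, the largest inbound capacity first. A toy demonstration with made-up channels:

```
from collections import namedtuple

# Hypothetical stand-in for Channel: only the fields the sort key needs.
Chan = namedtuple('Chan', 'scid active inbound_msat')

channels = [
    Chan('aa', True, 5_000),
    Chan('bb', False, 9_000),
    Chan('cc', True, 8_000),
]
# Same key as above: active channels first, then by inbound capacity, descending.
ranked = sorted(channels, key=lambda c: (not c.active, -c.inbound_msat))
print([c.scid for c in ranked])  # ['cc', 'aa', 'bb']
```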
def has_conflicting_backup_with(self, remote_node_id: bytes):
    """Returns whether we have an active channel with this node on another
    device, using the same local node id."""
    channel_backup_peers = [
        cb.node_id for cb in self.channel_backups.values()
        if not cb.is_closed() and cb.get_local_pubkey() == self.node_keypair.pubkey]
    return any(remote_node_id.startswith(cb_peer_nodeid)
               for cb_peer_nodeid in channel_backup_peers)
7,341,898,079,577,866,000
Returns whether we have an active channel with this node on another device, using the same local node id.
electrum/lnworker.py
has_conflicting_backup_with
jeroz1/electrum-ravencoin-utd
python
def get_backend():
    """The backend is this module itself."""
    return Connection()
9,118,483,233,459,801,000
The backend is this module itself.
fm-rest-api/fm/fm/db/sqlalchemy/api.py
get_backend
MarioCarrilloA/fault
python
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """
    with _session_for_read() as session:
        query = session.query(model, *args)
    return query
6,410,123,238,035,086,000
Query helper for simpler session usage. :param session: if present, the session to use
fm-rest-api/fm/fm/db/sqlalchemy/api.py
model_query
MarioCarrilloA/fault
python
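model_query just opens a read session and builds session.query(model, *args). A self-contained illustration of the same pattern against an in-memory SQLite database (toy EventLog model, not the fm schema; the explicit Session factory stands in for _session_for_read):

```
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class EventLog(Base):  # toy stand-in for models.EventLog
    __tablename__ = 'event_log'
    id = Column(Integer, primary_key=True)
    state = Column(String)

engine = create_engine('sqlite://')  # in-memory database
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

def model_query(model, *args):
    # same shape as the helper above, with an explicit session factory
    with Session() as session:
        return session.query(model, *args)

with Session() as session:
    session.add(EventLog(state='log'))
    session.commit()

print(model_query(EventLog).count())  # 1
```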
def add_event_log_filter_by_event_suppression(query, include_suppress):
    """Adds an event_suppression filter to a query.

    Filters results by suppression status.

    :param query: Initial query to add filter to.
    :param include_suppress: Value for filtering results by.
    :return: Modified query.
    """
    query = query.outerjoin(
        models.EventSuppression,
        models.EventLog.event_log_id == models.EventSuppression.alarm_id)
    query = query.add_columns(models.EventSuppression.suppression_status)
    if include_suppress:
        return query
    return query.filter(or_(
        models.EventLog.state == 'log',
        models.EventSuppression.suppression_status == constants.FM_UNSUPPRESSED))
-807,128,944,204,826,800
Adds an event_suppression filter to a query. Filters results by suppression status :param query: Initial query to add filter to. :param include_suppress: Value for filtering results by. :return: Modified query.
fm-rest-api/fm/fm/db/sqlalchemy/api.py
add_event_log_filter_by_event_suppression
MarioCarrilloA/fault
python
def add_alarm_filter_by_event_suppression(query, include_suppress):
    """Adds an event_suppression filter to a query.

    Filters results by suppression status.

    :param query: Initial query to add filter to.
    :param include_suppress: Value for filtering results by.
    :return: Modified query.
    """
    query = query.join(
        models.EventSuppression,
        models.Alarm.alarm_id == models.EventSuppression.alarm_id)
    query = query.add_columns(models.EventSuppression.suppression_status)
    if include_suppress:
        return query
    return query.filter(
        models.EventSuppression.suppression_status == constants.FM_UNSUPPRESSED)
-449,629,066,408,219,000
Adds an event_suppression filter to a query. Filters results by suppression status :param query: Initial query to add filter to. :param include_suppress: Value for filtering results by. :return: Modified query.
fm-rest-api/fm/fm/db/sqlalchemy/api.py
add_alarm_filter_by_event_suppression
MarioCarrilloA/fault
python
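Chained with model_query, the two filter helpers above would be used roughly like this. This is a hypothetical call site, not code from the repo; note that add_columns makes each result row a (model, suppression_status) tuple:

```
# Hypothetical call site: list unsuppressed alarms.
query = model_query(models.Alarm)
query = add_alarm_filter_by_event_suppression(query, include_suppress=False)
for alarm, suppression_status in query.all():
    print(alarm.alarm_id, suppression_status)
```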
def add_alarm_mgmt_affecting_by_event_suppression(query):
    """Adds a mgmt_affecting attribute from event_suppression to query.

    :param query: Initial query.
    :return: Modified query.
    """
    query = query.add_columns(models.EventSuppression.mgmt_affecting)
    return query
5,047,030,622,101,545,000
Adds a mgmt_affecting attribute from event_suppression to query. :param query: Initial query. :return: Modified query.
fm-rest-api/fm/fm/db/sqlalchemy/api.py
add_alarm_mgmt_affecting_by_event_suppression
MarioCarrilloA/fault
python
def add_alarm_degrade_affecting_by_event_suppression(query):
    """Adds a degrade_affecting attribute from event_suppression to query.

    :param query: Initial query.
    :return: Modified query.
    """
    query = query.add_columns(models.EventSuppression.degrade_affecting)
    return query
-8,166,228,253,472,973,000
Adds a degrade_affecting attribute from event_suppression to query. :param query: Initial query. :return: Modified query.
fm-rest-api/fm/fm/db/sqlalchemy/api.py
add_alarm_degrade_affecting_by_event_suppression
MarioCarrilloA/fault
python
def test_modify_left_param(self):
    """inner function"""
    inp = self._pipeline.parallelize([[1, 2, 3], [6, 5, 4]])

    def _sum(x, y):
        x[0] += y[0]
        x[1] += y[1]
        x[2] += y[2]
        return x

    result = transforms.union(inp.reduce(_sum), inp.reduce(_sum)).get()
    self.assertEqual([[7, 7, 7], [7, 7, 7]], result)
6,170,765,211,557,646,000
inner function
bigflow_python/python/bigflow/transform_impls/test/reduce_test.py
test_modify_left_param
aiplat/bigflow
python
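The test is really about a contract of reduce: the binary function may mutate its left argument in place, as long as it returns it. The same pattern is visible with the standard library's functools.reduce, outside Bigflow:

```
from functools import reduce

def _sum(x, y):
    # mutates the running value in place, exactly like the test's _sum
    for i, v in enumerate(y):
        x[i] += v
    return x

print(reduce(_sum, [[1, 2, 3], [6, 5, 4]]))  # [7, 7, 7]
# Caveat: the first input list is mutated, which is what the test verifies
# is safe under Bigflow's reduce semantics.
```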
def train(opt):
    """dataset preparation"""
    if not opt.data_filtering_off:
        print('Filtering the images containing characters which are not in opt.character')
        print('Filtering the images whose label is longer than opt.batch_max_length')
    opt.select_data = opt.select_data.split('-')
    opt.batch_ratio = opt.batch_ratio.split('-')
    train_dataset = Batch_Balanced_Dataset(opt)

    log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
    AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
    valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=opt.batch_size, shuffle=True,
        num_workers=int(opt.workers), collate_fn=AlignCollate_valid, pin_memory=True)
    log.write(valid_dataset_log)
    print('-' * 80)
    log.write('-' * 80 + '\n')
    log.close()

    # model configuration
    if 'CTC' in opt.Prediction:
        converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)
    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel,
          opt.output_channel, opt.hidden_size, opt.num_class, opt.batch_max_length,
          opt.Transformation, opt.FeatureExtraction, opt.SequenceModeling, opt.Prediction)

    # weight initialization
    for name, param in model.named_parameters():
        if 'localization_fc2' in name:
            print(f'Skip {name} as it is already initialized')
            continue
        try:
            if 'bias' in name:
                init.constant_(param, 0.0)
            elif 'weight' in name:
                init.kaiming_normal_(param)
        except Exception as e:
            # kaiming init fails for 1-d params (e.g. batchnorm weights): fill with 1
            if 'weight' in name:
                param.data.fill_(1)
            continue

    model = torch.nn.DataParallel(model).to(device)
    model.train()
    if opt.saved_model != '':
        print(f'loading pretrained model from {opt.saved_model}')
        if opt.FT:
            model.load_state_dict(torch.load(opt.saved_model), strict=False)
        else:
            model.load_state_dict(torch.load(opt.saved_model))
    print('Model:')
    print(model)

    # setup loss
    if 'CTC' in opt.Prediction:
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # index 0 is ignored
    loss_avg = Averager()

    # only optimize parameters that require gradients
    filtered_parameters = []
    params_num = []
    for p in filter(lambda p: p.requires_grad, model.parameters()):
        filtered_parameters.append(p)
        params_num.append(np.prod(p.size()))
    print('Trainable params num : ', sum(params_num))

    if opt.adam:
        optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
    else:
        optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
    print('Optimizer:')
    print(optimizer)

    # final options
    with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
        opt_log = '------------ Options -------------\n'
        args = vars(opt)
        for k, v in args.items():
            opt_log += f'{str(k)}: {str(v)}\n'
        opt_log += '---------------------------------------\n'
        print(opt_log)
        opt_file.write(opt_log)

    # start training
    start_iter = 0
    if opt.saved_model != '':
        try:
            start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
            print(f'continue to train, start_iter: {start_iter}')
        except:
            pass
    start_time = time.time()
    best_accuracy = -1
    best_norm_ED = -1
    iteration = start_iter

    while True:
        # train part
        image_tensors, labels = train_dataset.get_batch()
        image = image_tensors.to(device)
        text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
        batch_size = image.size(0)

        if 'CTC' in opt.Prediction:
            preds = model(image, text)
            preds_size = torch.IntTensor([preds.size(1)] * batch_size)
            preds = preds.log_softmax(2).permute(1, 0, 2)
            cost = criterion(preds, text, preds_size, length)
        else:
            preds = model(image, text[:, :-1])  # input text up to the last token
            target = text[:, 1:]  # target shifted by one token
            cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))

        model.zero_grad()
        cost.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
        optimizer.step()
        loss_avg.add(cost)

        # validation part
        if (iteration + 1) % opt.valInterval == 0 or iteration == 0:
            elapsed_time = time.time() - start_time
            with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
                model.eval()
                with torch.no_grad():
                    (valid_loss, current_accuracy, current_norm_ED, preds, confidence_score,
                     labels, infer_time, length_of_data) = validation(
                        model, criterion, valid_loader, converter, opt)
                model.train()

                loss_log = (f'[{iteration + 1}/{opt.num_iter}] '
                            f'Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, '
                            f'Elapsed_time: {elapsed_time:0.5f}')
                loss_avg.reset()

                current_model_log = (f"{'Current_accuracy':17s}: {current_accuracy:0.3f}, "
                                     f"{'Current_norm_ED':17s}: {current_norm_ED:0.2f}")

                # keep the best models seen so far
                if current_accuracy > best_accuracy:
                    best_accuracy = current_accuracy
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
                if current_norm_ED > best_norm_ED:
                    best_norm_ED = current_norm_ED
                    torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
                best_model_log = (f"{'Best_accuracy':17s}: {best_accuracy:0.3f}, "
                                  f"{'Best_norm_ED':17s}: {best_norm_ED:0.2f}")

                loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
                print(loss_model_log)
                log.write(loss_model_log + '\n')

                # show some predicted results
                dashed_line = '-' * 80
                head = f"{'Ground Truth':25s} | {'Prediction':25s} | Confidence Score & T/F"
                predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
                for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
                    if 'Attn' in opt.Prediction:
                        gt = gt[:gt.find('[s]')]
                        pred = pred[:pred.find('[s]')]
                    predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
                predicted_result_log += f'{dashed_line}'
                print(predicted_result_log)
                log.write(predicted_result_log + '\n')

        # save a checkpoint every 1e+5 iterations
        if (iteration + 1) % 100000.0 == 0:
            torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration + 1}.pth')

        if (iteration + 1) == opt.num_iter:
            print('end the training')
            sys.exit()
        iteration += 1
-7,748,018,635,452,727,000
dataset preparation
train.py
train
unanan/deep-text-recognition-benchmark-mnn-ncnn
python
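In the CTC branch above, the model output of shape (batch, time, classes) is log-softmaxed and permuted to (time, batch, classes) before being handed to CTCLoss, which also wants per-sample input and target lengths. A minimal shape check with random tensors (hypothetical sizes, no Model involved):

```
import torch

batch, T, C = 4, 26, 37  # hypothetical: 26 time steps, 37 character classes
preds = torch.randn(batch, T, C)            # stand-in for model(image, text)
text = torch.randint(1, C, (batch, 10))     # stand-in for converter.encode output
length = torch.full((batch,), 10, dtype=torch.int32)

criterion = torch.nn.CTCLoss(zero_infinity=True)
preds_size = torch.IntTensor([preds.size(1)] * batch)  # every sample uses all T steps
log_probs = preds.log_softmax(2).permute(1, 0, 2)      # (T, batch, C), as CTCLoss expects
cost = criterion(log_probs, text, preds_size, length)
print(cost.shape)  # torch.Size([]) -- a scalar loss
```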
def tomography_basis(basis, prep_fun=None, meas_fun=None):
    """
    Generate a TomographyBasis object.

    See TomographyBasis for further details.

    Args:
        prep_fun (callable) optional: the function which adds preparation
            gates to a circuit.
        meas_fun (callable) optional: the function which adds measurement
            gates to a circuit.

    Returns:
        TomographyBasis: A tomography basis.
    """
    ret = TomographyBasis(basis)
    ret.prep_fun = prep_fun
    ret.meas_fun = meas_fun
    return ret
226,170,564,236,531,970
Generate a TomographyBasis object. See TomographyBasis for further details. Args: prep_fun (callable) optional: the function which adds preparation gates to a circuit. meas_fun (callable) optional: the function which adds measurement gates to a circuit. Returns: TomographyBasis: A tomography basis.
qiskit/tools/qcvv/tomography.py
tomography_basis
filemaster/qiskit-terra
python
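Presumably the module's built-in bases are assembled with this factory. A hedged sketch of how a Pauli basis could be wired to the preparation/measurement helpers defined below, assuming TomographyBasis accepts a dict mapping operator labels to projector lists (the real PAULI_BASIS construction is not in this excerpt):

```
import numpy as np

# Projectors onto the +1/-1 eigenstates of X, Y, Z (assumed structure).
pauli_projectors = {
    'X': [np.array([[0.5, 0.5], [0.5, 0.5]]),      # |+><+|
          np.array([[0.5, -0.5], [-0.5, 0.5]])],   # |-><-|
    'Y': [np.array([[0.5, -0.5j], [0.5j, 0.5]]),   # |+i><+i|
          np.array([[0.5, 0.5j], [-0.5j, 0.5]])],  # |-i><-i|
    'Z': [np.diag([1, 0]), np.diag([0, 1])],       # |0><0|, |1><1|
}
PAULI_BASIS = tomography_basis(pauli_projectors,
                               prep_fun=__pauli_prep_gates,
                               meas_fun=__pauli_meas_gates)
```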
def __pauli_prep_gates(circuit, qreg, op):
    """
    Add state preparation gates to a circuit.
    """
    bas, proj = op
    if bas not in ['X', 'Y', 'Z']:
        raise QiskitError("There's no X, Y or Z basis for this Pauli preparation")
    if bas == 'X':
        if proj == 1:
            circuit.u2(np.pi, np.pi, qreg)   # prepare |->, the -1 eigenstate of X
        else:
            circuit.u2(0.0, np.pi, qreg)     # prepare |+> (Hadamard)
    elif bas == 'Y':
        if proj == 1:
            circuit.u2(-0.5 * np.pi, np.pi, qreg)  # prepare |-i>
        else:
            circuit.u2(0.5 * np.pi, np.pi, qreg)   # prepare |+i>
    elif bas == 'Z' and proj == 1:
        circuit.u3(np.pi, 0.0, np.pi, qreg)        # X gate: prepare |1>
-5,024,664,810,581,299,000
Add state preparation gates to a circuit.
qiskit/tools/qcvv/tomography.py
__pauli_prep_gates
filemaster/qiskit-terra
python
def __pauli_meas_gates(circuit, qreg, op):
    """
    Add state measurement gates to a circuit.
    """
    if op not in ['X', 'Y', 'Z']:
        raise QiskitError("There's no X, Y or Z basis for this Pauli measurement")
    if op == 'X':
        circuit.u2(0.0, np.pi, qreg)        # H: rotate the X eigenbasis onto Z
    elif op == 'Y':
        circuit.u2(0.0, 0.5 * np.pi, qreg)  # H.Sdg: rotate the Y eigenbasis onto Z
-7,524,631,782,530,808,000
Add state measurement gates to a circuit.
qiskit/tools/qcvv/tomography.py
__pauli_meas_gates
filemaster/qiskit-terra
python
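The two rotations above can be checked numerically. In the legacy u2(phi, lambda) convention these helpers rely on, u2(0, pi) is the Hadamard (X basis to Z) and u2(0, pi/2) equals H.Sdg (Y basis to Z). A self-contained verification:

```
import numpy as np

def u2(phi, lam):
    # generic u2 matrix in the legacy convention assumed here
    return np.array([[1, -np.exp(1j * lam)],
                     [np.exp(1j * phi), np.exp(1j * (phi + lam))]]) / np.sqrt(2)

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
Sdg = np.diag([1, -1j])
print(np.allclose(u2(0, np.pi), H))            # True: X measurement rotation is H
print(np.allclose(u2(0, np.pi / 2), H @ Sdg))  # True: Y measurement rotation is H.Sdg
```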
def __sic_prep_gates(circuit, qreg, op):
    """
    Add state preparation gates to a circuit.
    """
    bas, proj = op
    if bas != 'S':
        raise QiskitError('Not in SIC basis!')
    theta = -2 * np.arctan(np.sqrt(2))
    if proj == 1:
        circuit.u3(theta, np.pi, 0.0, qreg)
    elif proj == 2:
        circuit.u3(theta, np.pi / 3, 0.0, qreg)
    elif proj == 3:
        circuit.u3(theta, -np.pi / 3, 0.0, qreg)
8,698,999,360,930,628,000
Add state preparation gates to a circuit.
qiskit/tools/qcvv/tomography.py
__sic_prep_gates
filemaster/qiskit-terra
python
def tomography_set(meas_qubits, meas_basis='Pauli', prep_qubits=None, prep_basis=None):
    """
    Generate a dictionary of tomography experiment configurations.

    This returns a data structure that is used by other tomography functions
    to generate state and process tomography circuits, and extract tomography
    data from results after execution on a backend.

    Quantum State Tomography:
        By default it will return a set for performing Quantum State
        Tomography where individual qubits are measured in the Pauli basis.
        A custom measurement basis may also be used by defining a user
        `tomography_basis` and passing this in for the `meas_basis` argument.

    Quantum Process Tomography:
        A quantum process tomography set is created by specifying a preparation
        basis along with a measurement basis. The preparation basis may be a
        user defined `tomography_basis`, or one of the two built-in bases 'SIC'
        or 'Pauli'.
        - SIC: Is a minimal symmetric informationally complete preparation
          basis for 4 states for each qubit (4 ^ number of qubits total
          preparation states). These correspond to the |0> state and the 3
          other vertices of a tetrahedron on the Bloch-sphere.
        - Pauli: Is a tomographically overcomplete preparation basis of the six
          eigenstates of the 3 Pauli operators (6 ^ number of qubits
          total preparation states).

    Args:
        meas_qubits (list): The qubits being measured.
        meas_basis (tomography_basis or str): The qubit measurement basis.
            The default value is 'Pauli'.
        prep_qubits (list or None): The qubits being prepared. If None then
            meas_qubits will be used for process tomography experiments.
        prep_basis (tomography_basis or None): The optional qubit preparation
            basis. If no basis is specified state tomography will be performed
            instead of process tomography. A built-in basis may be specified by
            'SIC' or 'Pauli' (SIC basis recommended for > 2 qubits).

    Returns:
        dict: A dict of tomography configurations that can be parsed by
        `create_tomography_circuits` and `tomography_data` functions
        for implementing quantum tomography experiments. This output contains
        fields "qubits", "meas_basis", "circuits". It may also optionally
        contain a field "prep_basis" for process tomography experiments.
        ```
        {
            'qubits': qubits (list[ints]),
            'meas_basis': meas_basis (tomography_basis),
            'circuit_labels': (list[string]),
            'circuits': (list[dict])  # prep and meas configurations
            # optionally for process tomography experiments:
            'prep_basis': prep_basis (tomography_basis)
        }
        ```

    Raises:
        QiskitError: if the Qubits argument is not a list.
    """
    if not isinstance(meas_qubits, list):
        raise QiskitError('Qubits argument must be a list')
    num_of_qubits = len(meas_qubits)
    if prep_qubits is None:
        prep_qubits = meas_qubits
    if not isinstance(prep_qubits, list):
        raise QiskitError('prep_qubits argument must be a list')
    if len(prep_qubits) != len(meas_qubits):
        raise QiskitError('meas_qubits and prep_qubits are different length')
    if isinstance(meas_basis, str):
        if meas_basis.lower() == 'pauli':
            meas_basis = PAULI_BASIS
    if isinstance(prep_basis, str):
        if prep_basis.lower() == 'pauli':
            prep_basis = PAULI_BASIS
        elif prep_basis.lower() == 'sic':
            prep_basis = SIC_BASIS

    circuits = []
    circuit_labels = []

    # state tomography: iterate over measurement configurations only
    if prep_basis is None:
        for meas_product in product(meas_basis.keys(), repeat=num_of_qubits):
            meas = dict(zip(meas_qubits, meas_product))
            circuits.append({'meas': meas})
            label = '_meas_'
            for qubit, op in meas.items():
                label += '%s(%d)' % (op[0], qubit)
            circuit_labels.append(label)
        return {'qubits': meas_qubits,
                'circuits': circuits,
                'circuit_labels': circuit_labels,
                'meas_basis': meas_basis}

    # process tomography: iterate over preparation x measurement configurations
    num_of_s = len(list(prep_basis.values())[0])
    plst_single = [(b, s) for b in prep_basis.keys() for s in range(num_of_s)]
    for plst_product in product(plst_single, repeat=num_of_qubits):
        for meas_product in product(meas_basis.keys(), repeat=num_of_qubits):
            prep = dict(zip(prep_qubits, plst_product))
            meas = dict(zip(meas_qubits, meas_product))
            circuits.append({'prep': prep, 'meas': meas})
            label = '_prep_'
            for qubit, op in prep.items():
                label += '%s%d(%d)' % (op[0], op[1], qubit)
            label += '_meas_'
            for qubit, op in meas.items():
                label += '%s(%d)' % (op[0], qubit)
            circuit_labels.append(label)
    return {'qubits': meas_qubits,
            'circuits': circuits,
            'circuit_labels': circuit_labels,
            'prep_basis': prep_basis,
            'meas_basis': meas_basis}
-3,920,841,991,119,536,600
Generate a dictionary of tomography experiment configurations. This returns a data structure that is used by other tomography functions to generate state and process tomography circuits, and extract tomography data from results after execution on a backend. Quantum State Tomography: By default it will return a set for performing Quantum State Tomography where individual qubits are measured in the Pauli basis. A custom measurement basis may also be used by defining a user `tomography_basis` and passing this in for the `meas_basis` argument. Quantum Process Tomography: A quantum process tomography set is created by specifying a preparation basis along with a measurement basis. The preparation basis may be a user defined `tomography_basis`, or one of the two built-in bases 'SIC' or 'Pauli'. - SIC: Is a minimal symmetric informationally complete preparation basis for 4 states for each qubit (4 ^ number of qubits total preparation states). These correspond to the |0> state and the 3 other vertices of a tetrahedron on the Bloch-sphere. - Pauli: Is a tomographically overcomplete preparation basis of the six eigenstates of the 3 Pauli operators (6 ^ number of qubits total preparation states). Args: meas_qubits (list): The qubits being measured. meas_basis (tomography_basis or str): The qubit measurement basis. The default value is 'Pauli'. prep_qubits (list or None): The qubits being prepared. If None then meas_qubits will be used for process tomography experiments. prep_basis (tomography_basis or None): The optional qubit preparation basis. If no basis is specified state tomography will be performed instead of process tomography. A built-in basis may be specified by 'SIC' or 'Pauli' (SIC basis recommended for > 2 qubits). Returns: dict: A dict of tomography configurations that can be parsed by `create_tomography_circuits` and `tomography_data` functions for implementing quantum tomography experiments. This output contains fields "qubits", "meas_basis", "circuits". It may also optionally contain a field "prep_basis" for process tomography experiments. ``` { 'qubits': qubits (list[ints]), 'meas_basis': meas_basis (tomography_basis), 'circuit_labels': (list[string]), 'circuits': (list[dict]) # prep and meas configurations # optionally for process tomography experiments: 'prep_basis': prep_basis (tomography_basis) } ``` Raises: QiskitError: if the Qubits argument is not a list.
qiskit/tools/qcvv/tomography.py
tomography_set
filemaster/qiskit-terra
python
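The circuit counts follow directly from the product loops above: 3^n measurement settings for state tomography, and (states per basis label x labels)^n preparations times 3^n measurements for process tomography. A usage sketch, assuming PAULI_BASIS and SIC_BASIS are defined as in this module:

```
# State tomography on 2 qubits: 3**2 == 9 measurement circuits.
state_set = tomography_set([0, 1], meas_basis='Pauli')
print(len(state_set['circuits']))  # 9

# Process tomography on 1 qubit with SIC preparations (4 states per the
# docstring): 4 * 3 == 12 circuits.
process_set = tomography_set([0], meas_basis='Pauli', prep_basis='SIC')
print(len(process_set['circuits']))  # 12
```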
def state_tomography_set(qubits, meas_basis='Pauli'):
    """
    Generate a dictionary of state tomography experiment configurations.

    This returns a data structure that is used by other tomography functions
    to generate state and process tomography circuits, and extract tomography
    data from results after execution on a backend.

    Quantum State Tomography:
        By default it will return a set for performing Quantum State
        Tomography where individual qubits are measured in the Pauli basis.
        A custom measurement basis may also be used by defining a user
        `tomography_basis` and passing this in for the `meas_basis` argument.

    Quantum Process Tomography:
        A quantum process tomography set is created by specifying a preparation
        basis along with a measurement basis. The preparation basis may be a
        user defined `tomography_basis`, or one of the two built-in bases 'SIC'
        or 'Pauli'.
        - SIC: Is a minimal symmetric informationally complete preparation
          basis for 4 states for each qubit (4 ^ number of qubits total
          preparation states). These correspond to the |0> state and the 3
          other vertices of a tetrahedron on the Bloch-sphere.
        - Pauli: Is a tomographically overcomplete preparation basis of the six
          eigenstates of the 3 Pauli operators (6 ^ number of qubits
          total preparation states).

    Args:
        qubits (list): The qubits being measured.
        meas_basis (tomography_basis or str): The qubit measurement basis.
            The default value is 'Pauli'.

    Returns:
        dict: A dict of tomography configurations that can be parsed by
        `create_tomography_circuits` and `tomography_data` functions
        for implementing quantum tomography experiments. This output contains
        fields "qubits", "meas_basis", "circuits".
        ```
        {
            'qubits': qubits (list[ints]),
            'meas_basis': meas_basis (tomography_basis),
            'circuit_labels': (list[string]),
            'circuits': (list[dict])  # prep and meas configurations
        }
        ```
    """
    return tomography_set(qubits, meas_basis=meas_basis)
-6,288,560,604,954,466,000
Generate a dictionary of state tomography experiment configurations. This returns a data structure that is used by other tomography functions to generate state and process tomography circuits, and extract tomography data from results after execution on a backend. Quantum State Tomography: By default it will return a set for performing Quantum State Tomography where individual qubits are measured in the Pauli basis. A custom measurement basis may also be used by defining a user `tomography_basis` and passing this in for the `meas_basis` argument. Quantum Process Tomography: A quantum process tomography set is created by specifying a preparation basis along with a measurement basis. The preparation basis may be a user defined `tomography_basis`, or one of the two built-in bases 'SIC' or 'Pauli'. - SIC: Is a minimal symmetric informationally complete preparation basis for 4 states for each qubit (4 ^ number of qubits total preparation states). These correspond to the |0> state and the 3 other vertices of a tetrahedron on the Bloch-sphere. - Pauli: Is a tomographically overcomplete preparation basis of the six eigenstates of the 3 Pauli operators (6 ^ number of qubits total preparation states). Args: qubits (list): The qubits being measured. meas_basis (tomography_basis or str): The qubit measurement basis. The default value is 'Pauli'. Returns: dict: A dict of tomography configurations that can be parsed by `create_tomography_circuits` and `tomography_data` functions for implementing quantum tomography experiments. This output contains fields "qubits", "meas_basis", "circuits". ``` { 'qubits': qubits (list[ints]), 'meas_basis': meas_basis (tomography_basis), 'circuit_labels': (list[string]), 'circuits': (list[dict]) # prep and meas configurations } ```
qiskit/tools/qcvv/tomography.py
state_tomography_set
filemaster/qiskit-terra
python
def process_tomography_set(meas_qubits, meas_basis='Pauli', prep_qubits=None, prep_basis='SIC'): '\n Generate a dictionary of process tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis \'SIC\'\n or \'Pauli\'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is \'Pauli\'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or str): The qubit preparation basis.\n The default value is \'SIC\'.\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. This output contains\n fields "qubits", "meas_basis", "prep_basus", circuits".\n ```\n {\n \'qubits\': qubits (list[ints]),\n \'meas_basis\': meas_basis (tomography_basis),\n \'prep_basis\': prep_basis (tomography_basis),\n \'circuit_labels\': (list[string]),\n \'circuits\': (list[dict]) # prep and meas configurations\n }\n ```\n ' return tomography_set(meas_qubits, meas_basis=meas_basis, prep_qubits=prep_qubits, prep_basis=prep_basis)
2,854,921,193,160,348,000
Generate a dictionary of process tomography experiment configurations. This returns a data structure that is used by other tomography functions to generate state and process tomography circuits, and extract tomography data from results after execution on a backend. A quantum process tomography set is created by specifying a preparation basis along with a measurement basis. The preparation basis may be a user-defined `tomography_basis`, or one of the two built-in bases 'SIC' or 'Pauli'. - SIC: a minimal symmetric informationally complete preparation basis of 4 states for each qubit (4 ^ number of qubits total preparation states). These correspond to the |0> state and the 3 other vertices of a tetrahedron on the Bloch sphere. - Pauli: a tomographically overcomplete preparation basis of the six eigenstates of the 3 Pauli operators (6 ^ number of qubits total preparation states). Args: meas_qubits (list): The qubits being measured. meas_basis (tomography_basis or str): The qubit measurement basis. The default value is 'Pauli'. prep_qubits (list or None): The qubits being prepared. If None then meas_qubits will be used for process tomography experiments. prep_basis (tomography_basis or str): The qubit preparation basis. The default value is 'SIC'. Returns: dict: A dict of tomography configurations that can be parsed by the `create_tomography_circuits` and `tomography_data` functions for implementing quantum tomography experiments. This output contains the fields "qubits", "meas_basis", "prep_basis", "circuit_labels", and "circuits". ``` { 'qubits': qubits (list[int]), 'meas_basis': meas_basis (tomography_basis), 'prep_basis': prep_basis (tomography_basis), 'circuit_labels': (list[string]), 'circuits': (list[dict]) # prep and meas configurations } ```
qiskit/tools/qcvv/tomography.py
process_tomography_set
filemaster/qiskit-terra
python
def process_tomography_set(meas_qubits, meas_basis='Pauli', prep_qubits=None, prep_basis='SIC'): '\n Generate a dictionary of process tomography experiment configurations.\n\n This returns a data structure that is used by other tomography functions\n to generate state and process tomography circuits, and extract tomography\n data from results after execution on a backend.\n\n A quantum process tomography set is created by specifying a preparation\n basis along with a measurement basis. The preparation basis may be a\n user defined `tomography_basis`, or one of the two built in basis \'SIC\'\n or \'Pauli\'.\n - SIC: Is a minimal symmetric informationally complete preparation\n basis for 4 states for each qubit (4 ^ number of qubits total\n preparation states). These correspond to the |0> state and the 3\n other vertices of a tetrahedron on the Bloch-sphere.\n - Pauli: Is a tomographically overcomplete preparation basis of the six\n eigenstates of the 3 Pauli operators (6 ^ number of qubits\n total preparation states).\n\n Args:\n meas_qubits (list): The qubits being measured.\n meas_basis (tomography_basis or str): The qubit measurement basis.\n The default value is \'Pauli\'.\n prep_qubits (list or None): The qubits being prepared. If None then\n meas_qubits will be used for process tomography experiments.\n prep_basis (tomography_basis or str): The qubit preparation basis.\n The default value is \'SIC\'.\n\n Returns:\n dict: A dict of tomography configurations that can be parsed by\n `create_tomography_circuits` and `tomography_data` functions\n for implementing quantum tomography experiments. This output contains\n fields "qubits", "meas_basis", "prep_basus", circuits".\n ```\n {\n \'qubits\': qubits (list[ints]),\n \'meas_basis\': meas_basis (tomography_basis),\n \'prep_basis\': prep_basis (tomography_basis),\n \'circuit_labels\': (list[string]),\n \'circuits\': (list[dict]) # prep and meas configurations\n }\n ```\n ' return tomography_set(meas_qubits, meas_basis=meas_basis, prep_qubits=prep_qubits, prep_basis=prep_basis)
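Both set constructors are thin wrappers around `tomography_set`. A minimal usage sketch, assuming the module at qiskit/tools/qcvv/tomography.py (the path field above) is importable from an era-appropriate qiskit-terra install:
```
# Minimal usage sketch; assumes an old qiskit-terra where
# qiskit.tools.qcvv.tomography still exists (see the path field above).
from qiskit.tools.qcvv import tomography as tomo

# State tomography of qubits 0 and 1 in the default Pauli basis:
state_set = tomo.state_tomography_set([0, 1])
print(sorted(state_set.keys()))     # includes 'qubits', 'meas_basis', 'circuits'
print(len(state_set['circuits']))   # 3^2 = 9 measurement configurations

# Process tomography of qubit 0 with SIC preparations, Pauli measurements:
proc_set = tomo.process_tomography_set([0])
print(len(proc_set['circuits']))    # 4^1 * 3^1 = 12 prep/meas configurations
```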
def tomography_circuit_names(tomo_set, name=''): '\n Return a list of tomography circuit names.\n\n The returned list is the same as the one returned by\n `create_tomography_circuits` and can be used by a QuantumProgram\n to execute tomography circuits and extract measurement results.\n\n Args:\n tomo_set (tomography_set): a tomography set generated by\n `tomography_set`.\n name (str): the name of the base QuantumCircuit used by the\n tomography experiment.\n\n Returns:\n list: A list of circuit names.\n ' return [(name + l) for l in tomo_set['circuit_labels']]
3,232,676,696,004,374,000
Return a list of tomography circuit names. The returned list is the same as the one returned by `create_tomography_circuits` and can be used by a QuantumProgram to execute tomography circuits and extract measurement results. Args: tomo_set (tomography_set): a tomography set generated by `tomography_set`. name (str): the name of the base QuantumCircuit used by the tomography experiment. Returns: list: A list of circuit names.
qiskit/tools/qcvv/tomography.py
tomography_circuit_names
filemaster/qiskit-terra
python
def tomography_circuit_names(tomo_set, name=''): '\n Return a list of tomography circuit names.\n\n The returned list is the same as the one returned by\n `create_tomography_circuits` and can be used by a QuantumProgram\n to execute tomography circuits and extract measurement results.\n\n Args:\n tomo_set (tomography_set): a tomography set generated by\n `tomography_set`.\n name (str): the name of the base QuantumCircuit used by the\n tomography experiment.\n\n Returns:\n list: A list of circuit names.\n ' return [(name + l) for l in tomo_set['circuit_labels']]
def create_tomography_circuits(circuit, qreg, creg, tomoset): "\n Add tomography measurement circuits to a QuantumProgram.\n\n The quantum program must contain a circuit 'name', which is treated as a\n state preparation circuit for state tomography, or as teh circuit being\n measured for process tomography. This function then appends the circuit\n with a set of measurements specified by the input `tomography_set`,\n optionally it also prepends the circuit with state preparation circuits if\n they are specified in the `tomography_set`.\n\n For n-qubit tomography with a tomographically complete set of preparations\n and measurements this results in $4^n 3^n$ circuits being added to the\n quantum program.\n\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of quantum tomography circuits for the input circuit.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit\n\n Example:\n For a tomography set specifying state tomography of qubit-0 prepared\n by a circuit 'circ' this would return:\n ```\n ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']\n ```\n For process tomography of the same circuit with preparation in the\n SIC-POVM basis it would return:\n ```\n [\n 'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',\n 'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',\n 'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',\n 'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',\n 'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',\n 'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'\n ]\n ```\n " if (not isinstance(circuit, QuantumCircuit)): raise QiskitError('Input circuit must be a QuantumCircuit object') dics = tomoset['circuits'] labels = tomography_circuit_names(tomoset, circuit.name) tomography_circuits = [] for (label, conf) in zip(labels, dics): tmp = circuit if ('prep' in conf): prep = QuantumCircuit(qreg, creg, name='tmp_prep') for (qubit, op) in conf['prep'].items(): tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op) prep.barrier(qreg[qubit]) tmp = (prep + tmp) meas = QuantumCircuit(qreg, creg, name='tmp_meas') for (qubit, op) in conf['meas'].items(): meas.barrier(qreg[qubit]) tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op) meas.measure(qreg[qubit], creg[qubit]) tmp = (tmp + meas) tmp.name = label tomography_circuits.append(tmp) logger.info('>> created tomography circuits for "%s"', circuit.name) return tomography_circuits
5,433,345,039,005,012,000
Add tomography measurement circuits to a QuantumProgram. The quantum program must contain a circuit 'name', which is treated as a state preparation circuit for state tomography, or as the circuit being measured for process tomography. This function then appends the circuit with a set of measurements specified by the input `tomography_set`; optionally, it also prepends the circuit with state preparation circuits if they are specified in the `tomography_set`. For n-qubit tomography with a tomographically complete set of preparations and measurements this results in $4^n 3^n$ circuits being added to the quantum program. Args: circuit (QuantumCircuit): The circuit to be appended with tomography state preparation and/or measurements. qreg (QuantumRegister): the quantum register containing qubits to be measured. creg (ClassicalRegister): the classical register containing bits to store measurement outcomes. tomoset (tomography_set): the dict of tomography configurations. Returns: list: A list of quantum tomography circuits for the input circuit. Raises: QiskitError: if circuit is not a valid QuantumCircuit Example: For a tomography set specifying state tomography of qubit-0 prepared by a circuit 'circ' this would return: ``` ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)'] ``` For process tomography of the same circuit with preparation in the SIC-POVM basis it would return: ``` [ 'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)', 'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)', 'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)', 'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)', 'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)', 'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)' ] ```
qiskit/tools/qcvv/tomography.py
create_tomography_circuits
filemaster/qiskit-terra
python
def create_tomography_circuits(circuit, qreg, creg, tomoset): "\n Add tomography measurement circuits to a QuantumProgram.\n\n The quantum program must contain a circuit 'name', which is treated as a\n state preparation circuit for state tomography, or as teh circuit being\n measured for process tomography. This function then appends the circuit\n with a set of measurements specified by the input `tomography_set`,\n optionally it also prepends the circuit with state preparation circuits if\n they are specified in the `tomography_set`.\n\n For n-qubit tomography with a tomographically complete set of preparations\n and measurements this results in $4^n 3^n$ circuits being added to the\n quantum program.\n\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of quantum tomography circuits for the input circuit.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit\n\n Example:\n For a tomography set specifying state tomography of qubit-0 prepared\n by a circuit 'circ' this would return:\n ```\n ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']\n ```\n For process tomography of the same circuit with preparation in the\n SIC-POVM basis it would return:\n ```\n [\n 'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',\n 'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',\n 'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',\n 'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',\n 'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',\n 'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'\n ]\n ```\n " if (not isinstance(circuit, QuantumCircuit)): raise QiskitError('Input circuit must be a QuantumCircuit object') dics = tomoset['circuits'] labels = tomography_circuit_names(tomoset, circuit.name) tomography_circuits = [] for (label, conf) in zip(labels, dics): tmp = circuit if ('prep' in conf): prep = QuantumCircuit(qreg, creg, name='tmp_prep') for (qubit, op) in conf['prep'].items(): tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op) prep.barrier(qreg[qubit]) tmp = (prep + tmp) meas = QuantumCircuit(qreg, creg, name='tmp_meas') for (qubit, op) in conf['meas'].items(): meas.barrier(qreg[qubit]) tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op) meas.measure(qreg[qubit], creg[qubit]) tmp = (tmp + meas) tmp.name = label tomography_circuits.append(tmp) logger.info('>> created tomography circuits for "%s"', circuit.name) return tomography_circuits
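A hedged usage sketch for `create_tomography_circuits`, using the same era-appropriate register and circuit-concatenation API that the function body itself relies on:
```
# Hedged usage sketch in the old qiskit-terra API used by the body above.
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.tools.qcvv import tomography as tomo

qreg = QuantumRegister(1, 'q')
creg = ClassicalRegister(1, 'c')
circ = QuantumCircuit(qreg, creg, name='circ')
circ.h(qreg[0])                      # the state preparation being characterized

tomo_set = tomo.state_tomography_set([0])
circuits = tomo.create_tomography_circuits(circ, qreg, creg, tomo_set)
print([c.name for c in circuits])
# Per the docstring example: ['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']
```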
def tomography_data(results, name, tomoset): '\n Return a results dict for a state or process tomography experiment.\n\n Args:\n results (Result): Results from execution of a process tomography\n circuits on a backend.\n name (string): The name of the circuit being reconstructed.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of dicts for the outcome of each process tomography\n measurement circuit.\n ' labels = tomography_circuit_names(tomoset, name) circuits = tomoset['circuits'] data = [] prep = None for (j, _) in enumerate(labels): counts = marginal_counts(results.get_counts(labels[j]), tomoset['qubits']) shots = sum(counts.values()) meas = circuits[j]['meas'] prep = circuits[j].get('prep', None) meas_qubits = sorted(meas.keys()) if prep: prep_qubits = sorted(prep.keys()) circuit = {} for c in counts.keys(): circuit[c] = {} circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[((- 1) - k)])) for k in range(len(meas_qubits))] if prep: circuit[c]['prep'] = [prep[prep_qubits[k]] for k in range(len(prep_qubits))] data.append({'counts': counts, 'shots': shots, 'circuit': circuit}) ret = {'data': data, 'meas_basis': tomoset['meas_basis']} if prep: ret['prep_basis'] = tomoset['prep_basis'] return ret
4,341,700,413,205,243,400
Return a results dict for a state or process tomography experiment. Args: results (Result): Results from execution of process tomography circuits on a backend. name (string): The name of the circuit being reconstructed. tomoset (tomography_set): the dict of tomography configurations. Returns: list: A list of dicts for the outcome of each process tomography measurement circuit.
qiskit/tools/qcvv/tomography.py
tomography_data
filemaster/qiskit-terra
python
def tomography_data(results, name, tomoset): '\n Return a results dict for a state or process tomography experiment.\n\n Args:\n results (Result): Results from execution of a process tomography\n circuits on a backend.\n name (string): The name of the circuit being reconstructed.\n tomoset (tomography_set): the dict of tomography configurations.\n\n Returns:\n list: A list of dicts for the outcome of each process tomography\n measurement circuit.\n ' labels = tomography_circuit_names(tomoset, name) circuits = tomoset['circuits'] data = [] prep = None for (j, _) in enumerate(labels): counts = marginal_counts(results.get_counts(labels[j]), tomoset['qubits']) shots = sum(counts.values()) meas = circuits[j]['meas'] prep = circuits[j].get('prep', None) meas_qubits = sorted(meas.keys()) if prep: prep_qubits = sorted(prep.keys()) circuit = {} for c in counts.keys(): circuit[c] = {} circuit[c]['meas'] = [(meas[meas_qubits[k]], int(c[((- 1) - k)])) for k in range(len(meas_qubits))] if prep: circuit[c]['prep'] = [prep[prep_qubits[k]] for k in range(len(prep_qubits))] data.append({'counts': counts, 'shots': shots, 'circuit': circuit}) ret = {'data': data, 'meas_basis': tomoset['meas_basis']} if prep: ret['prep_basis'] = tomoset['prep_basis'] return ret
def marginal_counts(counts, meas_qubits): "\n Compute the marginal counts for a subset of measured qubits.\n\n Args:\n counts (dict): the counts returned from a backend ({str: int}).\n meas_qubits (list[int]): the qubits to return the marginal\n counts distribution for.\n\n Returns:\n dict: A counts dict for the meas_qubits.\n Example: if `counts = {'00': 10, '01': 5}`\n `marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}`.\n `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.\n " num_of_qubits = len(list(counts.keys())[0]) qs = sorted(meas_qubits, reverse=True) meas_keys = count_keys(len(qs)) rgx = [reduce((lambda x, y: ((key[qs.index(y)] if (y in qs) else '\\d') + x)), range(num_of_qubits), '') for key in meas_keys] meas_counts = [] for m in rgx: c = 0 for (key, val) in counts.items(): if match(m, key): c += val meas_counts.append(c) return dict(zip(meas_keys, meas_counts))
-5,976,532,595,156,514,000
Compute the marginal counts for a subset of measured qubits. Args: counts (dict): the counts returned from a backend ({str: int}). meas_qubits (list[int]): the qubits to return the marginal counts distribution for. Returns: dict: A counts dict for the meas_qubits. Example: if `counts = {'00': 10, '01': 5}` `marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}`. `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.
qiskit/tools/qcvv/tomography.py
marginal_counts
filemaster/qiskit-terra
python
def marginal_counts(counts, meas_qubits): "\n Compute the marginal counts for a subset of measured qubits.\n\n Args:\n counts (dict): the counts returned from a backend ({str: int}).\n meas_qubits (list[int]): the qubits to return the marginal\n counts distribution for.\n\n Returns:\n dict: A counts dict for the meas_qubits.\n Example: if `counts = {'00': 10, '01': 5}`\n `marginal_counts(counts, [1])` returns `{'0': 15, '1': 0}`.\n `marginal_counts(counts, [0])` returns `{'0': 10, '1': 5}`.\n " num_of_qubits = len(list(counts.keys())[0]) qs = sorted(meas_qubits, reverse=True) meas_keys = count_keys(len(qs)) rgx = [reduce((lambda x, y: ((key[qs.index(y)] if (y in qs) else '\\d') + x)), range(num_of_qubits), '') for key in meas_keys] meas_counts = [] for m in rgx: c = 0 for (key, val) in counts.items(): if match(m, key): c += val meas_counts.append(c) return dict(zip(meas_keys, meas_counts))
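The regex machinery above can be cross-checked with direct bit extraction (bit k of an outcome string is `key[-1 - k]`, the same convention used in `tomography_data`); a self-contained sketch that reproduces the docstring example:
```
# Self-contained cross-check of the marginalization, without regexes.
# Unlike the original, outcomes with zero counts are omitted rather than
# reported as 0.
from collections import defaultdict

def marginal_counts_simple(counts, meas_qubits):
    qs = sorted(meas_qubits, reverse=True)        # most significant bit first
    out = defaultdict(int)
    for key, val in counts.items():
        out[''.join(key[-1 - q] for q in qs)] += val
    return dict(out)

counts = {'00': 10, '01': 5}
print(marginal_counts_simple(counts, [1]))  # {'0': 15} -- qubit 1 is always 0
print(marginal_counts_simple(counts, [0]))  # {'0': 10, '1': 5}
```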
def count_keys(n): "Generate outcome bitstrings for n-qubits.\n\n Args:\n n (int): the number of qubits.\n\n Returns:\n list: A list of bitstrings ordered as follows:\n Example: n=2 returns ['00', '01', '10', '11'].\n " return [bin(j)[2:].zfill(n) for j in range((2 ** n))]
4,993,537,896,861,154,000
Generate outcome bitstrings for n-qubits. Args: n (int): the number of qubits. Returns: list: A list of bitstrings ordered as follows: Example: n=2 returns ['00', '01', '10', '11'].
qiskit/tools/qcvv/tomography.py
count_keys
filemaster/qiskit-terra
python
def count_keys(n): "Generate outcome bitstrings for n-qubits.\n\n Args:\n n (int): the number of qubits.\n\n Returns:\n list: A list of bitstrings ordered as follows:\n Example: n=2 returns ['00', '01', '10', '11'].\n " return [bin(j)[2:].zfill(n) for j in range((2 ** n))]
def fit_tomography_data(tomo_data, method='wizard', options=None): "\n Reconstruct a density matrix or process-matrix from tomography data.\n\n If the input data is state_tomography_data the returned operator will\n be a density matrix. If the input data is process_tomography_data the\n returned operator will be a Choi-matrix in the column-vectorization\n convention.\n\n Args:\n tomo_data (dict): process tomography measurement data.\n method (str): the fitting method to use.\n Available methods:\n - 'wizard' (default)\n - 'leastsq'\n options (dict or None): additional options for fitting method.\n\n Returns:\n numpy.array: The fitted operator.\n\n Available methods:\n - 'wizard' (Default): The returned operator will be constrained to be\n positive-semidefinite.\n Options:\n - 'trace': the trace of the returned operator.\n The default value is 1.\n - 'beta': hedging parameter for computing frequencies from\n zero-count data. The default value is 0.50922.\n - 'epsilon: threshold for truncating small eigenvalues to zero.\n The default value is 0\n - 'leastsq': Fitting without positive-semidefinite constraint.\n Options:\n - 'trace': Same as for 'wizard' method.\n - 'beta': Same as for 'wizard' method.\n Raises:\n Exception: if the `method` parameter is not valid.\n " if (isinstance(method, str) and (method.lower() in ['wizard', 'leastsq'])): trace = __get_option('trace', options) beta = __get_option('beta', options) rho = __leastsq_fit(tomo_data, trace=trace, beta=beta) if (method == 'wizard'): epsilon = __get_option('epsilon', options) rho = __wizard(rho, epsilon=epsilon) return rho else: raise Exception(('Invalid reconstruction method "%s"' % method))
1,225,486,021,359,920,400
Reconstruct a density matrix or process-matrix from tomography data. If the input data is state_tomography_data the returned operator will be a density matrix. If the input data is process_tomography_data the returned operator will be a Choi-matrix in the column-vectorization convention. Args: tomo_data (dict): process tomography measurement data. method (str): the fitting method to use. Available methods: - 'wizard' (default) - 'leastsq' options (dict or None): additional options for fitting method. Returns: numpy.array: The fitted operator. Available methods: - 'wizard' (Default): The returned operator will be constrained to be positive-semidefinite. Options: - 'trace': the trace of the returned operator. The default value is 1. - 'beta': hedging parameter for computing frequencies from zero-count data. The default value is 0.50922. - 'epsilon': threshold for truncating small eigenvalues to zero. The default value is 0. - 'leastsq': Fitting without positive-semidefinite constraint. Options: - 'trace': Same as for 'wizard' method. - 'beta': Same as for 'wizard' method. Raises: Exception: if the `method` parameter is not valid.
qiskit/tools/qcvv/tomography.py
fit_tomography_data
filemaster/qiskit-terra
python
def fit_tomography_data(tomo_data, method='wizard', options=None): "\n Reconstruct a density matrix or process-matrix from tomography data.\n\n If the input data is state_tomography_data the returned operator will\n be a density matrix. If the input data is process_tomography_data the\n returned operator will be a Choi-matrix in the column-vectorization\n convention.\n\n Args:\n tomo_data (dict): process tomography measurement data.\n method (str): the fitting method to use.\n Available methods:\n - 'wizard' (default)\n - 'leastsq'\n options (dict or None): additional options for fitting method.\n\n Returns:\n numpy.array: The fitted operator.\n\n Available methods:\n - 'wizard' (Default): The returned operator will be constrained to be\n positive-semidefinite.\n Options:\n - 'trace': the trace of the returned operator.\n The default value is 1.\n - 'beta': hedging parameter for computing frequencies from\n zero-count data. The default value is 0.50922.\n - 'epsilon: threshold for truncating small eigenvalues to zero.\n The default value is 0\n - 'leastsq': Fitting without positive-semidefinite constraint.\n Options:\n - 'trace': Same as for 'wizard' method.\n - 'beta': Same as for 'wizard' method.\n Raises:\n Exception: if the `method` parameter is not valid.\n " if (isinstance(method, str) and (method.lower() in ['wizard', 'leastsq'])): trace = __get_option('trace', options) beta = __get_option('beta', options) rho = __leastsq_fit(tomo_data, trace=trace, beta=beta) if (method == 'wizard'): epsilon = __get_option('epsilon', options) rho = __wizard(rho, epsilon=epsilon) return rho else: raise Exception(('Invalid reconstruction method "%s"' % method))
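A typical call, sketched under the assumption that `tomo_data` was produced by `tomography_data` above:
```
# Hedged usage sketch; `tomo_data` is assumed to come from tomography_data().
rho = fit_tomography_data(tomo_data, method='wizard',
                          options={'trace': 1, 'beta': 0.50922, 'epsilon': 0})
# 'leastsq' skips the positive-semidefinite projection step:
rho_raw = fit_tomography_data(tomo_data, method='leastsq')
```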
def __get_option(opt, options): '\n Return an optional value or None if not found.\n ' if (options is not None): if (opt in options): return options[opt] return None
-5,979,756,822,097,294,000
Return an optional value or None if not found.
qiskit/tools/qcvv/tomography.py
__get_option
filemaster/qiskit-terra
python
def __get_option(opt, options): '\n \n ' if (options is not None): if (opt in options): return options[opt] return None
def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None): '\n Reconstruct a state from unconstrained least-squares fitting.\n\n Args:\n tomo_data (list[dict]): state or process tomography data.\n weights (list or array or None): weights to use for least squares\n fitting. The default is standard deviation from a binomial\n distribution.\n trace (float or None): trace of returned operator. The default is 1.\n beta (float or None): hedge parameter (>=0) for computing frequencies\n from zero-count data. The default value is 0.50922.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n ' if (trace is None): trace = 1.0 data = tomo_data['data'] keys = data[0]['circuit'].keys() counts = [] shots = [] ops = [] for dat in data: for key in keys: counts.append(dat['counts'][key]) shots.append(dat['shots']) projectors = dat['circuit'][key] op = __projector(projectors['meas'], tomo_data['meas_basis']) if ('prep' in projectors): op_prep = __projector(projectors['prep'], tomo_data['prep_basis']) op = np.kron(op_prep.conj(), op) ops.append(op) counts = np.array(counts) shots = np.array(shots) freqs = (counts / shots) if (weights is None): if (beta is None): beta = 0.50922 K = len(keys) freqs_hedged = ((counts + beta) / (shots + (K * beta))) weights = np.sqrt((shots / (freqs_hedged * (1 - freqs_hedged)))) return __tomo_linear_inv(freqs, ops, weights, trace=trace)
5,078,531,888,912,305,000
Reconstruct a state from unconstrained least-squares fitting. Args: tomo_data (list[dict]): state or process tomography data. weights (list or array or None): weights to use for least squares fitting. The default weights are the inverse standard deviations of a binomial distribution, computed from hedged frequencies. trace (float or None): trace of returned operator. The default is 1. beta (float or None): hedge parameter (>=0) for computing frequencies from zero-count data. The default value is 0.50922. Returns: numpy.array: A numpy array of the reconstructed operator.
qiskit/tools/qcvv/tomography.py
__leastsq_fit
filemaster/qiskit-terra
python
def __leastsq_fit(tomo_data, weights=None, trace=None, beta=None): '\n Reconstruct a state from unconstrained least-squares fitting.\n\n Args:\n tomo_data (list[dict]): state or process tomography data.\n weights (list or array or None): weights to use for least squares\n fitting. The default is standard deviation from a binomial\n distribution.\n trace (float or None): trace of returned operator. The default is 1.\n beta (float or None): hedge parameter (>=0) for computing frequencies\n from zero-count data. The default value is 0.50922.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n ' if (trace is None): trace = 1.0 data = tomo_data['data'] keys = data[0]['circuit'].keys() counts = [] shots = [] ops = [] for dat in data: for key in keys: counts.append(dat['counts'][key]) shots.append(dat['shots']) projectors = dat['circuit'][key] op = __projector(projectors['meas'], tomo_data['meas_basis']) if ('prep' in projectors): op_prep = __projector(projectors['prep'], tomo_data['prep_basis']) op = np.kron(op_prep.conj(), op) ops.append(op) counts = np.array(counts) shots = np.array(shots) freqs = (counts / shots) if (weights is None): if (beta is None): beta = 0.50922 K = len(keys) freqs_hedged = ((counts + beta) / (shots + (K * beta))) weights = np.sqrt((shots / (freqs_hedged * (1 - freqs_hedged)))) return __tomo_linear_inv(freqs, ops, weights, trace=trace)
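The default weighting hinges on the hedged frequencies f = (counts + beta) / (shots + K*beta), which keep zero-count outcomes away from f = 0, where the binomial standard deviation vanishes and the weight would diverge. A small numeric illustration:
```
# Numeric illustration of the hedged frequencies and default weights.
import numpy as np

counts = np.array([0, 1000])       # one outcome was never observed
shots = np.array([1000, 1000])
beta, K = 0.50922, 2               # K = number of outcome keys

f_naive = counts / shots                          # [0., 1.] -> infinite weights
f_hedged = (counts + beta) / (shots + K * beta)   # strictly inside (0, 1)
weights = np.sqrt(shots / (f_hedged * (1 - f_hedged)))
print(f_hedged)   # ~[0.000509, 0.999491]
print(weights)    # finite, large for near-deterministic outcomes
```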
def __projector(op_list, basis): 'Return a projector built as the Kronecker product of single-qubit basis operators.\n ' ret = 1 for op in op_list: (label, eigenstate) = op ret = np.kron(basis[label][eigenstate], ret) return ret
6,264,293,341,667,256,000
Return a projector built as the Kronecker product of single-qubit basis operators.
qiskit/tools/qcvv/tomography.py
__projector
filemaster/qiskit-terra
python
def __projector(op_list, basis): '\n ' ret = 1 for op in op_list: (label, eigenstate) = op ret = np.kron(basis[label][eigenstate], ret) return ret
def __tomo_linear_inv(freqs, ops, weights=None, trace=None): '\n Reconstruct a matrix through linear inversion.\n\n Args:\n freqs (list[float]): list of observed frequencies.\n ops (list[np.array]): list of corresponding projectors.\n weights (list[float] or array_like):\n weights to be used for weighted fitting.\n trace (float or None): trace of returned operator.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n ' if (weights is not None): W = np.array(weights) if (W.ndim == 1): W = np.diag(W) S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size) if (weights is not None): S = np.dot(W, S) v = np.array(freqs) if (weights is not None): v = np.dot(W, freqs) Sdg = S.T.conj() inv = np.linalg.pinv(np.dot(Sdg, S)) ret = devectorize(np.dot(inv, np.dot(Sdg, v))) if (trace is not None): ret = ((trace * ret) / np.trace(ret)) return ret
2,932,244,388,342,729,700
Reconstruct a matrix through linear inversion. Args: freqs (list[float]): list of observed frequencies. ops (list[np.array]): list of corresponding projectors. weights (list[float] or array_like): weights to be used for weighted fitting. trace (float or None): trace of returned operator. Returns: numpy.array: A numpy array of the reconstructed operator.
qiskit/tools/qcvv/tomography.py
__tomo_linear_inv
filemaster/qiskit-terra
python
def __tomo_linear_inv(freqs, ops, weights=None, trace=None): '\n Reconstruct a matrix through linear inversion.\n\n Args:\n freqs (list[float]): list of observed frequencies.\n ops (list[np.array]): list of corresponding projectors.\n weights (list[float] or array_like):\n weights to be used for weighted fitting.\n trace (float or None): trace of returned operator.\n\n Returns:\n numpy.array: A numpy array of the reconstructed operator.\n ' if (weights is not None): W = np.array(weights) if (W.ndim == 1): W = np.diag(W) S = np.array([vectorize(m).conj() for m in ops]).reshape(len(ops), ops[0].size) if (weights is not None): S = np.dot(W, S) v = np.array(freqs) if (weights is not None): v = np.dot(W, freqs) Sdg = S.T.conj() inv = np.linalg.pinv(np.dot(Sdg, S)) ret = devectorize(np.dot(inv, np.dot(Sdg, v))) if (trace is not None): ret = ((trace * ret) / np.trace(ret)) return ret
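A self-contained single-qubit demonstration of the linear inversion, with `vectorize`/`devectorize` replaced by explicit column-stacking reshapes (an assumption about their convention):
```
# Standalone linear-inversion demo for one qubit. The vec/devec helpers are
# assumed stand-ins for the module's vectorize/devectorize.
import numpy as np

def vec(m):
    return m.flatten(order='F')           # column-vectorization

def devec(v):
    d = int(np.sqrt(v.size))
    return v.reshape((d, d), order='F')

def proj(state):                          # |s><s| for a normalized ket
    s = np.array(state, dtype=complex).reshape(2, 1)
    return s @ s.conj().T

s2 = 1 / np.sqrt(2)
ops = [proj([s2, s2]), proj([s2, -s2]),            # X eigenprojectors
       proj([s2, 1j * s2]), proj([s2, -1j * s2]),  # Y eigenprojectors
       proj([1, 0]), proj([0, 1])]                 # Z eigenprojectors

rho_true = proj([1, 0])                                       # |0><0|
freqs = np.array([np.trace(p @ rho_true).real for p in ops])  # ideal data

S = np.array([vec(p).conj() for p in ops])   # Tr(P rho) = vec(P)^dag vec(rho)
Sdg = S.T.conj()
rho = devec(np.linalg.pinv(Sdg @ S) @ (Sdg @ freqs))
print(np.round(rho, 6))                      # recovers |0><0|
```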
def __wizard(rho, epsilon=None): '\n Returns the nearest positive semidefinite operator to an operator.\n\n This method is based on reference [1]. It constrains positivity\n by setting negative eigenvalues to zero and rescaling the positive\n eigenvalues.\n\n Args:\n rho (array_like): the input operator.\n epsilon (float or None): threshold (>=0) for truncating small\n eigenvalues to zero.\n\n Returns:\n numpy.array: A positive semidefinite numpy array.\n ' if (epsilon is None): epsilon = 0.0 dim = len(rho) rho_wizard = np.zeros([dim, dim]) (v, w) = np.linalg.eigh(rho) for j in range(dim): if (v[j] < epsilon): tmp = v[j] v[j] = 0.0 x = 0.0 for k in range((j + 1), dim): x += (tmp / (dim - (j + 1))) v[k] = (v[k] + (tmp / (dim - (j + 1)))) for j in range(dim): rho_wizard = (rho_wizard + (v[j] * outer(w[:, j]))) return rho_wizard
-4,302,117,271,755,895,300
Returns the nearest positive semidefinite operator to an operator. This method is based on reference [1]. It constrains positivity by setting negative eigenvalues to zero and rescaling the positive eigenvalues. Args: rho (array_like): the input operator. epsilon (float or None): threshold (>=0) for truncating small eigenvalues to zero. Returns: numpy.array: A positive semidefinite numpy array.
qiskit/tools/qcvv/tomography.py
__wizard
filemaster/qiskit-terra
python
def __wizard(rho, epsilon=None): '\n Returns the nearest positive semidefinite operator to an operator.\n\n This method is based on reference [1]. It constrains positivity\n by setting negative eigenvalues to zero and rescaling the positive\n eigenvalues.\n\n Args:\n rho (array_like): the input operator.\n epsilon (float or None): threshold (>=0) for truncating small\n eigenvalues to zero.\n\n Returns:\n numpy.array: A positive semidefinite numpy array.\n ' if (epsilon is None): epsilon = 0.0 dim = len(rho) rho_wizard = np.zeros([dim, dim]) (v, w) = np.linalg.eigh(rho) for j in range(dim): if (v[j] < epsilon): tmp = v[j] v[j] = 0.0 x = 0.0 for k in range((j + 1), dim): x += (tmp / (dim - (j + 1))) v[k] = (v[k] + (tmp / (dim - (j + 1)))) for j in range(dim): rho_wizard = (rho_wizard + (v[j] * outer(w[:, j]))) return rho_wizard
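A self-contained sketch of the eigenvalue rescaling loop above: each sub-threshold eigenvalue is zeroed and its (negative) weight spread over the remaining higher eigenvalues, preserving the trace:
```
# Standalone sketch mirroring the __wizard eigenvalue loop above.
import numpy as np

def nearest_psd(rho, epsilon=0.0):
    dim = len(rho)
    v, w = np.linalg.eigh(rho)            # eigenvalues in ascending order
    for j in range(dim):
        if v[j] < epsilon:
            tmp, v[j] = v[j], 0.0
            v[j + 1:] += tmp / (dim - (j + 1))   # redistribute the deficit
    return sum(v[j] * np.outer(w[:, j], w[:, j].conj()) for j in range(dim))

rho = np.array([[0.6, 0.55], [0.55, 0.4]])       # Hermitian, trace 1, not PSD
print(np.linalg.eigvalsh(rho))                   # ~[-0.059, 1.059]
rho_psd = nearest_psd(rho)
print(np.linalg.eigvalsh(rho_psd))               # [0., 1.] -- now PSD
print(np.trace(rho_psd))                         # trace preserved at 1.0
```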
def build_wigner_circuits(circuit, phis, thetas, qubits, qreg, creg): 'Create the circuits to rotate to points in phase space\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n phis (np.matrix[[complex]]): phis\n thetas (np.matrix[[complex]]): thetas\n qubits (list[int]): a list of the qubit indexes of qreg to be measured.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n\n Returns:\n list: A list of names of the added wigner function circuits.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit.\n ' if (not isinstance(circuit, QuantumCircuit)): raise QiskitError('Input circuit must be a QuantumCircuit object') tomography_circuits = [] points = len(phis[0]) for point in range(points): label = '_wigner_phase_point' label += str(point) tmp_circ = QuantumCircuit(qreg, creg, name=label) for (qubit, _) in enumerate(qubits): tmp_circ.u3(thetas[qubit][point], 0, phis[qubit][point], qreg[qubits[qubit]]) tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]]) tmp_circ = (circuit + tmp_circ) tmp_circ.name = (circuit.name + label) tomography_circuits.append(tmp_circ) logger.info('>> Created Wigner function circuits for "%s"', circuit.name) return tomography_circuits
8,931,927,208,068,485,000
Create the circuits to rotate to points in phase space. Args: circuit (QuantumCircuit): The circuit to be appended with tomography state preparation and/or measurements. phis (np.matrix[[complex]]): phis thetas (np.matrix[[complex]]): thetas qubits (list[int]): a list of the qubit indexes of qreg to be measured. qreg (QuantumRegister): the quantum register containing qubits to be measured. creg (ClassicalRegister): the classical register containing bits to store measurement outcomes. Returns: list: A list of the added Wigner function circuits. Raises: QiskitError: if circuit is not a valid QuantumCircuit.
qiskit/tools/qcvv/tomography.py
build_wigner_circuits
filemaster/qiskit-terra
python
def build_wigner_circuits(circuit, phis, thetas, qubits, qreg, creg): 'Create the circuits to rotate to points in phase space\n Args:\n circuit (QuantumCircuit): The circuit to be appended with tomography\n state preparation and/or measurements.\n phis (np.matrix[[complex]]): phis\n thetas (np.matrix[[complex]]): thetas\n qubits (list[int]): a list of the qubit indexes of qreg to be measured.\n qreg (QuantumRegister): the quantum register containing qubits to be\n measured.\n creg (ClassicalRegister): the classical register containing bits to\n store measurement outcomes.\n\n Returns:\n list: A list of names of the added wigner function circuits.\n\n Raises:\n QiskitError: if circuit is not a valid QuantumCircuit.\n ' if (not isinstance(circuit, QuantumCircuit)): raise QiskitError('Input circuit must be a QuantumCircuit object') tomography_circuits = [] points = len(phis[0]) for point in range(points): label = '_wigner_phase_point' label += str(point) tmp_circ = QuantumCircuit(qreg, creg, name=label) for (qubit, _) in enumerate(qubits): tmp_circ.u3(thetas[qubit][point], 0, phis[qubit][point], qreg[qubits[qubit]]) tmp_circ.measure(qreg[qubits[qubit]], creg[qubits[qubit]]) tmp_circ = (circuit + tmp_circ) tmp_circ.name = (circuit.name + label) tomography_circuits.append(tmp_circ) logger.info('>> Created Wigner function circuits for "%s"', circuit.name) return tomography_circuits
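A hedged usage sketch (same old-API assumptions as above); `phis` and `thetas` carry one row per measured qubit and one column per phase-space point:
```
# Hedged usage sketch for build_wigner_circuits (old qiskit-terra API).
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.tools.qcvv import tomography as tomo

qreg = QuantumRegister(1, 'q')
creg = ClassicalRegister(1, 'c')
circ = QuantumCircuit(qreg, creg, name='circ')
circ.h(qreg[0])

phis = np.array([[0.0, np.pi / 2]])       # row = qubit, column = phase point
thetas = np.array([[np.pi / 4, np.pi / 4]])
wcircs = tomo.build_wigner_circuits(circ, phis, thetas, [0], qreg, creg)
print([c.name for c in wcircs])
# ['circ_wigner_phase_point0', 'circ_wigner_phase_point1']
```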
def wigner_data(q_result, meas_qubits, labels, shots=None): 'Get the value of the Wigner function from measurement results.\n\n Args:\n q_result (Result): Results from execution of a state tomography\n circuits on a backend.\n meas_qubits (list[int]): a list of the qubit indexes measured.\n labels (list[str]): a list of names of the circuits\n shots (int): number of shots\n\n Returns:\n list: The values of the Wigner function at measured points in\n phase space\n ' num = len(meas_qubits) dim = (2 ** num) p = [(0.5 + (0.5 * np.sqrt(3))), (0.5 - (0.5 * np.sqrt(3)))] parity = 1 for i in range(num): parity = np.kron(parity, p) w = ([0] * len(labels)) wpt = 0 counts = [marginal_counts(q_result.get_counts(circ), meas_qubits) for circ in labels] for entry in counts: x = ([0] * dim) for i in range(dim): if (bin(i)[2:].zfill(num) in entry): x[i] = float(entry[bin(i)[2:].zfill(num)]) if (shots is None): shots = np.sum(x) for i in range(dim): w[wpt] = (w[wpt] + ((x[i] / shots) * parity[i])) wpt += 1 return w
-6,619,506,774,687,886,000
Get the value of the Wigner function from measurement results. Args: q_result (Result): Results from execution of state tomography circuits on a backend. meas_qubits (list[int]): a list of the qubit indexes measured. labels (list[str]): a list of names of the circuits. shots (int): number of shots. Returns: list: The values of the Wigner function at measured points in phase space.
qiskit/tools/qcvv/tomography.py
wigner_data
filemaster/qiskit-terra
python
def wigner_data(q_result, meas_qubits, labels, shots=None): 'Get the value of the Wigner function from measurement results.\n\n Args:\n q_result (Result): Results from execution of a state tomography\n circuits on a backend.\n meas_qubits (list[int]): a list of the qubit indexes measured.\n labels (list[str]): a list of names of the circuits\n shots (int): number of shots\n\n Returns:\n list: The values of the Wigner function at measured points in\n phase space\n ' num = len(meas_qubits) dim = (2 ** num) p = [(0.5 + (0.5 * np.sqrt(3))), (0.5 - (0.5 * np.sqrt(3)))] parity = 1 for i in range(num): parity = np.kron(parity, p) w = ([0] * len(labels)) wpt = 0 counts = [marginal_counts(q_result.get_counts(circ), meas_qubits) for circ in labels] for entry in counts: x = ([0] * dim) for i in range(dim): if (bin(i)[2:].zfill(num) in entry): x[i] = float(entry[bin(i)[2:].zfill(num)]) if (shots is None): shots = np.sum(x) for i in range(dim): w[wpt] = (w[wpt] + ((x[i] / shots) * parity[i])) wpt += 1 return w
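The arithmetic inside `wigner_data` reduces to folding the outcome distribution against a parity kernel p = [1/2 + sqrt(3)/2, 1/2 - sqrt(3)/2], tensored once per qubit; a standalone numeric sketch for one qubit and hypothetical counts:
```
# Standalone sketch of the Wigner-value arithmetic for one qubit.
import numpy as np

num = 1
p = [0.5 + 0.5 * np.sqrt(3), 0.5 - 0.5 * np.sqrt(3)]
parity = np.array([1.0])
for _ in range(num):
    parity = np.kron(parity, p)

counts = {'0': 900, '1': 100}            # hypothetical marginal counts
x = np.array([counts.get(bin(i)[2:].zfill(num), 0)
              for i in range(2 ** num)], dtype=float)
w_value = float(np.sum((x / x.sum()) * parity))
print(w_value)                            # ~1.193 for these counts
```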
def prep_gate(self, circuit, qreg, op): '\n Add state preparation gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add a preparation to.\n qreg (tuple(QuantumRegister,int)): quantum register to apply\n preparation to.\n op (tuple(str, int)): the basis label and index for the\n preparation op.\n ' if (self.prep_fun is None): pass else: self.prep_fun(circuit, qreg, op)
-3,505,176,667,774,246,000
Add state preparation gates to a circuit. Args: circuit (QuantumCircuit): circuit to add a preparation to. qreg (tuple(QuantumRegister,int)): quantum register to apply preparation to. op (tuple(str, int)): the basis label and index for the preparation op.
qiskit/tools/qcvv/tomography.py
prep_gate
filemaster/qiskit-terra
python
def prep_gate(self, circuit, qreg, op): '\n Add state preparation gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add a preparation to.\n qreg (tuple(QuantumRegister,int)): quantum register to apply\n preparation to.\n op (tuple(str, int)): the basis label and index for the\n preparation op.\n ' if (self.prep_fun is None): pass else: self.prep_fun(circuit, qreg, op)
def meas_gate(self, circuit, qreg, op): '\n Add measurement gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add measurement to.\n qreg (tuple(QuantumRegister,int)): quantum register being measured.\n op (str): the basis label for the measurement.\n ' if (self.meas_fun is None): pass else: self.meas_fun(circuit, qreg, op)
-2,191,418,647,831,939,800
Add measurement gates to a circuit. Args: circuit (QuantumCircuit): circuit to add measurement to. qreg (tuple(QuantumRegister,int)): quantum register being measured. op (str): the basis label for the measurement.
qiskit/tools/qcvv/tomography.py
meas_gate
filemaster/qiskit-terra
python
def meas_gate(self, circuit, qreg, op): '\n Add measurement gates to a circuit.\n\n Args:\n circuit (QuantumCircuit): circuit to add measurement to.\n qreg (tuple(QuantumRegister,int)): quantum register being measured.\n op (str): the basis label for the measurement.\n ' if (self.meas_fun is None): pass else: self.meas_fun(circuit, qreg, op)
def __init__(self, cmndpipe, rspdpipe): '\n Create a PyQt viewer which reads commands from the Pipe\n cmndpipe and writes responses back to rspdpipe.\n ' super(PipedImagerPQ, self).__init__() self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe signal.signal(signal.SIGINT, signal.SIG_IGN) self.__sceneimage = None self.__scenedata = None self.__loadingimage = False self.__scenewidth = int((10.8 * self.physicalDpiX())) self.__sceneheight = int((8.8 * self.physicalDpiY())) self.__noalpha = False self.__lastclearcolor = QColor(16777215) self.__lastclearcolor.setAlpha(255) self.__scalefactor = 1.0 self.__autoscale = True self.__minsize = 128 self.__scrollarea = QScrollArea(self) self.__label = QLabel(self.__scrollarea) self.__label.setMinimumSize(self.__scenewidth, self.__sceneheight) self.__label.resize(self.__scenewidth, self.__sceneheight) self.__scrollarea.setWidget(self.__label) self.__scrollarea.setBackgroundRole(QPalette.Dark) self.setCentralWidget(self.__scrollarea) self.__lastfilename = 'ferret.png' self.__lastformat = 'png' self.__helper = CmndHelperPQ(self) self.__scaleact = QAction(self.tr('&Scale'), self, shortcut=self.tr('Ctrl+S'), statusTip=self.tr('Scale the image (canvas and image change size)'), triggered=self.inquireSceneScale) self.__saveact = QAction(self.tr('Save &As...'), self, shortcut=self.tr('Ctrl+A'), statusTip=self.tr('Save the image to file'), triggered=self.inquireSaveFilename) self.__redrawact = QAction(self.tr('&Redraw'), self, shortcut=self.tr('Ctrl+R'), statusTip=self.tr('Clear and redraw the image'), triggered=self.redrawScene) self.__aboutact = QAction(self.tr('&About'), self, statusTip=self.tr('Show information about this viewer'), triggered=self.aboutMsg) self.__aboutqtact = QAction(self.tr('About &Qt'), self, statusTip=self.tr('Show information about the Qt library'), triggered=self.aboutQtMsg) self.createMenus() self.__framedelta = 4 mwwidth = (self.__scenewidth + self.__framedelta) mwheight = (((self.__sceneheight + self.__framedelta) + self.menuBar().height()) + self.statusBar().height()) self.resize(mwwidth, mwheight) self.__timer = QTimer(self) self.__timer.timeout.connect(self.checkCommandPipe) self.__timer.setInterval(0) self.__timer.start()
-8,444,863,606,549,846,000
Create a PyQt viewer which reads commands from the Pipe cmndpipe and writes responses back to rspdpipe.
pviewmod/pipedimagerpq.py
__init__
Jhongesell/PyFerret
python
def __init__(self, cmndpipe, rspdpipe): '\n Create a PyQt viewer which reads commands from the Pipe\n cmndpipe and writes responses back to rspdpipe.\n ' super(PipedImagerPQ, self).__init__() self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe signal.signal(signal.SIGINT, signal.SIG_IGN) self.__sceneimage = None self.__scenedata = None self.__loadingimage = False self.__scenewidth = int((10.8 * self.physicalDpiX())) self.__sceneheight = int((8.8 * self.physicalDpiY())) self.__noalpha = False self.__lastclearcolor = QColor(16777215) self.__lastclearcolor.setAlpha(255) self.__scalefactor = 1.0 self.__autoscale = True self.__minsize = 128 self.__scrollarea = QScrollArea(self) self.__label = QLabel(self.__scrollarea) self.__label.setMinimumSize(self.__scenewidth, self.__sceneheight) self.__label.resize(self.__scenewidth, self.__sceneheight) self.__scrollarea.setWidget(self.__label) self.__scrollarea.setBackgroundRole(QPalette.Dark) self.setCentralWidget(self.__scrollarea) self.__lastfilename = 'ferret.png' self.__lastformat = 'png' self.__helper = CmndHelperPQ(self) self.__scaleact = QAction(self.tr('&Scale'), self, shortcut=self.tr('Ctrl+S'), statusTip=self.tr('Scale the image (canvas and image change size)'), triggered=self.inquireSceneScale) self.__saveact = QAction(self.tr('Save &As...'), self, shortcut=self.tr('Ctrl+A'), statusTip=self.tr('Save the image to file'), triggered=self.inquireSaveFilename) self.__redrawact = QAction(self.tr('&Redraw'), self, shortcut=self.tr('Ctrl+R'), statusTip=self.tr('Clear and redraw the image'), triggered=self.redrawScene) self.__aboutact = QAction(self.tr('&About'), self, statusTip=self.tr('Show information about this viewer'), triggered=self.aboutMsg) self.__aboutqtact = QAction(self.tr('About &Qt'), self, statusTip=self.tr('Show information about the Qt library'), triggered=self.aboutQtMsg) self.createMenus() self.__framedelta = 4 mwwidth = (self.__scenewidth + self.__framedelta) mwheight = (((self.__sceneheight + self.__framedelta) + self.menuBar().height()) + self.statusBar().height()) self.resize(mwwidth, mwheight) self.__timer = QTimer(self) self.__timer.timeout.connect(self.checkCommandPipe) self.__timer.setInterval(0) self.__timer.start()
def createMenus(self): '\n Create the menu items for the viewer\n using the previously created actions.\n ' menuBar = self.menuBar() sceneMenu = menuBar.addMenu(menuBar.tr('&Image')) sceneMenu.addAction(self.__scaleact) sceneMenu.addAction(self.__saveact) sceneMenu.addAction(self.__redrawact) helpMenu = menuBar.addMenu(menuBar.tr('&Help')) helpMenu.addAction(self.__aboutact) helpMenu.addAction(self.__aboutqtact)
-6,105,742,166,732,878,000
Create the menu items for the viewer using the previously created actions.
pviewmod/pipedimagerpq.py
createMenus
Jhongesell/PyFerret
python
def createMenus(self): '\n Create the menu items for the viewer\n using the previously created actions.\n ' menuBar = self.menuBar() sceneMenu = menuBar.addMenu(menuBar.tr('&Image')) sceneMenu.addAction(self.__scaleact) sceneMenu.addAction(self.__saveact) sceneMenu.addAction(self.__redrawact) helpMenu = menuBar.addMenu(menuBar.tr('&Help')) helpMenu.addAction(self.__aboutact) helpMenu.addAction(self.__aboutqtact)
def resizeEvent(self, event): '\n Monitor resizing in case auto-scaling of the image is selected.\n ' if self.__autoscale: if self.autoScaleScene(): event.accept() else: event.ignore() else: event.accept()
-7,212,234,922,400,166,000
Monitor resizing in case auto-scaling of the image is selected.
pviewmod/pipedimagerpq.py
resizeEvent
Jhongesell/PyFerret
python
def resizeEvent(self, event): '\n \n ' if self.__autoscale: if self.autoScaleScene(): event.accept() else: event.ignore() else: event.accept()
def closeEvent(self, event): '\n Clean up and send the WINDOW_CLOSED_MESSAGE on the response pipe \n before closing the window.\n ' self.__timer.stop() self.__cmndpipe.close() try: try: self.__rspdpipe.send(WINDOW_CLOSED_MESSAGE) finally: self.__rspdpipe.close() except Exception: pass event.accept()
4,528,090,795,886,455,300
Clean up and send the WINDOW_CLOSED_MESSAGE on the response pipe before closing the window.
pviewmod/pipedimagerpq.py
closeEvent
Jhongesell/PyFerret
python
def closeEvent(self, event): '\n Clean up and send the WINDOW_CLOSED_MESSAGE on the response pipe \n before closing the window.\n ' self.__timer.stop() self.__cmndpipe.close() try: try: self.__rspdpipe.send(WINDOW_CLOSED_MESSAGE) finally: self.__rspdpipe.close() except Exception: pass event.accept()
def exitViewer(self): '\n Close and exit the viewer.\n ' self.close()
9,079,184,898,135,921,000
Close and exit the viewer.
pviewmod/pipedimagerpq.py
exitViewer
Jhongesell/PyFerret
python
def exitViewer(self): '\n \n ' self.close()
def ignoreAlpha(self): '\n Return whether the alpha channel in colors should always be ignored.\n ' return self.__noalpha
-836,406,128,240,010,500
Return whether the alpha channel in colors should always be ignored.
pviewmod/pipedimagerpq.py
ignoreAlpha
Jhongesell/PyFerret
python
def ignoreAlpha(self): '\n \n ' return self.__noalpha
def updateScene(self): '\n Clear the displayed scene using self.__lastclearcolor,\n then draw the scaled current image.\n ' labelwidth = int(((self.__scalefactor * self.__scenewidth) + 0.5)) labelheight = int(((self.__scalefactor * self.__sceneheight) + 0.5)) newpixmap = QPixmap(labelwidth, labelheight) newpixmap.fill(self.__lastclearcolor) if (self.__sceneimage != None): mypainter = QPainter(newpixmap) trgrect = QRectF(0.0, 0.0, float(labelwidth), float(labelheight)) srcrect = QRectF(0.0, 0.0, float(self.__scenewidth), float(self.__sceneheight)) mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor) mypainter.end() self.__label.setPixmap(newpixmap) self.__label.setMinimumSize(labelwidth, labelheight) self.__label.resize(labelwidth, labelheight) self.__label.update()
-1,039,652,449,111,709,000
Clear the displayed scene using self.__lastclearcolor, then draw the scaled current image.
pviewmod/pipedimagerpq.py
updateScene
Jhongesell/PyFerret
python
def updateScene(self): '\n Clear the displayed scene using self.__lastclearcolor,\n then draw the scaled current image.\n ' labelwidth = int(((self.__scalefactor * self.__scenewidth) + 0.5)) labelheight = int(((self.__scalefactor * self.__sceneheight) + 0.5)) newpixmap = QPixmap(labelwidth, labelheight) newpixmap.fill(self.__lastclearcolor) if (self.__sceneimage != None): mypainter = QPainter(newpixmap) trgrect = QRectF(0.0, 0.0, float(labelwidth), float(labelheight)) srcrect = QRectF(0.0, 0.0, float(self.__scenewidth), float(self.__sceneheight)) mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor) mypainter.end() self.__label.setPixmap(newpixmap) self.__label.setMinimumSize(labelwidth, labelheight) self.__label.resize(labelwidth, labelheight) self.__label.update()
def clearScene(self, bkgcolor=None): '\n Deletes the scene image and fills the label with bkgcolor.\n If bkgcolor is None or an invalid color, the color used is \n the one used from the last clearScene or redrawScene call \n with a valid color (or opaque white if a color has never \n been specified).\n ' if bkgcolor: if bkgcolor.isValid(): self.__lastclearcolor = bkgcolor self.__sceneimage = None self.__scenedata = None self.updateScene()
-7,456,187,501,108,598,000
Deletes the scene image and fills the label with bkgcolor. If bkgcolor is None or an invalid color, the color used is the one from the most recent clearScene or redrawScene call with a valid color (or opaque white if a color has never been specified).
pviewmod/pipedimagerpq.py
clearScene
Jhongesell/PyFerret
python
def clearScene(self, bkgcolor=None): '\n Deletes the scene image and fills the label with bkgcolor.\n If bkgcolor is None or an invalid color, the color used is \n the one used from the last clearScene or redrawScene call \n with a valid color (or opaque white if a color has never \n been specified).\n ' if bkgcolor: if bkgcolor.isValid(): self.__lastclearcolor = bkgcolor self.__sceneimage = None self.__scenedata = None self.updateScene()
def redrawScene(self, bkgcolor=None): '\n Clear and redraw the displayed scene.\n ' if bkgcolor: if bkgcolor.isValid(): self.__lastclearcolor = bkgcolor QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Redrawing image')) try: self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
-6,725,467,494,940,689,000
Clear and redraw the displayed scene.
pviewmod/pipedimagerpq.py
redrawScene
Jhongesell/PyFerret
python
def redrawScene(self, bkgcolor=None): '\n \n ' if bkgcolor: if bkgcolor.isValid(): self.__lastclearcolor = bkgcolor QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Redrawing image')) try: self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
def resizeScene(self, width, height): '\n Resize the scene to the given width and height in units of pixels.\n If the size changes, this deletes the current image and clears the\n displayed scene.\n ' newwidth = int((width + 0.5)) if (newwidth < self.__minsize): newwidth = self.__minsize newheight = int((height + 0.5)) if (newheight < self.__minsize): newheight = self.__minsize if ((newwidth != self.__scenewidth) or (newheight != self.__sceneheight)): self.__scenewidth = newwidth self.__sceneheight = newheight if self.__autoscale: self.__scalefactor = 1.0 barheights = (self.menuBar().height() + self.statusBar().height()) self.resize((newwidth + self.__framedelta), ((newheight + self.__framedelta) + barheights)) self.clearScene(None)
-7,876,032,258,201,786,000
Resize the scene to the given width and height in units of pixels. If the size changes, this deletes the current image and clears the displayed scene.
pviewmod/pipedimagerpq.py
resizeScene
Jhongesell/PyFerret
python
def resizeScene(self, width, height): '\n Resize the scene to the given width and height in units of pixels.\n If the size changes, this deletes the current image and clears the\n displayed scene.\n ' newwidth = int((width + 0.5)) if (newwidth < self.__minsize): newwidth = self.__minsize newheight = int((height + 0.5)) if (newheight < self.__minsize): newheight = self.__minsize if ((newwidth != self.__scenewidth) or (newheight != self.__sceneheight)): self.__scenewidth = newwidth self.__sceneheight = newheight if self.__autoscale: self.__scalefactor = 1.0 barheights = (self.menuBar().height() + self.statusBar().height()) self.resize((newwidth + self.__framedelta), ((newheight + self.__framedelta) + barheights)) self.clearScene(None)
def loadNewSceneImage(self, imageinfo): '\n Create a new scene image from the information given in this\n and subsequent dictionaries imageinfo. The image is created\n from multiple calls to this function since there is a limit\n on the size of a single object passed through a pipe.\n \n The first imageinfo dictionary given when creating an image\n must define the following key and value pairs:\n "width": width of the image in pixels\n "height": height of the image in pixels\n "stride": number of bytes in one line of the image\n in the bytearray\n The scene image data is initialized to all zero (transparent)\n at this time.\n\n This initialization call must be followed by (multiple) calls\n to this method with imageinfo dictionaries defining the key\n and value pairs:\n "blocknum": data block number (1, 2, ... numblocks)\n "numblocks": total number of image data blocks\n "startindex": index in the bytearray of image data\n where this block of image data starts\n "blockdata": this block of data as a bytearray\n\n On receipt of the last block of data (blocknum == numblocks)\n the scene image will be created and the scene will be updated. \n\n Raises:\n KeyError - if one of the above keys is not given\n ValueError - if a value for a key is not valid\n ' if (not self.__loadingimage): myimgwidth = int(imageinfo['width']) myimgheight = int(imageinfo['height']) myimgstride = int(imageinfo['stride']) if ((myimgwidth < self.__minsize) or (myimgheight < self.__minsize)): raise ValueError(('image width and height cannot be less than %s' % str(self.__minsize))) if (myimgstride != (4 * myimgwidth)): raise ValueError('image stride is not four times the image width') self.__scenedata = bytearray((myimgstride * myimgheight)) self.__scenewidth = myimgwidth self.__sceneheight = myimgheight self.__loadingimage = True QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Loading new image')) return myblocknum = int(imageinfo['blocknum']) mynumblocks = int(imageinfo['numblocks']) mystartindex = int(imageinfo['startindex']) myblockdata = imageinfo['blockdata'] if ((myblocknum < 1) or (myblocknum > mynumblocks)): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid image data block number or number of blocks') if ((mystartindex < 0) or (mystartindex >= len(self.__scenedata))): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid start index for an image data block') myblocksize = len(myblockdata) myendindex = (mystartindex + myblocksize) if ((myblocksize < 1) or (myendindex > len(self.__scenedata))): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid length of an image data block') self.statusBar().showMessage(self.tr(('Loading new image (block %s of %s)' % (str(myblocknum), str(mynumblocks))))) self.__scenedata[mystartindex:myendindex] = myblockdata if (myblocknum == mynumblocks): self.__loadingimage = False self.statusBar().showMessage(self.tr('Creating new image')) try: self.__sceneimage = QImage(self.__scenedata, self.__scenewidth, self.__sceneheight, QImage.Format_ARGB32_Premultiplied) self.statusBar().showMessage(self.tr('Drawing new image')) self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
7,243,623,650,107,950,000
Create a new scene image from the information given in this and subsequent dictionaries imageinfo. The image is created from multiple calls to this function since there is a limit on the size of a single object passed through a pipe. The first imageinfo dictionary given when creating an image must define the following key and value pairs: "width": width of the image in pixels "height": height of the image in pixels "stride": number of bytes in one line of the image in the bytearray The scene image data is initialized to all zero (transparent) at this time. This initialization call must be followed by (multiple) calls to this method with imageinfo dictionaries defining the key and value pairs: "blocknum": data block number (1, 2, ... numblocks) "numblocks": total number of image data blocks "startindex": index in the bytearray of image data where this block of image data starts "blockdata": this block of data as a bytearray On receipt of the last block of data (blocknum == numblocks) the scene image will be created and the scene will be updated. Raises: KeyError - if one of the above keys is not given ValueError - if a value for a key is not valid
pviewmod/pipedimagerpq.py
loadNewSceneImage
Jhongesell/PyFerret
python
def loadNewSceneImage(self, imageinfo): '\n Create a new scene image from the information given in this\n and subsequent dictionaries imageinfo. The image is created\n from multiple calls to this function since there is a limit\n on the size of a single object passed through a pipe.\n \n The first imageinfo dictionary given when creating an image\n must define the following key and value pairs:\n "width": width of the image in pixels\n "height": height of the image in pixels\n "stride": number of bytes in one line of the image\n in the bytearray\n The scene image data is initialized to all zero (transparent)\n at this time.\n\n This initialization call must be followed by (multiple) calls\n to this method with imageinfo dictionaries defining the key\n and value pairs:\n "blocknum": data block number (1, 2, ... numblocks)\n "numblocks": total number of image data blocks\n "startindex": index in the bytearray of image data\n where this block of image data starts\n "blockdata": this block of data as a bytearray\n\n On receipt of the last block of data (blocknum == numblocks)\n the scene image will be created and the scene will be updated. \n\n Raises:\n KeyError - if one of the above keys is not given\n ValueError - if a value for a key is not valid\n ' if (not self.__loadingimage): myimgwidth = int(imageinfo['width']) myimgheight = int(imageinfo['height']) myimgstride = int(imageinfo['stride']) if ((myimgwidth < self.__minsize) or (myimgheight < self.__minsize)): raise ValueError(('image width and height cannot be less than %s' % str(self.__minsize))) if (myimgstride != (4 * myimgwidth)): raise ValueError('image stride is not four times the image width') self.__scenedata = bytearray((myimgstride * myimgheight)) self.__scenewidth = myimgwidth self.__sceneheight = myimgheight self.__loadingimage = True QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Loading new image')) return myblocknum = int(imageinfo['blocknum']) mynumblocks = int(imageinfo['numblocks']) mystartindex = int(imageinfo['startindex']) myblockdata = imageinfo['blockdata'] if ((myblocknum < 1) or (myblocknum > mynumblocks)): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid image data block number or number of blocks') if ((mystartindex < 0) or (mystartindex >= len(self.__scenedata))): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid start index for an image data block') myblocksize = len(myblockdata) myendindex = (mystartindex + myblocksize) if ((myblocksize < 1) or (myendindex > len(self.__scenedata))): self.statusBar().clearMessage() QApplication.restoreOverrideCursor() raise ValueError('invalid length of an image data block') self.statusBar().showMessage(self.tr(('Loading new image (block %s of %s)' % (str(myblocknum), str(mynumblocks))))) self.__scenedata[mystartindex:myendindex] = myblockdata if (myblocknum == mynumblocks): self.__loadingimage = False self.statusBar().showMessage(self.tr('Creating new image')) try: self.__sceneimage = QImage(self.__scenedata, self.__scenewidth, self.__sceneheight, QImage.Format_ARGB32_Premultiplied) self.statusBar().showMessage(self.tr('Drawing new image')) self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
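The block protocol documented above is easy to drive from the sending end. A minimal sketch, assuming a multiprocessing Connection cmndpipe and raw premultiplied-ARGB32 bytes; the helper name send_scene_image and the block size are illustrative, not part of PyFerret:

def send_scene_image(cmndpipe, data, width, height, blocksize=65536):
    # Phase 1: declare the image; loadNewSceneImage requires stride == 4 * width.
    cmndpipe.send({'action': 'newImage', 'width': width,
                   'height': height, 'stride': 4 * width})
    # Phase 2: stream the pixel bytes in numbered blocks (1 .. numblocks).
    numblocks = (len(data) + blocksize - 1) // blocksize
    for i in range(numblocks):
        start = i * blocksize
        cmndpipe.send({'action': 'newImage',
                       'blocknum': i + 1,
                       'numblocks': numblocks,
                       'startindex': start,
                       'blockdata': data[start:start + blocksize]})

On receipt of block numblocks, the viewer assembles the accumulated bytearray into a QImage, as the body above shows.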
def inquireSceneScale(self): '\n Prompt the user for the desired scaling factor for the scene.\n ' labelwidth = int(((self.__scenewidth * self.__scalefactor) + 0.5)) labelheight = int(((self.__sceneheight * self.__scalefactor) + 0.5)) scaledlg = ScaleDialogPQ(self.__scalefactor, labelwidth, labelheight, self.__minsize, self.__minsize, self.__autoscale, self) if scaledlg.exec_(): (newscale, autoscale, okay) = scaledlg.getValues() if okay: if autoscale: self.__autoscale = True self.autoScaleScene() else: self.__autoscale = False self.scaleScene(newscale, False)
-6,377,533,769,439,809,000
Prompt the user for the desired scaling factor for the scene.
pviewmod/pipedimagerpq.py
inquireSceneScale
Jhongesell/PyFerret
python
def inquireSceneScale(self): '\n \n ' labelwidth = int(((self.__scenewidth * self.__scalefactor) + 0.5)) labelheight = int(((self.__sceneheight * self.__scalefactor) + 0.5)) scaledlg = ScaleDialogPQ(self.__scalefactor, labelwidth, labelheight, self.__minsize, self.__minsize, self.__autoscale, self) if scaledlg.exec_(): (newscale, autoscale, okay) = scaledlg.getValues() if okay: if autoscale: self.__autoscale = True self.autoScaleScene() else: self.__autoscale = False self.scaleScene(newscale, False)
def autoScaleScene(self): '\n Selects a scaling factor that maximizes the scene within the window \n frame without requiring scroll bars. Intended to be called when\n the window size is changed by the user and auto-scaling is turn on.\n\n Returns:\n True if scaling of this scene is done (no window resize)\n False if the a window resize command was issued\n ' barheights = (self.menuBar().height() + self.statusBar().height()) cwheight = ((self.height() - barheights) - self.__framedelta) heightsf = (float(cwheight) / float(self.__sceneheight)) cwwidth = (self.width() - self.__framedelta) widthsf = (float(cwwidth) / float(self.__scenewidth)) if (heightsf < widthsf): factor = heightsf else: factor = widthsf newcwheight = int(((factor * self.__sceneheight) + 0.5)) newcwwidth = int(((factor * self.__scenewidth) + 0.5)) if (self.isMaximized() or ((abs((cwheight - newcwheight)) <= self.__framedelta) and (abs((cwwidth - newcwwidth)) <= self.__framedelta))): self.scaleScene(factor, False) return True else: self.resize((newcwwidth + self.__framedelta), ((newcwheight + self.__framedelta) + barheights)) return False
-2,819,780,426,648,615,400
Selects a scaling factor that maximizes the scene within the window frame without requiring scroll bars. Intended to be called when the window size is changed by the user and auto-scaling is turned on. Returns: True if scaling of this scene is done (no window resize) False if a window resize command was issued
pviewmod/pipedimagerpq.py
autoScaleScene
Jhongesell/PyFerret
python
def autoScaleScene(self): '\n Selects a scaling factor that maximizes the scene within the window \n frame without requiring scroll bars. Intended to be called when\n the window size is changed by the user and auto-scaling is turn on.\n\n Returns:\n True if scaling of this scene is done (no window resize)\n False if the a window resize command was issued\n ' barheights = (self.menuBar().height() + self.statusBar().height()) cwheight = ((self.height() - barheights) - self.__framedelta) heightsf = (float(cwheight) / float(self.__sceneheight)) cwwidth = (self.width() - self.__framedelta) widthsf = (float(cwwidth) / float(self.__scenewidth)) if (heightsf < widthsf): factor = heightsf else: factor = widthsf newcwheight = int(((factor * self.__sceneheight) + 0.5)) newcwwidth = int(((factor * self.__scenewidth) + 0.5)) if (self.isMaximized() or ((abs((cwheight - newcwheight)) <= self.__framedelta) and (abs((cwwidth - newcwwidth)) <= self.__framedelta))): self.scaleScene(factor, False) return True else: self.resize((newcwwidth + self.__framedelta), ((newcwheight + self.__framedelta) + barheights)) return False
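The factor picked above is the smaller of the two ratios, so the scaled scene fits both dimensions of the content area. A quick numeric illustration (dimensions invented):

# A 1000x800 scene shown in a 500x300 content area:
heightsf = 300 / 800     # 0.375
widthsf = 500 / 1000     # 0.5
factor = min(heightsf, widthsf)  # 0.375 -> scene scales to 375x300, no scroll bars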
def scaleScene(self, factor, resizewin): '\n Scales both the horizontal and vertical directions by factor.\n Scaling factors are not accumulative. So if the scene was\n already scaled, that scaling is "removed" before this scaling\n factor is applied. If resizewin is True, the main window is \n resized to accommodate this new scaled scene size.\n\n If factor is zero, just switch to auto-scaling at the current\n window size. If factor is negative, rescale using the absolute\n value (possibly resizing the window) then switch to auto-scaling.\n ' fltfactor = float(factor) if (fltfactor != 0.0): if resizewin: self.__autoscale = False newfactor = abs(fltfactor) newlabwidth = int(((newfactor * self.__scenewidth) + 0.5)) newlabheight = int(((newfactor * self.__sceneheight) + 0.5)) if ((newlabwidth < self.__minsize) or (newlabheight < self.__minsize)): if (self.__scenewidth <= self.__sceneheight): newfactor = (float(self.__minsize) / float(self.__scenewidth)) else: newfactor = (float(self.__minsize) / float(self.__sceneheight)) newlabwidth = int(((newfactor * self.__scenewidth) + 0.5)) newlabheight = int(((newfactor * self.__sceneheight) + 0.5)) oldlabwidth = int(((self.__scalefactor * self.__scenewidth) + 0.5)) oldlabheight = int(((self.__scalefactor * self.__sceneheight) + 0.5)) if ((newlabwidth != oldlabwidth) or (newlabheight != oldlabheight)): self.__scalefactor = newfactor QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Scaling image')) try: self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor() if resizewin: barheights = (self.menuBar().height() + self.statusBar().height()) mwheight = ((newlabheight + barheights) + self.__framedelta) mwwidth = (newlabwidth + self.__framedelta) scrnrect = QApplication.desktop().availableGeometry() if (mwwidth > (0.95 * scrnrect.width())): mwwidth = int(((0.9 * scrnrect.width()) + 0.5)) if (mwheight > (0.95 * scrnrect.height())): mwheight = int(((0.9 * scrnrect.height()) + 0.5)) self.resize(mwwidth, mwheight) if (fltfactor <= 0.0): self.__autoscale = True self.autoScaleScene()
5,059,421,301,477,726,000
Scales both the horizontal and vertical directions by factor. Scaling factors are not accumulative. So if the scene was already scaled, that scaling is "removed" before this scaling factor is applied. If resizewin is True, the main window is resized to accommodate this new scaled scene size. If factor is zero, just switch to auto-scaling at the current window size. If factor is negative, rescale using the absolute value (possibly resizing the window) then switch to auto-scaling.
pviewmod/pipedimagerpq.py
scaleScene
Jhongesell/PyFerret
python
def scaleScene(self, factor, resizewin): '\n Scales both the horizontal and vertical directions by factor.\n Scaling factors are not accumulative. So if the scene was\n already scaled, that scaling is "removed" before this scaling\n factor is applied. If resizewin is True, the main window is \n resized to accommodate this new scaled scene size.\n\n If factor is zero, just switch to auto-scaling at the current\n window size. If factor is negative, rescale using the absolute\n value (possibly resizing the window) then switch to auto-scaling.\n ' fltfactor = float(factor) if (fltfactor != 0.0): if resizewin: self.__autoscale = False newfactor = abs(fltfactor) newlabwidth = int(((newfactor * self.__scenewidth) + 0.5)) newlabheight = int(((newfactor * self.__sceneheight) + 0.5)) if ((newlabwidth < self.__minsize) or (newlabheight < self.__minsize)): if (self.__scenewidth <= self.__sceneheight): newfactor = (float(self.__minsize) / float(self.__scenewidth)) else: newfactor = (float(self.__minsize) / float(self.__sceneheight)) newlabwidth = int(((newfactor * self.__scenewidth) + 0.5)) newlabheight = int(((newfactor * self.__sceneheight) + 0.5)) oldlabwidth = int(((self.__scalefactor * self.__scenewidth) + 0.5)) oldlabheight = int(((self.__scalefactor * self.__sceneheight) + 0.5)) if ((newlabwidth != oldlabwidth) or (newlabheight != oldlabheight)): self.__scalefactor = newfactor QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Scaling image')) try: self.updateScene() finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor() if resizewin: barheights = (self.menuBar().height() + self.statusBar().height()) mwheight = ((newlabheight + barheights) + self.__framedelta) mwwidth = (newlabwidth + self.__framedelta) scrnrect = QApplication.desktop().availableGeometry() if (mwwidth > (0.95 * scrnrect.width())): mwwidth = int(((0.9 * scrnrect.width()) + 0.5)) if (mwheight > (0.95 * scrnrect.height())): mwheight = int(((0.9 * scrnrect.height()) + 0.5)) self.resize(mwwidth, mwheight) if (fltfactor <= 0.0): self.__autoscale = True self.autoScaleScene()
def inquireSaveFilename(self): '\n Prompt the user for the name of the file into which to save the scene.\n The file format will be determined from the filename extension.\n ' formattypes = [('png', 'PNG - Portable Networks Graphics (*.png)'), ('jpeg', 'JPEG - Joint Photographic Experts Group (*.jpeg *.jpg *.jpe)'), ('tiff', 'TIFF - Tagged Image File Format (*.tiff *.tif)'), ('bmp', 'BMP - Windows Bitmap (*.bmp)'), ('ppm', 'PPM - Portable Pixmap (*.ppm)'), ('xpm', 'XPM - X11 Pixmap (*.xpm)'), ('xbm', 'XBM - X11 Bitmap (*.xbm)')] filters = ';;'.join([t[1] for t in formattypes]) if (QT_VERSION == 5): (fileName, fileFilter) = QFileDialog.getSaveFileName(self, self.tr('Save the current image as '), self.tr(self.__lastfilename), self.tr(filters)) else: (fileName, fileFilter) = QFileDialog.getSaveFileNameAndFilter(self, self.tr('Save the current image as '), self.tr(self.__lastfilename), self.tr(filters)) if fileName: for (fmt, fmtQName) in formattypes: if (self.tr(fmtQName) == fileFilter): fileFormat = fmt break else: raise RuntimeError(("Unexpected file format name '%s'" % fileFilter)) self.saveSceneToFile(fileName, fileFormat, None, None) self.__lastfilename = fileName self.__lastformat = fileFormat
1,811,016,900,019,698,700
Prompt the user for the name of the file into which to save the scene. The file format will be determined from the filename extension.
pviewmod/pipedimagerpq.py
inquireSaveFilename
Jhongesell/PyFerret
python
def inquireSaveFilename(self): '\n Prompt the user for the name of the file into which to save the scene.\n The file format will be determined from the filename extension.\n ' formattypes = [('png', 'PNG - Portable Networks Graphics (*.png)'), ('jpeg', 'JPEG - Joint Photographic Experts Group (*.jpeg *.jpg *.jpe)'), ('tiff', 'TIFF - Tagged Image File Format (*.tiff *.tif)'), ('bmp', 'BMP - Windows Bitmap (*.bmp)'), ('ppm', 'PPM - Portable Pixmap (*.ppm)'), ('xpm', 'XPM - X11 Pixmap (*.xpm)'), ('xbm', 'XBM - X11 Bitmap (*.xbm)')] filters = ';;'.join([t[1] for t in formattypes]) if (QT_VERSION == 5): (fileName, fileFilter) = QFileDialog.getSaveFileName(self, self.tr('Save the current image as '), self.tr(self.__lastfilename), self.tr(filters)) else: (fileName, fileFilter) = QFileDialog.getSaveFileNameAndFilter(self, self.tr('Save the current image as '), self.tr(self.__lastfilename), self.tr(filters)) if fileName: for (fmt, fmtQName) in formattypes: if (self.tr(fmtQName) == fileFilter): fileFormat = fmt break else: raise RuntimeError(("Unexpected file format name '%s'" % fileFilter)) self.saveSceneToFile(fileName, fileFormat, None, None) self.__lastfilename = fileName self.__lastformat = fileFormat
def saveSceneToFile(self, filename, imageformat, transparent, rastsize): '\n Save the current scene to the named file.\n \n If imageformat is empty or None, the format is guessed from\n the filename extension.\n\n If transparent is False, the entire scene is initialized\n to the last clearing color.\n\n If given, rastsize is the pixels size of the saved image.\n If rastsize is not given, the saved image will be saved\n at the current scaled image size. \n ' if (self.__sceneimage == None): return if (not imageformat): fileext = os.path.splitext(filename)[1].lower() if (fileext == '.gif'): myformat = 'gif' else: myformat = None else: myformat = imageformat.lower() if (myformat == 'gif'): myformat = 'png' myfilename = (os.path.splitext(filename)[0] + '.png') else: myfilename = filename QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Saving image')) try: if rastsize: imagewidth = int((rastsize.width() + 0.5)) imageheight = int((rastsize.height() + 0.5)) else: imagewidth = int(((self.__scenewidth * self.__scalefactor) + 0.5)) imageheight = int(((self.__sceneheight * self.__scalefactor) + 0.5)) myimage = QImage(QSize(imagewidth, imageheight), QImage.Format_ARGB32_Premultiplied) if (not transparent): fillint = self.__helper.computeARGB32PreMultInt(self.__lastclearcolor) else: fillint = 0 myimage.fill(fillint) mypainter = QPainter(myimage) trgrect = QRectF(0.0, 0.0, float(imagewidth), float(imageheight)) srcrect = QRectF(0.0, 0.0, float(self.__scenewidth), float(self.__sceneheight)) mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor) mypainter.end() if (not myimage.save(myfilename, myformat)): raise ValueError(('Unable to save the plot as ' + myfilename)) finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
5,735,915,115,842,178,000
Save the current scene to the named file. If imageformat is empty or None, the format is guessed from the filename extension. If transparent is False, the entire scene is initialized to the last clearing color. If given, rastsize is the pixel size of the saved image. If rastsize is not given, the image will be saved at the current scaled image size.
pviewmod/pipedimagerpq.py
saveSceneToFile
Jhongesell/PyFerret
python
def saveSceneToFile(self, filename, imageformat, transparent, rastsize): '\n Save the current scene to the named file.\n \n If imageformat is empty or None, the format is guessed from\n the filename extension.\n\n If transparent is False, the entire scene is initialized\n to the last clearing color.\n\n If given, rastsize is the pixels size of the saved image.\n If rastsize is not given, the saved image will be saved\n at the current scaled image size. \n ' if (self.__sceneimage == None): return if (not imageformat): fileext = os.path.splitext(filename)[1].lower() if (fileext == '.gif'): myformat = 'gif' else: myformat = None else: myformat = imageformat.lower() if (myformat == 'gif'): myformat = 'png' myfilename = (os.path.splitext(filename)[0] + '.png') else: myfilename = filename QApplication.setOverrideCursor(Qt.WaitCursor) self.statusBar().showMessage(self.tr('Saving image')) try: if rastsize: imagewidth = int((rastsize.width() + 0.5)) imageheight = int((rastsize.height() + 0.5)) else: imagewidth = int(((self.__scenewidth * self.__scalefactor) + 0.5)) imageheight = int(((self.__sceneheight * self.__scalefactor) + 0.5)) myimage = QImage(QSize(imagewidth, imageheight), QImage.Format_ARGB32_Premultiplied) if (not transparent): fillint = self.__helper.computeARGB32PreMultInt(self.__lastclearcolor) else: fillint = 0 myimage.fill(fillint) mypainter = QPainter(myimage) trgrect = QRectF(0.0, 0.0, float(imagewidth), float(imageheight)) srcrect = QRectF(0.0, 0.0, float(self.__scenewidth), float(self.__sceneheight)) mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor) mypainter.end() if (not myimage.save(myfilename, myformat)): raise ValueError(('Unable to save the plot as ' + myfilename)) finally: self.statusBar().clearMessage() QApplication.restoreOverrideCursor()
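A hypothetical call, for orientation; viewer stands in for a PipedImagerPQ instance and QSize comes from Qt's QtCore:

# Save at a fixed raster size with an opaque background (transparent=False
# fills with the last clearing color before drawing the scene).
viewer.saveSceneToFile('plot.gif', None, False, QSize(2400, 1800))
# '.gif' is detected from the extension and quietly rewritten to 'plot.png',
# presumably because no GIF writer is available to QImage.save.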
def checkCommandPipe(self): '\n Get and perform commands waiting in the pipe.\n Stop when no more commands or if more than 50\n milliseconds have passed.\n ' try: starttime = time.clock() while self.__cmndpipe.poll(0.002): cmnd = self.__cmndpipe.recv() self.processCommand(cmnd) if ((time.clock() - starttime) > 0.05): break except EOFError: self.exitViewer() except Exception: (exctype, excval) = sys.exc_info()[:2] try: if excval: self.__rspdpipe.send(('**ERROR %s: %s' % (str(exctype), str(excval)))) else: self.__rspdpipe.send(('**ERROR %s' % str(exctype))) except Exception: pass
-1,018,358,762,143,865,300
Get and perform commands waiting in the pipe. Stop when no more commands or if more than 50 milliseconds have passed.
pviewmod/pipedimagerpq.py
checkCommandPipe
Jhongesell/PyFerret
python
def checkCommandPipe(self): '\n Get and perform commands waiting in the pipe.\n Stop when no more commands or if more than 50\n milliseconds have passed.\n ' try: starttime = time.clock() while self.__cmndpipe.poll(0.002): cmnd = self.__cmndpipe.recv() self.processCommand(cmnd) if ((time.clock() - starttime) > 0.05): break except EOFError: self.exitViewer() except Exception: (exctype, excval) = sys.exc_info()[:2] try: if excval: self.__rspdpipe.send(('**ERROR %s: %s' % (str(exctype), str(excval)))) else: self.__rspdpipe.send(('**ERROR %s' % str(exctype))) except Exception: pass
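Note that time.clock() was deprecated in Python 3.3 and removed in 3.8, so the loop above only runs as-is on older interpreters. Below is a sketch of the same 50-millisecond budget using time.perf_counter(); the free function drain_pipe is hypothetical, written only to keep the example self-contained:

import time

def drain_pipe(cmndpipe, handle, budget=0.05):
    # Mirror checkCommandPipe: poll briefly, process, stop when over budget.
    start = time.perf_counter()
    while cmndpipe.poll(0.002):
        handle(cmndpipe.recv())
        if (time.perf_counter() - start) > budget:
            break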
def processCommand(self, cmnd): '\n Examine the action of cmnd and call the appropriate\n method to deal with this command. Raises a KeyError\n if the "action" key is missing.\n ' try: cmndact = cmnd['action'] except KeyError: raise ValueError(("Unknown command '%s'" % str(cmnd))) if (cmndact == 'clear'): try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None self.clearScene(bkgcolor) elif (cmndact == 'exit'): self.exitViewer() elif (cmndact == 'hide'): self.showMinimized() elif (cmndact == 'screenInfo'): scrnrect = QApplication.desktop().availableGeometry() info = (self.physicalDpiX(), self.physicalDpiY(), scrnrect.width(), scrnrect.height()) self.__rspdpipe.send(info) elif (cmndact == 'redraw'): try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None self.redrawScene(bkgcolor) elif (cmndact == 'rescale'): self.scaleScene(float(cmnd['factor']), True) elif (cmndact == 'resize'): mysize = self.__helper.getSizeFromCmnd(cmnd) self.resizeScene(mysize.width(), mysize.height()) elif (cmndact == 'newImage'): self.loadNewSceneImage(cmnd) elif (cmndact == 'save'): filename = cmnd['filename'] fileformat = cmnd.get('fileformat', None) try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None rastsize = self.__helper.getSizeFromCmnd(cmnd['rastsize']) self.saveSceneToFile(filename, fileformat, bkgcolor, rastsize) elif (cmndact == 'setTitle'): self.setWindowTitle(cmnd['title']) elif (cmndact == 'imgname'): myvalue = cmnd.get('name', None) if myvalue: self.__lastfilename = myvalue myvalue = cmnd.get('format', None) if myvalue: self.__lastformat = myvalue.lower() elif (cmndact == 'show'): if (not self.isVisible()): self.show() elif (cmndact == 'noalpha'): self.__noalpha = True else: raise ValueError(('Unknown command action %s' % str(cmndact)))
-6,530,815,419,400,795,000
Examine the action of cmnd and call the appropriate method to deal with this command. Raises a ValueError if the "action" key is missing or the action is unknown.
pviewmod/pipedimagerpq.py
processCommand
Jhongesell/PyFerret
python
def processCommand(self, cmnd): '\n Examine the action of cmnd and call the appropriate\n method to deal with this command. Raises a KeyError\n if the "action" key is missing.\n ' try: cmndact = cmnd['action'] except KeyError: raise ValueError(("Unknown command '%s'" % str(cmnd))) if (cmndact == 'clear'): try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None self.clearScene(bkgcolor) elif (cmndact == 'exit'): self.exitViewer() elif (cmndact == 'hide'): self.showMinimized() elif (cmndact == 'screenInfo'): scrnrect = QApplication.desktop().availableGeometry() info = (self.physicalDpiX(), self.physicalDpiY(), scrnrect.width(), scrnrect.height()) self.__rspdpipe.send(info) elif (cmndact == 'redraw'): try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None self.redrawScene(bkgcolor) elif (cmndact == 'rescale'): self.scaleScene(float(cmnd['factor']), True) elif (cmndact == 'resize'): mysize = self.__helper.getSizeFromCmnd(cmnd) self.resizeScene(mysize.width(), mysize.height()) elif (cmndact == 'newImage'): self.loadNewSceneImage(cmnd) elif (cmndact == 'save'): filename = cmnd['filename'] fileformat = cmnd.get('fileformat', None) try: bkgcolor = self.__helper.getColorFromCmnd(cmnd) except KeyError: bkgcolor = None rastsize = self.__helper.getSizeFromCmnd(cmnd['rastsize']) self.saveSceneToFile(filename, fileformat, bkgcolor, rastsize) elif (cmndact == 'setTitle'): self.setWindowTitle(cmnd['title']) elif (cmndact == 'imgname'): myvalue = cmnd.get('name', None) if myvalue: self.__lastfilename = myvalue myvalue = cmnd.get('format', None) if myvalue: self.__lastformat = myvalue.lower() elif (cmndact == 'show'): if (not self.isVisible()): self.show() elif (cmndact == 'noalpha'): self.__noalpha = True else: raise ValueError(('Unknown command action %s' % str(cmndact)))
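The dispatch above defines a small dict-based command protocol. A few commands it accepts, with keys taken directly from the branches above; viewer and the values are illustrative:

viewer.processCommand({'action': 'setTitle', 'title': 'SST anomaly'})
viewer.processCommand({'action': 'rescale', 'factor': 0.75})
viewer.processCommand({'action': 'imgname', 'name': 'plot.png', 'format': 'png'})
viewer.processCommand({'action': 'show'})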
def __init__(self, cmndpipe, rspdpipe): '\n Create a Process that will produce a PipedImagerPQ\n attached to the given Pipes when run.\n ' super(PipedImagerPQProcess, self).__init__(group=None, target=None, name='PipedImagerPQ') self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe self.__app = None self.__viewer = None
2,727,524,586,828,027,000
Create a Process that will produce a PipedImagerPQ attached to the given Pipes when run.
pviewmod/pipedimagerpq.py
__init__
Jhongesell/PyFerret
python
def __init__(self, cmndpipe, rspdpipe): '\n Create a Process that will produce a PipedImagerPQ\n attached to the given Pipes when run.\n ' super(PipedImagerPQProcess, self).__init__(group=None, target=None, name='PipedImagerPQ') self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe self.__app = None self.__viewer = None
def run(self): '\n Create a PipedImagerPQ that is attached\n to the Pipe of this instance.\n ' self.__app = QApplication(['PipedImagerPQ']) self.__viewer = PipedImagerPQ(self.__cmndpipe, self.__rspdpipe) myresult = self.__app.exec_() sys.exit(myresult)
-3,157,003,115,251,203,000
Create a PipedImagerPQ that is attached to the Pipe of this instance.
pviewmod/pipedimagerpq.py
run
Jhongesell/PyFerret
python
def run(self): '\n Create a PipedImagerPQ that is attached\n to the Pipe of this instance.\n ' self.__app = QApplication(['PipedImagerPQ']) self.__viewer = PipedImagerPQ(self.__cmndpipe, self.__rspdpipe) myresult = self.__app.exec_() sys.exit(myresult)
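Putting the pieces together: the viewer runs in a child process and is driven over a pair of one-way pipes. A minimal wiring sketch, assuming the classes above are importable:

import multiprocessing

# duplex=False gives (receive-only, send-only) connection pairs.
(cmnd_recv, cmnd_send) = multiprocessing.Pipe(False)
(rspd_recv, rspd_send) = multiprocessing.Pipe(False)
proc = PipedImagerPQProcess(cmnd_recv, rspd_send)
proc.start()
cmnd_send.send({'action': 'show'})
cmnd_send.send({'action': 'exit'})
proc.join()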
def __init__(self, parent, cmndpipe, rspdpipe, cmndlist): '\n Create a QDialog with a single QPushButton for controlling\n the submission of commands from cmndlist to cmndpipe.\n ' super(_CommandSubmitterPQ, self).__init__(parent) self.__cmndlist = cmndlist self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe self.__nextcmnd = 0 self.__button = QPushButton('Submit next command', self) self.__button.pressed.connect(self.submitNextCommand) self.show()
5,109,366,959,997,596,000
Create a QDialog with a single QPushButton for controlling the submission of commands from cmndlist to cmndpipe.
pviewmod/pipedimagerpq.py
__init__
Jhongesell/PyFerret
python
def __init__(self, parent, cmndpipe, rspdpipe, cmndlist): '\n Create a QDialog with a single QPushButton for controlling\n the submission of commands from cmndlist to cmndpipe.\n ' super(_CommandSubmitterPQ, self).__init__(parent) self.__cmndlist = cmndlist self.__cmndpipe = cmndpipe self.__rspdpipe = rspdpipe self.__nextcmnd = 0 self.__button = QPushButton('Submit next command', self) self.__button.pressed.connect(self.submitNextCommand) self.show()
def submitNextCommand(self): '\n Submit the next command from the command list to the command pipe,\n or shutdown if there are no more commands to submit.\n ' try: cmndstr = str(self.__cmndlist[self.__nextcmnd]) if (len(cmndstr) > 188): cmndstr = (cmndstr[:188] + '...') print(('Command: %s' % cmndstr)) self.__cmndpipe.send(self.__cmndlist[self.__nextcmnd]) self.__nextcmnd += 1 while self.__rspdpipe.poll(0.1): print(('Response: %s' % str(self.__rspdpipe.recv()))) except IndexError: self.__rspdpipe.close() self.__cmndpipe.close() self.close()
-4,546,979,611,108,747,000
Submit the next command from the command list to the command pipe, or shutdown if there are no more commands to submit.
pviewmod/pipedimagerpq.py
submitNextCommand
Jhongesell/PyFerret
python
def submitNextCommand(self): '\n Submit the next command from the command list to the command pipe,\n or shutdown if there are no more commands to submit.\n ' try: cmndstr = str(self.__cmndlist[self.__nextcmnd]) if (len(cmndstr) > 188): cmndstr = (cmndstr[:188] + '...') print(('Command: %s' % cmndstr)) self.__cmndpipe.send(self.__cmndlist[self.__nextcmnd]) self.__nextcmnd += 1 while self.__rspdpipe.poll(0.1): print(('Response: %s' % str(self.__rspdpipe.recv()))) except IndexError: self.__rspdpipe.close() self.__cmndpipe.close() self.close()
def split_match(self, match): 'Override this method to prefix the error message with the lint binary name.' (match, line, col, error, warning, message, near) = super().split_match(match) if match: message = ('[vcom] ' + message) return (match, line, col, error, warning, message, near)
6,648,812,486,559,834,000
Override this method to prefix the error message with the lint binary name.
linter.py
split_match
dave2pi/SublimeLinter-contrib-vcom
python
def split_match(self, match): (match, line, col, error, warning, message, near) = super().split_match(match) if match: message = ('[vcom] ' + message) return (match, line, col, error, warning, message, near)
def setUp(self): 'See unittest.TestCase.setUp for full specification.\n\n Overriding implementations must call this implementation.\n ' self._control = test_control.PauseFailControl() self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE) self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool) (generic_stub, dynamic_stubs, self._memo) = self.implementation.instantiate(self._digest.methods, self._digest.event_method_implementations, None) self._invoker = self.invoker_constructor.construct_invoker(generic_stub, dynamic_stubs, self._digest.methods)
5,657,211,816,029,526,000
See unittest.TestCase.setUp for full specification. Overriding implementations must call this implementation.
src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
setUp
DiracResearch/grpc
python
def setUp(self): 'See unittest.TestCase.setUp for full specification.\n\n Overriding implementations must call this implementation.\n ' self._control = test_control.PauseFailControl() self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE) self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool) (generic_stub, dynamic_stubs, self._memo) = self.implementation.instantiate(self._digest.methods, self._digest.event_method_implementations, None) self._invoker = self.invoker_constructor.construct_invoker(generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self): 'See unittest.TestCase.tearDown for full specification.\n\n Overriding implementations must call this implementation.\n ' self._invoker = None self.implementation.destantiate(self._memo) self._digest_pool.shutdown(wait=True)
-5,514,593,741,479,847,000
See unittest.TestCase.tearDown for full specification. Overriding implementations must call this implementation.
src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
tearDown
DiracResearch/grpc
python
def tearDown(self): 'See unittest.TestCase.tearDown for full specification.\n\n Overriding implementations must call this implementation.\n ' self._invoker = None self.implementation.destantiate(self._memo) self._digest_pool.shutdown(wait=True)
def killall(self, everywhere=False): 'Kills all nailgun servers started by pants.\n\n :param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;\n otherwise restricts the nailguns killed to those started for the\n current build root.\n ' with self._NAILGUN_KILL_LOCK: for proc in self._iter_nailgun_instances(everywhere): logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid)) proc.terminate()
7,276,317,597,980,383,000
Kills all nailgun servers started by pants. :param bool everywhere: If ``True``, kills all pants-started nailguns on this machine; otherwise restricts the nailguns killed to those started for the current build root.
src/python/pants/java/nailgun_executor.py
killall
revl/pants
python
def killall(self, everywhere=False): 'Kills all nailgun servers started by pants.\n\n :param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;\n otherwise restricts the nailguns killed to those started for the\n current build root.\n ' with self._NAILGUN_KILL_LOCK: for proc in self._iter_nailgun_instances(everywhere): logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid)) proc.terminate()
@staticmethod def _fingerprint(jvm_options, classpath, java_version): 'Compute a fingerprint for this invocation of a Java task.\n\n :param list jvm_options: JVM options passed to the java invocation\n :param list classpath: The -cp arguments passed to the java invocation\n :param Revision java_version: return value from Distribution.version()\n :return: a hexstring representing a fingerprint of the java invocation\n ' digest = hashlib.sha1() encoded_jvm_options = [option.encode() for option in sorted(jvm_options)] encoded_classpath = [cp.encode() for cp in sorted(classpath)] encoded_java_version = repr(java_version).encode() for item in (encoded_jvm_options, encoded_classpath, encoded_java_version): digest.update(str(item).encode()) return digest.hexdigest()
-855,648,847,069,729,900
Compute a fingerprint for this invocation of a Java task. :param list jvm_options: JVM options passed to the java invocation :param list classpath: The -cp arguments passed to the java invocation :param Revision java_version: return value from Distribution.version() :return: a hexstring representing a fingerprint of the java invocation
src/python/pants/java/nailgun_executor.py
_fingerprint
revl/pants
python
@staticmethod def _fingerprint(jvm_options, classpath, java_version): 'Compute a fingerprint for this invocation of a Java task.\n\n :param list jvm_options: JVM options passed to the java invocation\n :param list classpath: The -cp arguments passed to the java invocation\n :param Revision java_version: return value from Distribution.version()\n :return: a hexstring representing a fingerprint of the java invocation\n ' digest = hashlib.sha1() encoded_jvm_options = [option.encode() for option in sorted(jvm_options)] encoded_classpath = [cp.encode() for cp in sorted(classpath)] encoded_java_version = repr(java_version).encode() for item in (encoded_jvm_options, encoded_classpath, encoded_java_version): digest.update(str(item).encode()) return digest.hexdigest()
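Because both option lists are sorted before hashing, the fingerprint is order-insensitive in its inputs. A standalone sketch of the same scheme (free function, illustrative version string):

import hashlib

def fingerprint(jvm_options, classpath, java_version):
    # Hash sorted, encoded inputs so equivalent invocations collide.
    digest = hashlib.sha1()
    for item in ([o.encode() for o in sorted(jvm_options)],
                 [c.encode() for c in sorted(classpath)],
                 repr(java_version).encode()):
        digest.update(str(item).encode())
    return digest.hexdigest()

assert (fingerprint(['-Xmx1g', '-ea'], ['a.jar'], '1.8.0')
        == fingerprint(['-ea', '-Xmx1g'], ['a.jar'], '1.8.0'))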
def _runner(self, classpath, main, jvm_options, args): 'Runner factory.\n\n Called via Executor.execute().\n ' command = self._create_command(classpath, main, jvm_options, args) class Runner(self.Runner): @property def executor(this): return self @property def command(self): return list(command) def run(this, stdout=None, stderr=None, stdin=None, cwd=None): nailgun = None try: nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr, stdin) logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd)) return nailgun.execute(main, cwd, *args) except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e: self.terminate() raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'.format(ng_desc=(nailgun or '<no nailgun connection>'), main=main, args=' '.join(args), msg=e)) return Runner()
964,269,671,404,280,400
Runner factory. Called via Executor.execute().
src/python/pants/java/nailgun_executor.py
_runner
revl/pants
python
def _runner(self, classpath, main, jvm_options, args): 'Runner factory.\n\n Called via Executor.execute().\n ' command = self._create_command(classpath, main, jvm_options, args) class Runner(self.Runner): @property def executor(this): return self @property def command(self): return list(command) def run(this, stdout=None, stderr=None, stdin=None, cwd=None): nailgun = None try: nailgun = self._get_nailgun_client(jvm_options, classpath, stdout, stderr, stdin) logger.debug('Executing via {ng_desc}: {cmd}'.format(ng_desc=nailgun, cmd=this.cmd)) return nailgun.execute(main, cwd, *args) except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e: self.terminate() raise self.Error('Problem launching via {ng_desc} command {main} {args}: {msg}'.format(ng_desc=(nailgun or '<no nailgun connection>'), main=main, args=' '.join(args), msg=e)) return Runner()
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin): 'This (somewhat unfortunately) is the main entrypoint to this class via the Runner.\n\n It handles creation of the running nailgun server as well as creation of the client.\n ' classpath = (self._nailgun_classpath + classpath) new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version) with self._NAILGUN_SPAWN_LOCK: (running, updated) = self._check_nailgun_state(new_fingerprint) if (running and updated): logger.debug('Found running nailgun server that needs updating, killing {server}'.format(server=self._identity)) self.terminate() if ((not running) or (running and updated)): return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin) return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
5,750,670,072,620,023,000
This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles creation of the running nailgun server as well as creation of the client.
src/python/pants/java/nailgun_executor.py
_get_nailgun_client
revl/pants
python
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin): 'This (somewhat unfortunately) is the main entrypoint to this class via the Runner.\n\n It handles creation of the running nailgun server as well as creation of the client.\n ' classpath = (self._nailgun_classpath + classpath) new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version) with self._NAILGUN_SPAWN_LOCK: (running, updated) = self._check_nailgun_state(new_fingerprint) if (running and updated): logger.debug('Found running nailgun server that needs updating, killing {server}'.format(server=self._identity)) self.terminate() if ((not running) or (running and updated)): return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin) return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
def _await_socket(self, timeout): 'Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun\n stdout.' start_time = time.time() accumulated_stdout = '' def calculate_remaining_time(): return (time.time() - (start_time + timeout)) def possibly_raise_timeout(remaining_time): if (remaining_time > 0): stderr = read_file(self._ng_stderr, binary_mode=True) raise self.InitialNailgunConnectTimedOut(timeout=timeout, stdout=accumulated_stdout, stderr=stderr) with selectors.PollSelector() as selector, safe_open(self._ng_stdout, 'r') as ng_stdout: selector.register(ng_stdout, selectors.EVENT_READ) while 1: remaining_time = calculate_remaining_time() possibly_raise_timeout(remaining_time) events = selector.select(timeout=((- 1) * remaining_time)) if events: line = ng_stdout.readline() try: return self._NG_PORT_REGEX.match(line).group(1) except AttributeError: pass accumulated_stdout += line
-7,452,305,804,962,434,000
Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout.
src/python/pants/java/nailgun_executor.py
_await_socket
revl/pants
python
def _await_socket(self, timeout): 'Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun\n stdout.' start_time = time.time() accumulated_stdout = '' def calculate_remaining_time(): return (time.time() - (start_time + timeout)) def possibly_raise_timeout(remaining_time): if (remaining_time > 0): stderr = read_file(self._ng_stderr, binary_mode=True) raise self.InitialNailgunConnectTimedOut(timeout=timeout, stdout=accumulated_stdout, stderr=stderr) with selectors.PollSelector() as selector, safe_open(self._ng_stdout, 'r') as ng_stdout: selector.register(ng_stdout, selectors.EVENT_READ) while 1: remaining_time = calculate_remaining_time() possibly_raise_timeout(remaining_time) events = selector.select(timeout=((- 1) * remaining_time)) if events: line = ng_stdout.readline() try: return self._NG_PORT_REGEX.match(line).group(1) except AttributeError: pass accumulated_stdout += line
def ensure_connectable(self, nailgun): 'Ensures that a nailgun client is connectable or raises NailgunError.' attempt_count = 1 while 1: try: with closing(nailgun.try_connect()) as sock: logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername())) return except nailgun.NailgunConnectionError: if (attempt_count >= self._connect_attempts): logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts)) raise attempt_count += 1 time.sleep(self.WAIT_INTERVAL_SEC)
-8,188,085,437,961,309,000
Ensures that a nailgun client is connectable or raises NailgunError.
src/python/pants/java/nailgun_executor.py
ensure_connectable
revl/pants
python
def ensure_connectable(self, nailgun): attempt_count = 1 while 1: try: with closing(nailgun.try_connect()) as sock: logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername())) return except nailgun.NailgunConnectionError: if (attempt_count >= self._connect_attempts): logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts)) raise attempt_count += 1 time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin): 'Synchronously spawn a new nailgun server.' safe_file_dump(self._ng_stdout, b'', mode='wb') safe_file_dump(self._ng_stderr, b'', mode='wb') jvm_options = (jvm_options + [self._PANTS_NG_BUILDROOT_ARG, self._create_owner_arg(self._workdir), self._create_fingerprint_arg(fingerprint)]) post_fork_child_opts = dict(fingerprint=fingerprint, jvm_options=jvm_options, classpath=classpath, stdout=stdout, stderr=stderr) logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath)) self.daemon_spawn(post_fork_child_opts=post_fork_child_opts) self.await_pid(self._startup_timeout) self.write_socket(self._await_socket(self._connect_timeout)) logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket)) client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin) self.ensure_connectable(client) return client
-9,036,215,056,650,780,000
Synchronously spawn a new nailgun server.
src/python/pants/java/nailgun_executor.py
_spawn_nailgun_server
revl/pants
python
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin): safe_file_dump(self._ng_stdout, b'', mode='wb') safe_file_dump(self._ng_stderr, b'', mode='wb') jvm_options = (jvm_options + [self._PANTS_NG_BUILDROOT_ARG, self._create_owner_arg(self._workdir), self._create_fingerprint_arg(fingerprint)]) post_fork_child_opts = dict(fingerprint=fingerprint, jvm_options=jvm_options, classpath=classpath, stdout=stdout, stderr=stderr) logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath)) self.daemon_spawn(post_fork_child_opts=post_fork_child_opts) self.await_pid(self._startup_timeout) self.write_socket(self._await_socket(self._connect_timeout)) logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket)) client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin) self.ensure_connectable(client) return client
def _check_process_buildroot(self, process): 'Matches only processes started from the current buildroot.' return (self._PANTS_NG_BUILDROOT_ARG in process.cmdline())
4,314,080,186,965,596,700
Matches only processes started from the current buildroot.
src/python/pants/java/nailgun_executor.py
_check_process_buildroot
revl/pants
python
def _check_process_buildroot(self, process): return (self._PANTS_NG_BUILDROOT_ARG in process.cmdline())
def is_alive(self): 'A ProcessManager.is_alive() override that ensures buildroot flags are present in the\n process command line arguments.' return super().is_alive(self._check_process_buildroot)
-4,234,401,703,301,696,500
A ProcessManager.is_alive() override that ensures buildroot flags are present in the process command line arguments.
src/python/pants/java/nailgun_executor.py
is_alive
revl/pants
python
def is_alive(self): 'A ProcessManager.is_alive() override that ensures buildroot flags are present in the\n process command line arguments.' return super().is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr): 'Post-fork() child callback for ProcessManager.daemon_spawn().' java = SubprocessExecutor(self._distribution) subproc = java.spawn(classpath=classpath, main='com.martiansoftware.nailgun.NGServer', jvm_options=jvm_options, args=[':0'], stdin=safe_open('/dev/null', 'r'), stdout=safe_open(self._ng_stdout, 'w'), stderr=safe_open(self._ng_stderr, 'w'), close_fds=True) self.write_pid(subproc.pid)
-1,710,778,269,961,609,500
Post-fork() child callback for ProcessManager.daemon_spawn().
src/python/pants/java/nailgun_executor.py
post_fork_child
revl/pants
python
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr): java = SubprocessExecutor(self._distribution) subproc = java.spawn(classpath=classpath, main='com.martiansoftware.nailgun.NGServer', jvm_options=jvm_options, args=[':0'], stdin=safe_open('/dev/null', 'r'), stdout=safe_open(self._ng_stdout, 'w'), stderr=safe_open(self._ng_stderr, 'w'), close_fds=True) self.write_pid(subproc.pid)
def __init__(self, io: StratumStyle, config: configparser.ConfigParser): '\n Object constructor.\n\n :param PyStratumStyle io: The output decorator.\n ' self._code: str = '' '\n The generated Python code buffer.\n ' self._lob_as_string_flag: bool = False '\n If true BLOBs and CLOBs must be treated as strings.\n ' self._metadata_filename: Optional[str] = None '\n The filename of the file with the metadata of all stored procedures.\n ' self._parent_class_name: Optional[str] = None '\n The class name of the parent class of the routine wrapper.\n ' self._parent_class_namespace: Optional[str] = None '\n The namespace of the parent class of the routine wrapper.\n ' self._wrapper_class_name: Optional[str] = None '\n The class name of the routine wrapper.\n ' self._wrapper_filename: Optional[str] = None '\n The filename where the generated wrapper class must be stored.\n ' self._io: StratumStyle = io '\n The output decorator.\n ' self._config = config '\n The configuration object.\n\n :type: ConfigParser \n '
-2,458,651,980,066,538,000
Object constructor. :param PyStratumStyle io: The output decorator.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
__init__
DatabaseStratum/py-stratum-common
python
def __init__(self, io: StratumStyle, config: configparser.ConfigParser): '\n Object constructor.\n\n :param PyStratumStyle io: The output decorator.\n ' self._code: str = '' '\n The generated Python code buffer.\n ' self._lob_as_string_flag: bool = False '\n If true BLOBs and CLOBs must be treated as strings.\n ' self._metadata_filename: Optional[str] = None '\n The filename of the file with the metadata of all stored procedures.\n ' self._parent_class_name: Optional[str] = None '\n The class name of the parent class of the routine wrapper.\n ' self._parent_class_namespace: Optional[str] = None '\n The namespace of the parent class of the routine wrapper.\n ' self._wrapper_class_name: Optional[str] = None '\n The class name of the routine wrapper.\n ' self._wrapper_filename: Optional[str] = None '\n The filename where the generated wrapper class must be stored.\n ' self._io: StratumStyle = io '\n The output decorator.\n ' self._config = config '\n The configuration object.\n\n :type: ConfigParser \n '
def execute(self) -> int: '\n The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.\n\n :rtype: int\n ' self._read_configuration_file() if self._wrapper_class_name: self._io.title('Wrapper') self.__generate_wrapper_class() self._io.writeln('') else: self._io.log_verbose('Wrapper not enabled') return 0
-8,795,018,086,121,583,000
The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred. :rtype: int
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
execute
DatabaseStratum/py-stratum-common
python
def execute(self) -> int: '\n The "main" of the wrapper generator. Returns 0 on success, 1 if one or more errors occurred.\n\n :rtype: int\n ' self._read_configuration_file() if self._wrapper_class_name: self._io.title('Wrapper') self.__generate_wrapper_class() self._io.writeln('') else: self._io.log_verbose('Wrapper not enabled') return 0
def __generate_wrapper_class(self) -> None: '\n Generates the wrapper class.\n ' routines = self._read_routine_metadata() self._write_class_header() if routines: for routine_name in sorted(routines): if (routines[routine_name]['designation'] != 'hidden'): self._write_routine_function(routines[routine_name]) else: self._io.error('No files with stored routines found') self._write_class_trailer() Util.write_two_phases(self._wrapper_filename, self._code, self._io)
4,423,072,790,207,266,300
Generates the wrapper class.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
__generate_wrapper_class
DatabaseStratum/py-stratum-common
python
def __generate_wrapper_class(self) -> None: '\n \n ' routines = self._read_routine_metadata() self._write_class_header() if routines: for routine_name in sorted(routines): if (routines[routine_name]['designation'] != 'hidden'): self._write_routine_function(routines[routine_name]) else: self._io.error('No files with stored routines found') self._write_class_trailer() Util.write_two_phases(self._wrapper_filename, self._code, self._io)
def _read_configuration_file(self) -> None: '\n Reads parameters from the configuration file.\n ' self._parent_class_name = self._config.get('wrapper', 'parent_class') self._parent_class_namespace = self._config.get('wrapper', 'parent_class_namespace') self._wrapper_class_name = self._config.get('wrapper', 'wrapper_class') self._wrapper_filename = self._config.get('wrapper', 'wrapper_file') self._metadata_filename = self._config.get('wrapper', 'metadata') self._lob_as_string_flag = bool(self._config.get('wrapper', 'lob_as_string'))
8,673,982,918,055,212,000
Reads parameters from the configuration file.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_read_configuration_file
DatabaseStratum/py-stratum-common
python
def _read_configuration_file(self) -> None: '\n \n ' self._parent_class_name = self._config.get('wrapper', 'parent_class') self._parent_class_namespace = self._config.get('wrapper', 'parent_class_namespace') self._wrapper_class_name = self._config.get('wrapper', 'wrapper_class') self._wrapper_filename = self._config.get('wrapper', 'wrapper_file') self._metadata_filename = self._config.get('wrapper', 'metadata') self._lob_as_string_flag = bool(self._config.get('wrapper', 'lob_as_string'))
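The reader above expects a [wrapper] section with exactly these six keys. A hypothetical configuration that would satisfy it (all names invented); note that bool() is applied to the raw string, so any non-empty value for lob_as_string, including 'False', comes out truthy:

import configparser

config = configparser.ConfigParser()
config.read_string('''
[wrapper]
parent_class = StaticDataLayer
parent_class_namespace = my_app.StaticDataLayer
wrapper_class = DataLayer
wrapper_file = my_app/DataLayer.py
metadata = etc/routine_metadata.json
lob_as_string = True
''')
assert config.get('wrapper', 'wrapper_class') == 'DataLayer'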
def _read_routine_metadata(self) -> Dict: '\n Returns the metadata of stored routines.\n\n :rtype: dict\n ' metadata = {} if os.path.isfile(self._metadata_filename): with open(self._metadata_filename, 'r') as file: metadata = json.load(file) return metadata
8,979,833,419,646,104,000
Returns the metadata of stored routines. :rtype: dict
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_read_routine_metadata
DatabaseStratum/py-stratum-common
python
def _read_routine_metadata(self) -> Dict: '\n Returns the metadata of stored routines.\n\n :rtype: dict\n ' metadata = {} if os.path.isfile(self._metadata_filename): with open(self._metadata_filename, 'r') as file: metadata = json.load(file) return metadata
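The metadata file holds a JSON object keyed by routine name; from __generate_wrapper_class above, each entry carries at least a 'designation' field, and routines designated 'hidden' are skipped. A minimal shape, with names and designation values invented:

# Minimal metadata consistent with how the worker reads it.
metadata = {
    'blog_get_all': {'designation': 'rows'},      # gets a wrapper method
    'internal_helper': {'designation': 'hidden'}  # skipped by the generator
}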
def _write_class_header(self) -> None: '\n Generate a class header for stored routine wrapper.\n ' self._write_line('from typing import Any, Dict, List, Optional, Union') self._write_line() self._write_line('from {0!s} import {1!s}'.format(self._parent_class_namespace, self._parent_class_name)) self._write_line() self._write_line() self._write_line('class {0!s}({1!s}):'.format(self._wrapper_class_name, self._parent_class_name)) self._write_line(' """') self._write_line(' The stored routines wrappers.') self._write_line(' """')
1,402,745,181,515,204,400
Generate a class header for stored routine wrapper.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_write_class_header
DatabaseStratum/py-stratum-common
python
def _write_class_header(self) -> None: '\n \n ' self._write_line('from typing import Any, Dict, List, Optional, Union') self._write_line() self._write_line('from {0!s} import {1!s}'.format(self._parent_class_namespace, self._parent_class_name)) self._write_line() self._write_line() self._write_line('class {0!s}({1!s}):'.format(self._wrapper_class_name, self._parent_class_name)) self._write_line(' """') self._write_line(' The stored routines wrappers.') self._write_line(' """')
def _write_line(self, text: str='') -> None: '\n Writes a line with Python code to the generate code buffer.\n\n :param str text: The line with Python code.\n ' if text: self._code += (str(text) + '\n') else: self._code += '\n'
5,762,203,659,539,912,000
Writes a line with Python code to the generated code buffer. :param str text: The line with Python code.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_write_line
DatabaseStratum/py-stratum-common
python
def _write_line(self, text: str='') -> None: '\n Writes a line with Python code to the generate code buffer.\n\n :param str text: The line with Python code.\n ' if text: self._code += (str(text) + '\n') else: self._code += '\n'
def _write_class_trailer(self) -> None: '\n Generate a class trailer for stored routine wrapper.\n ' self._write_line() self._write_line() self._write_line(('# ' + ('-' * 118)))
1,877,206,851,702,984,400
Generate a class trailer for stored routine wrapper.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_write_class_trailer
DatabaseStratum/py-stratum-common
python
def _write_class_trailer(self) -> None: '\n \n ' self._write_line() self._write_line() self._write_line(('# ' + ('-' * 118)))
@abc.abstractmethod def _write_routine_function(self, routine: Dict[(str, Any)]) -> None: '\n Generates a complete wrapper method for a stored routine.\n\n :param dict routine: The metadata of the stored routine.\n ' raise NotImplementedError()
-2,223,047,177,619,755,500
Generates a complete wrapper method for a stored routine. :param dict routine: The metadata of the stored routine.
pystratum_common/backend/CommonRoutineWrapperGeneratorWorker.py
_write_routine_function
DatabaseStratum/py-stratum-common
python
@abc.abstractmethod def _write_routine_function(self, routine: Dict[(str, Any)]) -> None: '\n Generates a complete wrapper method for a stored routine.\n\n :param dict routine: The metadata of the stored routine.\n ' raise NotImplementedError()
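Concrete backends supply this method. A bare-bones sketch of a subclass, only to show the contract; the class name, the 'routine_name' key, and the emitted stub are all assumptions:

class SketchRoutineWrapperGeneratorWorker(CommonRoutineWrapperGeneratorWorker):
    def _write_routine_function(self, routine):
        # Emit a stub method per routine into the generated class body.
        self._write_line()
        self._write_line('    def {0}(self):'.format(routine['routine_name']))
        self._write_line('        raise NotImplementedError()')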
def get_parser(): 'Parser to specify arguments and their defaults.' parser = argparse.ArgumentParser(prog='neuropredict', formatter_class=argparse.RawTextHelpFormatter, description='Easy, standardized and comprehensive predictive analysis.') help_text_fs_dir = textwrap.dedent('\n Absolute path to ``SUBJECTS_DIR`` containing the finished runs of Freesurfer parcellation\n Each subject will be queried by its ID in the metadata file.\n\n E.g. ``--fs_subject_dir /project/freesurfer_v5.3``\n \n \n ') help_text_user_defined_folder = textwrap.dedent("\n List of absolute paths to user's own features.\n\n Format: Each of these folders contains a separate folder for each subject (named after its ID in the metadata file)\n containing a file called features.txt with one number per line.\n All the subjects (in a given folder) must have the same number of features (#lines in file).\n Different parent folders (describing one feature set) can have different numbers of features for each subject,\n but they must all have the same number of subjects (folders) within them.\n\n The name of each folder is used to annotate the results in visualizations.\n Hence name them uniquely and meaningfully, keeping in mind these figures will be included in your papers.\n For example,\n\n .. parsed-literal::\n\n --user_feature_paths /project/fmri/ /project/dti/ /project/t1_volumes/\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \n \n ") help_text_pyradigm_paths = textwrap.dedent('\n Path(s) to pyradigm datasets.\n\n Each path is a self-contained dataset identifying each sample, its class and features.\n \n \n ') help_text_data_matrix = textwrap.dedent("\n List of absolute paths to text files containing one matrix of size N x p (num_samples x num_features).\n\n Each row in the data matrix file must represent data corresponding to the sample in the same row\n of the meta data file (meta data file and data matrix must be in row-wise correspondence).\n\n The name of this file will be used to annotate the results and visualizations.\n\n E.g. ``--data_matrix_paths /project/fmri.csv /project/dti.csv /project/t1_volumes.csv ``\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \n File format could be\n - a simple comma-separated text file (with extension .csv or .txt): which can easily be read back with\n numpy.loadtxt(filepath, delimiter=',')\n or\n - a numpy array saved to disk (with extension .npy or .numpy) that can be read in with numpy.load(filepath).\n\n One could use ``numpy.savetxt(filepath, data_array, delimiter=',')`` or ``numpy.save(filepath, data_array)`` to save features.\n\n File format is inferred from its extension.\n \n \n ") help_text_arff_paths = textwrap.dedent("\n List of paths to files saved in Weka's ARFF dataset format.\n \n Note: \n - this format does NOT allow IDs for each subject.\n - given feature values are saved in text format, this can lead to large files with high-dimensional data, \n compared to numpy arrays saved to disk in binary format.\n \n More info: https://www.cs.waikato.ac.nz/ml/weka/arff.html\n \n \n ") help_text_positive_class = textwrap.dedent('\n Name of the positive class (e.g. 
Alzheimers, MCI etc) to be used in calculation of area under the ROC curve.\n Applicable only for binary classification experiments.\n\n Default: class appearing last in order specified in metadata file.\n \n \n ') help_text_train_perc = textwrap.dedent('\n Percentage of the smallest class to be reserved for training.\n\n Must be in the interval [0.01, 0.99].\n\n If sample size is sufficiently big, we recommend 0.5.\n If sample size is small, or class imbalance is high, choose 0.8.\n \n \n ') help_text_num_rep_cv = textwrap.dedent('\n Number of repetitions of the repeated-holdout cross-validation.\n\n The larger the number, the more stable the estimates will be.\n \n \n ') help_text_sub_groups = textwrap.dedent("\n This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.\n\n For example, in a dataset with 3 classes CN, FTD and AD,\n two studies of pair-wise combinations can be studied separately\n with the following flag ``--sub_groups CN,FTD CN,AD``.\n This allows the user to focus on a few interesting subgroups depending on their dataset/goal.\n\n Format: Different subgroups must be separated by space,\n and each sub-group must be a comma-separated list of class names defined in the meta data file.\n Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters,\n and ideally just alphanumeric characters separated by underscores.\n\n Any number of subgroups can be specified, but each subgroup must have at least two distinct classes.\n\n Default: ``'all'``, leading to inclusion of all available classes in an all-vs-all multi-class setting.\n \n \n ") help_text_metadata_file = textwrap.dedent('\n Abs path to file containing metadata for subjects to be included for analysis.\n\n At the minimum, each subject should have an id per row followed by the class it belongs to.\n\n E.g.\n .. parsed-literal::\n\n sub001,control\n sub002,control\n sub003,disease\n sub004,disease\n\n \n \n ') help_text_feature_selection = textwrap.dedent("Number of features to select as part of feature selection.\n Options:\n\n - 'tenth'\n - 'sqrt'\n - 'log2'\n - 'all'\n\n Default: 'tenth' of the number of samples in the training set.\n\n For example, if your dataset has 90 samples and you chose 50 percent for training (default),\n then the training set will have 90*.5=45 samples, leading to 5 features to be selected for training.\n If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features.\n \n \n ") help_text_gs_level = textwrap.dedent('\n Flag to specify the level of grid search during hyper-parameter optimization on the training set.\n Allowed options are : \'none\', \'light\' and \'exhaustive\', in increasing order of how many parameters/values will be optimized. \n \n More parameters and more values demand more resources and much longer time for optimization.\n \n The \'light\' option uses "folk wisdom" to try the fewest values (no more than one or two)\n for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest optimization). \n The \'light\' option will be the fastest and should give a "rough idea" of predictive performance. \n The \'exhaustive\' option will try the most parameter values for the most parameters that can be optimized.\n ') help_text_make_vis = textwrap.dedent('\n Option to make visualizations from existing results in the given path. \n This is helpful when neuropredict failed to generate result figures automatically \n e.g. 
on an HPC cluster, or another environment where DISPLAY is not available.\n \n ') help_text_atlas = textwrap.dedent('\n Name of the atlas to use for visualization. Default: fsaverage, if available.\n \n \n ') help_text_num_cpus = textwrap.dedent('\n Number of CPUs to use to parallelize CV repetitions.\n\n Default : 4.\n\n Number of CPUs will be capped at the number available on the machine if higher is requested.\n \n \n ') help_text_out_dir = textwrap.dedent('\n Output folder to store gathered features & results.\n \n \n ') help_classifier = textwrap.dedent("\n \n String specifying one of the implemented classifiers. \n (Classifiers are carefully chosen to allow for the comprehensive report provided by neuropredict).\n \n Default: 'RandomForestClassifier'\n \n ") help_feat_select_method = textwrap.dedent("\n Feature selection method to apply prior to training the classifier.\n \n Default: 'VarianceThreshold', removing features in the lowest 0.001 percent of variance (zeros etc.).\n \n ") parser.add_argument('-m', '--meta_file', action='store', dest='meta_file', default=None, required=False, help=help_text_metadata_file) parser.add_argument('-o', '--out_dir', action='store', dest='out_dir', required=False, help=help_text_out_dir, default=None) parser.add_argument('-f', '--fs_subject_dir', action='store', dest='fs_subject_dir', default=None, help=help_text_fs_dir) user_defined = parser.add_argument_group(title='Input data and formats', description='Only one of the following types can be specified.') user_defined.add_argument('-y', '--pyradigm_paths', action='store', dest='pyradigm_paths', nargs='+', default=None, help=help_text_pyradigm_paths) user_defined.add_argument('-u', '--user_feature_paths', action='store', dest='user_feature_paths', nargs='+', default=None, help=help_text_user_defined_folder) user_defined.add_argument('-d', '--data_matrix_paths', action='store', dest='data_matrix_paths', nargs='+', default=None, help=help_text_data_matrix) user_defined.add_argument('-a', '--arff_paths', action='store', dest='arff_paths', nargs='+', default=None, help=help_text_arff_paths) cv_args_group = parser.add_argument_group(title='Cross-validation', description='Parameters related to training and optimization during cross-validation') cv_args_group.add_argument('-p', '--positive_class', action='store', dest='positive_class', default=None, help=help_text_positive_class) cv_args_group.add_argument('-t', '--train_perc', action='store', dest='train_perc', default=cfg.default_train_perc, help=help_text_train_perc) cv_args_group.add_argument('-n', '--num_rep_cv', action='store', dest='num_rep_cv', default=cfg.default_num_repetitions, help=help_text_num_rep_cv) cv_args_group.add_argument('-k', '--num_features_to_select', dest='num_features_to_select', action='store', default=cfg.default_num_features_to_select, help=help_text_feature_selection) cv_args_group.add_argument('-sg', '--sub_groups', action='store', dest='sub_groups', nargs='*', default='all', help=help_text_sub_groups) cv_args_group.add_argument('-g', '--gs_level', action='store', dest='gs_level', default='light', help=help_text_gs_level, choices=cfg.GRIDSEARCH_LEVELS) pipeline_group = parser.add_argument_group(title='Predictive Model', description='Parameters related to pipeline comprising the predictive model') pipeline_group.add_argument('-fs', '--feat_select_method', action='store', dest='feat_select_method', default=cfg.default_feat_select_method, help=help_feat_select_method, choices=cfg.feature_selection_choices) 
pipeline_group.add_argument('-e', '--classifier', action='store', dest='classifier', default=cfg.default_classifier, help=help_classifier, choices=cfg.classifier_choices) vis_args = parser.add_argument_group(title='Visualization', description='Parameters related to generating visualizations') vis_args.add_argument('-z', '--make_vis', action='store', dest='make_vis', default=None, help=help_text_make_vis) comp_args = parser.add_argument_group(title='Computing', description='Parameters related to computations/debugging') comp_args.add_argument('-c', '--num_procs', action='store', dest='num_procs', default=cfg.DEFAULT_NUM_PROCS, help=help_text_num_cpus) comp_args.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=__version__)) return parser
-1,475,722,670,201,997,800
Parser to specify arguments and their defaults.
neuropredict/run_workflow.py
get_parser
dinga92/neuropredict
python
def get_parser(): parser = argparse.ArgumentParser(prog='neuropredict', formatter_class=argparse.RawTextHelpFormatter, description='Easy, standardized and comprehensive predictive analysis.') help_text_fs_dir = textwrap.dedent('\n Absolute path to ``SUBJECTS_DIR`` containing the finished runs of Freesurfer parcellation\n Each subject will be queried by its ID in the metadata file.\n\n E.g. ``--fs_subject_dir /project/freesurfer_v5.3``\n \n \n ') help_text_user_defined_folder = textwrap.dedent("\n List of absolute paths to user's own features.\n\n Format: Each of these folders contains a separate folder for each subject (named after its ID in the metadata file)\n containing a file called features.txt with one number per line.\n All the subjects (in a given folder) must have the same number of features (#lines in file).\n Different parent folders (describing one feature set) can have different numbers of features for each subject,\n but they must all have the same number of subjects (folders) within them.\n\n The name of each folder is used to annotate the results in visualizations.\n Hence name them uniquely and meaningfully, keeping in mind these figures will be included in your papers.\n For example,\n\n .. parsed-literal::\n\n --user_feature_paths /project/fmri/ /project/dti/ /project/t1_volumes/\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \n \n ") help_text_pyradigm_paths = textwrap.dedent('\n Path(s) to pyradigm datasets.\n\n Each path is a self-contained dataset identifying each sample, its class and features.\n \n \n ') help_text_data_matrix = textwrap.dedent("\n List of absolute paths to text files containing one matrix of size N x p (num_samples x num_features).\n\n Each row in the data matrix file must represent data corresponding to the sample in the same row\n of the meta data file (meta data file and data matrix must be in row-wise correspondence).\n\n The name of this file will be used to annotate the results and visualizations.\n\n E.g. ``--data_matrix_paths /project/fmri.csv /project/dti.csv /project/t1_volumes.csv ``\n\n Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.\n \n File format could be\n - a simple comma-separated text file (with extension .csv or .txt): which can easily be read back with\n numpy.loadtxt(filepath, delimiter=',')\n or\n - a numpy array saved to disk (with extension .npy or .numpy) that can be read in with numpy.load(filepath).\n\n One could use ``numpy.savetxt(filepath, data_array, delimiter=',')`` or ``numpy.save(filepath, data_array)`` to save features.\n\n File format is inferred from its extension.\n \n \n ") help_text_arff_paths = textwrap.dedent("\n List of paths to files saved in Weka's ARFF dataset format.\n \n Note: \n - this format does NOT allow IDs for each subject.\n - given feature values are saved in text format, this can lead to large files with high-dimensional data, \n compared to numpy arrays saved to disk in binary format.\n \n More info: https://www.cs.waikato.ac.nz/ml/weka/arff.html\n \n \n ") help_text_positive_class = textwrap.dedent('\n Name of the positive class (e.g. 
Alzheimers, MCI etc) to be used in calculation of area under the ROC curve.\n Applicable only for binary classification experiments.\n\n Default: class appearing last in order specified in metadata file.\n \n \n ') help_text_train_perc = textwrap.dedent('\n Percentage of the smallest class to be reserved for training.\n\n Must be in the interval [0.01, 0.99].\n\n If sample size is sufficiently big, we recommend 0.5.\n If sample size is small, or class imbalance is high, choose 0.8.\n \n \n ') help_text_num_rep_cv = textwrap.dedent('\n Number of repetitions of the repeated-holdout cross-validation.\n\n The larger the number, the more stable the estimates will be.\n \n \n ') help_text_sub_groups = textwrap.dedent("\n This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.\n\n For example, in a dataset with 3 classes CN, FTD and AD,\n two studies of pair-wise combinations can be studied separately\n with the following flag ``--sub_groups CN,FTD CN,AD``.\n This allows the user to focus on a few interesting subgroups depending on their dataset/goal.\n\n Format: Different subgroups must be separated by space,\n and each sub-group must be a comma-separated list of class names defined in the meta data file.\n Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters,\n and ideally just alphanumeric characters separated by underscores.\n\n Any number of subgroups can be specified, but each subgroup must have at least two distinct classes.\n\n Default: ``'all'``, leading to inclusion of all available classes in an all-vs-all multi-class setting.\n \n \n ") help_text_metadata_file = textwrap.dedent('\n Abs path to file containing metadata for subjects to be included for analysis.\n\n At the minimum, each subject should have an id per row followed by the class it belongs to.\n\n E.g.\n .. parsed-literal::\n\n sub001,control\n sub002,control\n sub003,disease\n sub004,disease\n\n \n \n ') help_text_feature_selection = textwrap.dedent("Number of features to select as part of feature selection.\n Options:\n\n - 'tenth'\n - 'sqrt'\n - 'log2'\n - 'all'\n\n Default: 'tenth' of the number of samples in the training set.\n\n For example, if your dataset has 90 samples and you chose 50 percent for training (default),\n then the training set will have 90*.5=45 samples, leading to 5 features to be selected for training.\n If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features.\n \n \n ") help_text_gs_level = textwrap.dedent('\n Flag to specify the level of grid search during hyper-parameter optimization on the training set.\n Allowed options are : \'none\', \'light\' and \'exhaustive\', in increasing order of how many parameters/values will be optimized. \n \n More parameters and more values demand more resources and much longer time for optimization.\n \n The \'light\' option uses "folk wisdom" to try the fewest values (no more than one or two)\n for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest optimization). \n The \'light\' option will be the fastest and should give a "rough idea" of predictive performance. \n The \'exhaustive\' option will try the most parameter values for the most parameters that can be optimized.\n ') help_text_make_vis = textwrap.dedent('\n Option to make visualizations from existing results in the given path. \n This is helpful when neuropredict failed to generate result figures automatically \n e.g. 
on an HPC cluster, or another environment where DISPLAY is not available.\n \n ') help_text_atlas = textwrap.dedent('\n Name of the atlas to use for visualization. Default: fsaverage, if available.\n \n \n ') help_text_num_cpus = textwrap.dedent('\n Number of CPUs to use to parallelize CV repetitions.\n\n Default : 4.\n\n Number of CPUs will be capped at the number available on the machine if higher is requested.\n \n \n ') help_text_out_dir = textwrap.dedent('\n Output folder to store gathered features & results.\n \n \n ') help_classifier = textwrap.dedent("\n \n String specifying one of the implemented classifiers. \n (Classifiers are carefully chosen to allow for the comprehensive report provided by neuropredict).\n \n Default: 'RandomForestClassifier'\n \n ") help_feat_select_method = textwrap.dedent("\n Feature selection method to apply prior to training the classifier.\n \n Default: 'VarianceThreshold', removing features in the lowest 0.001 percent of variance (zeros etc.).\n \n ") parser.add_argument('-m', '--meta_file', action='store', dest='meta_file', default=None, required=False, help=help_text_metadata_file) parser.add_argument('-o', '--out_dir', action='store', dest='out_dir', required=False, help=help_text_out_dir, default=None) parser.add_argument('-f', '--fs_subject_dir', action='store', dest='fs_subject_dir', default=None, help=help_text_fs_dir) user_defined = parser.add_argument_group(title='Input data and formats', description='Only one of the following types can be specified.') user_defined.add_argument('-y', '--pyradigm_paths', action='store', dest='pyradigm_paths', nargs='+', default=None, help=help_text_pyradigm_paths) user_defined.add_argument('-u', '--user_feature_paths', action='store', dest='user_feature_paths', nargs='+', default=None, help=help_text_user_defined_folder) user_defined.add_argument('-d', '--data_matrix_paths', action='store', dest='data_matrix_paths', nargs='+', default=None, help=help_text_data_matrix) user_defined.add_argument('-a', '--arff_paths', action='store', dest='arff_paths', nargs='+', default=None, help=help_text_arff_paths) cv_args_group = parser.add_argument_group(title='Cross-validation', description='Parameters related to training and optimization during cross-validation') cv_args_group.add_argument('-p', '--positive_class', action='store', dest='positive_class', default=None, help=help_text_positive_class) cv_args_group.add_argument('-t', '--train_perc', action='store', dest='train_perc', default=cfg.default_train_perc, help=help_text_train_perc) cv_args_group.add_argument('-n', '--num_rep_cv', action='store', dest='num_rep_cv', default=cfg.default_num_repetitions, help=help_text_num_rep_cv) cv_args_group.add_argument('-k', '--num_features_to_select', dest='num_features_to_select', action='store', default=cfg.default_num_features_to_select, help=help_text_feature_selection) cv_args_group.add_argument('-sg', '--sub_groups', action='store', dest='sub_groups', nargs='*', default='all', help=help_text_sub_groups) cv_args_group.add_argument('-g', '--gs_level', action='store', dest='gs_level', default='light', help=help_text_gs_level, choices=cfg.GRIDSEARCH_LEVELS) pipeline_group = parser.add_argument_group(title='Predictive Model', description='Parameters related to pipeline comprising the predictive model') pipeline_group.add_argument('-fs', '--feat_select_method', action='store', dest='feat_select_method', default=cfg.default_feat_select_method, help=help_feat_select_method, choices=cfg.feature_selection_choices) 
pipeline_group.add_argument('-e', '--classifier', action='store', dest='classifier', default=cfg.default_classifier, help=help_classifier, choices=cfg.classifier_choices) vis_args = parser.add_argument_group(title='Visualization', description='Parameters related to generating visualizations') vis_args.add_argument('-z', '--make_vis', action='store', dest='make_vis', default=None, help=help_text_make_vis) comp_args = parser.add_argument_group(title='Computing', description='Parameters related to computations/debugging') comp_args.add_argument('-c', '--num_procs', action='store', dest='num_procs', default=cfg.DEFAULT_NUM_PROCS, help=help_text_num_cpus) comp_args.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=__version__)) return parser
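The --data_matrix_paths help text above describes two on-disk formats for an N x p feature matrix. A minimal sketch of writing and reading both, using only the numpy calls the help text itself mentions (the file names are made up for illustration):

import numpy as np

X = np.random.rand(90, 300)                        # N x p: 90 samples, 300 features
np.savetxt('fmri_features.csv', X, delimiter=',')  # text variant (.csv/.txt)
np.save('fmri_features.npy', X)                    # binary variant (.npy)

X_csv = np.loadtxt('fmri_features.csv', delimiter=',')
X_npy = np.load('fmri_features.npy')
assert X_csv.shape == X_npy.shape == (90, 300)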
def organize_inputs(user_args): '\n Validates the input features specified and returns organized list of paths and readers.\n\n Parameters\n ----------\n user_args : ArgParse object\n Various options specified by the user.\n\n Returns\n -------\n user_feature_paths : list\n List of paths to specified input features\n user_feature_type : str\n String identifying the type of user-defined input\n fs_subject_dir : str\n Path to freesurfer subject directory, if supplied.\n meta_data_supplied : str or bool\n Path to the meta data within the input (pyradigm/ARFF), False otherwise.\n meta_data_format : str or None\n Format of the supplied meta data, if any.\n\n ' atleast_one_feature_specified = False meta_data_supplied = False meta_data_format = None if not_unspecified(user_args.fs_subject_dir): fs_subject_dir = abspath(user_args.fs_subject_dir) if (not pexists(fs_subject_dir)): raise IOError("Given Freesurfer directory doesn't exist.") atleast_one_feature_specified = True else: fs_subject_dir = None mutually_excl_formats = ['user_feature_paths', 'data_matrix_paths', 'pyradigm_paths', 'arff_paths'] not_none_count = 0 for format in mutually_excl_formats: if not_unspecified(getattr(user_args, format)): not_none_count = (not_none_count + 1) if (not_none_count > 1): raise ValueError('Only one of the following formats can be specified:\n{}'.format(mutually_excl_formats)) if not_unspecified(user_args.user_feature_paths): user_feature_paths = check_paths(user_args.user_feature_paths, path_type='user defined (dir_of_dirs)') atleast_one_feature_specified = True user_feature_type = 'dir_of_dirs' elif not_unspecified(user_args.data_matrix_paths): user_feature_paths = check_paths(user_args.data_matrix_paths, path_type='data matrix') atleast_one_feature_specified = True user_feature_type = 'data_matrix' elif not_unspecified(user_args.pyradigm_paths): user_feature_paths = check_paths(user_args.pyradigm_paths, path_type='pyradigm') atleast_one_feature_specified = True meta_data_supplied = user_feature_paths[0] meta_data_format = 'pyradigm' user_feature_type = 'pyradigm' elif not_unspecified(user_args.arff_paths): user_feature_paths = check_paths(user_args.arff_paths, path_type='ARFF') atleast_one_feature_specified = True user_feature_type = 'arff' meta_data_supplied = user_feature_paths[0] meta_data_format = 'arff' else: user_feature_paths = None user_feature_type = None if ((user_feature_paths is not None) and (not isinstance(user_feature_paths, list))): user_feature_paths = list(user_feature_paths) if (not atleast_one_feature_specified): raise ValueError('At least one method specifying features must be specified. It can be a path(s) to pyradigm dataset, matrix file, user-defined folder or a Freesurfer subject directory.') return (user_feature_paths, user_feature_type, fs_subject_dir, meta_data_supplied, meta_data_format)
-745,357,083,414,423,400
Validates the input features specified and returns organized list of paths and readers. Parameters ---------- user_args : ArgParse object Various options specified by the user. Returns ------- user_feature_paths : list List of paths to specified input features user_feature_type : str String identifying the type of user-defined input fs_subject_dir : str Path to freesurfer subject directory, if supplied. meta_data_supplied : str or bool Path to the meta data within the input (pyradigm/ARFF), False otherwise. meta_data_format : str or None Format of the supplied meta data, if any.
neuropredict/run_workflow.py
organize_inputs
dinga92/neuropredict
python
def organize_inputs(user_args): atleast_one_feature_specified = False meta_data_supplied = False meta_data_format = None if not_unspecified(user_args.fs_subject_dir): fs_subject_dir = abspath(user_args.fs_subject_dir) if (not pexists(fs_subject_dir)): raise IOError("Given Freesurfer directory doesn't exist.") atleast_one_feature_specified = True else: fs_subject_dir = None mutually_excl_formats = ['user_feature_paths', 'data_matrix_paths', 'pyradigm_paths', 'arff_paths'] not_none_count = 0 for format in mutually_excl_formats: if not_unspecified(getattr(user_args, format)): not_none_count = (not_none_count + 1) if (not_none_count > 1): raise ValueError('Only one of the following formats can be specified:\n{}'.format(mutually_excl_formats)) if not_unspecified(user_args.user_feature_paths): user_feature_paths = check_paths(user_args.user_feature_paths, path_type='user defined (dir_of_dirs)') atleast_one_feature_specified = True user_feature_type = 'dir_of_dirs' elif not_unspecified(user_args.data_matrix_paths): user_feature_paths = check_paths(user_args.data_matrix_paths, path_type='data matrix') atleast_one_feature_specified = True user_feature_type = 'data_matrix' elif not_unspecified(user_args.pyradigm_paths): user_feature_paths = check_paths(user_args.pyradigm_paths, path_type='pyradigm') atleast_one_feature_specified = True meta_data_supplied = user_feature_paths[0] meta_data_format = 'pyradigm' user_feature_type = 'pyradigm' elif not_unspecified(user_args.arff_paths): user_feature_paths = check_paths(user_args.arff_paths, path_type='ARFF') atleast_one_feature_specified = True user_feature_type = 'arff' meta_data_supplied = user_feature_paths[0] meta_data_format = 'arff' else: user_feature_paths = None user_feature_type = None if ((user_feature_paths is not None) and (not isinstance(user_feature_paths, list))): user_feature_paths = list(user_feature_paths) if (not atleast_one_feature_specified): raise ValueError('At least one method specifying features must be specified. It can be a path(s) to pyradigm dataset, matrix file, user-defined folder or a Freesurfer subject directory.') return (user_feature_paths, user_feature_type, fs_subject_dir, meta_data_supplied, meta_data_format)
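The mutual-exclusivity check in organize_inputs simply counts how many of the four input options were specified. A self-contained sketch of that pattern; the not_unspecified helper below is a stand-in for neuropredict's own, and the Namespace values are made up:

from argparse import Namespace

def not_unspecified(value):
    # Stand-in for neuropredict's helper: treats None/empty as unspecified.
    return value not in (None, 'None', [], '')

user_args = Namespace(user_feature_paths=['/project/fmri/'],
                      data_matrix_paths=None,
                      pyradigm_paths=None,
                      arff_paths=None)

mutually_excl_formats = ['user_feature_paths', 'data_matrix_paths',
                         'pyradigm_paths', 'arff_paths']
specified = [fmt for fmt in mutually_excl_formats
             if not_unspecified(getattr(user_args, fmt))]
if len(specified) > 1:
    raise ValueError('Only one of the following formats can be '
                     'specified:\n{}'.format(mutually_excl_formats))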
def parse_args(): 'Parser/validator for the cmd line args.' parser = get_parser() if (len(sys.argv) < 2): print('Too few arguments!') parser.print_help() parser.exit(1) try: user_args = parser.parse_args() except: parser.exit(1) if ((len(sys.argv) == 3) and not_unspecified(user_args.make_vis)): out_dir = realpath(user_args.make_vis) res_path = pjoin(out_dir, cfg.file_name_results) if (pexists(out_dir) and pexists(res_path)): print('\n\nSaving the visualizations to \n{}'.format(out_dir)) make_visualizations(res_path, out_dir) sys.exit(0) else: raise ValueError('Given folder does not exist, or has no results!') (user_feature_paths, user_feature_type, fs_subject_dir, meta_data_path, meta_data_format) = organize_inputs(user_args) if (not meta_data_path): if (user_args.meta_file is not None): meta_file = abspath(user_args.meta_file) if (not pexists(meta_file)): raise IOError("Meta data file doesn't exist.") else: raise ValueError('Metadata file must be provided when not using pyradigm/ARFF inputs.') (sample_ids, classes) = get_metadata(meta_file) else: print('Using meta data from:\n{}'.format(meta_data_path)) (sample_ids, classes) = get_metadata_in_pyradigm(meta_data_path, meta_data_format) if (user_args.out_dir is not None): out_dir = realpath(user_args.out_dir) else: out_dir = pjoin(realpath(os.getcwd()), cfg.output_dir_default) try: os.makedirs(out_dir, exist_ok=True) except: raise IOError('Output folder could not be created.') train_perc = np.float32(user_args.train_perc) if (not (0.01 <= train_perc <= 0.99)): raise ValueError('Training percentage {} out of bounds - must be >= 0.01 and <= 0.99'.format(train_perc)) num_rep_cv = np.int64(user_args.num_rep_cv) if (num_rep_cv < 10): raise ValueError('At least 10 repetitions of CV are recommended.') num_procs = check_num_procs(user_args.num_procs) (class_set, subgroups, positive_class) = validate_class_set(classes, user_args.sub_groups, user_args.positive_class) feature_selection_size = validate_feature_selection_size(user_args.num_features_to_select) grid_search_level = user_args.gs_level.lower() if (grid_search_level not in cfg.GRIDSEARCH_LEVELS): raise ValueError('Unrecognized level of grid search. Valid choices: {}'.format(cfg.GRIDSEARCH_LEVELS)) classifier = user_args.classifier.lower() feat_select_method = user_args.feat_select_method.lower() options_to_save = [sample_ids, classes, out_dir, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method] options_path = save_options(options_to_save, out_dir) return (sample_ids, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method)
1,028,055,262,831,333,100
Parser/validator for the cmd line args.
neuropredict/run_workflow.py
parse_args
dinga92/neuropredict
python
def parse_args(): parser = get_parser() if (len(sys.argv) < 2): print('Too few arguments!') parser.print_help() parser.exit(1) try: user_args = parser.parse_args() except: parser.exit(1) if ((len(sys.argv) == 3) and not_unspecified(user_args.make_vis)): out_dir = realpath(user_args.make_vis) res_path = pjoin(out_dir, cfg.file_name_results) if (pexists(out_dir) and pexists(res_path)): print('\n\nSaving the visualizations to \n{}'.format(out_dir)) make_visualizations(res_path, out_dir) sys.exit(0) else: raise ValueError('Given folder does not exist, or has no results!') (user_feature_paths, user_feature_type, fs_subject_dir, meta_data_path, meta_data_format) = organize_inputs(user_args) if (not meta_data_path): if (user_args.meta_file is not None): meta_file = abspath(user_args.meta_file) if (not pexists(meta_file)): raise IOError("Meta data file doesn't exist.") else: raise ValueError('Metadata file must be provided when not using pyradigm/ARFF inputs.') (sample_ids, classes) = get_metadata(meta_file) else: print('Using meta data from:\n{}'.format(meta_data_path)) (sample_ids, classes) = get_metadata_in_pyradigm(meta_data_path, meta_data_format) if (user_args.out_dir is not None): out_dir = realpath(user_args.out_dir) else: out_dir = pjoin(realpath(os.getcwd()), cfg.output_dir_default) try: os.makedirs(out_dir, exist_ok=True) except: raise IOError('Output folder could not be created.') train_perc = np.float32(user_args.train_perc) if (not (0.01 <= train_perc <= 0.99)): raise ValueError('Training percentage {} out of bounds - must be >= 0.01 and <= 0.99'.format(train_perc)) num_rep_cv = np.int64(user_args.num_rep_cv) if (num_rep_cv < 10): raise ValueError('At least 10 repetitions of CV are recommended.') num_procs = check_num_procs(user_args.num_procs) (class_set, subgroups, positive_class) = validate_class_set(classes, user_args.sub_groups, user_args.positive_class) feature_selection_size = validate_feature_selection_size(user_args.num_features_to_select) grid_search_level = user_args.gs_level.lower() if (grid_search_level not in cfg.GRIDSEARCH_LEVELS): raise ValueError('Unrecognized level of grid search. Valid choices: {}'.format(cfg.GRIDSEARCH_LEVELS)) classifier = user_args.classifier.lower() feat_select_method = user_args.feat_select_method.lower() options_to_save = [sample_ids, classes, out_dir, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method] options_path = save_options(options_to_save, out_dir) return (sample_ids, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, subgroups, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method)
def make_visualizations(results_file_path, out_dir, options_path=None): '\n Produces the performance visualizations/comparisons from the cross-validation results.\n\n Parameters\n ----------\n results_file_path : str\n Path to file containing results produced by `rhst`\n\n out_dir : str\n Path to a folder to store results.\n\n ' results_dict = rhst.load_results_dict(results_file_path) accuracy_balanced = results_dict['accuracy_balanced'] method_names = results_dict['method_names'] num_classes = results_dict['num_classes'] class_sizes = results_dict['class_sizes'] confusion_matrix = results_dict['confusion_matrix'] class_order = results_dict['class_set'] feature_importances_rf = results_dict['feature_importances_rf'] feature_names = results_dict['feature_names'] num_times_misclfd = results_dict['num_times_misclfd'] num_times_tested = results_dict['num_times_tested'] feature_importances_available = True if (options_path is not None): user_options = load_options(out_dir, options_path) if (user_options['classifier_name'].lower() not in cfg.clfs_with_feature_importance): feature_importances_available = False else: unusable = [np.all(np.isnan(method_fi.flatten())) for method_fi in feature_importances_rf] feature_importances_available = (not np.all(unusable)) try: balacc_fig_path = pjoin(out_dir, 'balanced_accuracy') visualize.metric_distribution(accuracy_balanced, method_names, balacc_fig_path, class_sizes, num_classes, 'Balanced Accuracy') confmat_fig_path = pjoin(out_dir, 'confusion_matrix') visualize.confusion_matrices(confusion_matrix, class_order, method_names, confmat_fig_path) cmp_misclf_fig_path = pjoin(out_dir, 'compare_misclf_rates') if (num_classes > 2): visualize.compare_misclf_pairwise(confusion_matrix, class_order, method_names, cmp_misclf_fig_path) elif (num_classes == 2): visualize.compare_misclf_pairwise_parallel_coord_plot(confusion_matrix, class_order, method_names, cmp_misclf_fig_path) if feature_importances_available: featimp_fig_path = pjoin(out_dir, 'feature_importance') visualize.feature_importance_map(feature_importances_rf, method_names, featimp_fig_path, feature_names) else: print('\nCurrent predictive model does not provide feature importance values. Skipping them.') misclf_out_path = pjoin(out_dir, 'misclassified_subjects') visualize.freq_hist_misclassifications(num_times_misclfd, num_times_tested, method_names, misclf_out_path) except: traceback.print_exc() warnings.warn('Error generating the visualizations! Skipping ..') plt.close('all') return
-877,074,157,645,304,800
Produces the performance visualizations/comparisons from the cross-validation results. Parameters ---------- results_file_path : str Path to file containing results produced by `rhst` out_dir : str Path to a folder to store results.
neuropredict/run_workflow.py
make_visualizations
dinga92/neuropredict
python
def make_visualizations(results_file_path, out_dir, options_path=None): results_dict = rhst.load_results_dict(results_file_path) accuracy_balanced = results_dict['accuracy_balanced'] method_names = results_dict['method_names'] num_classes = results_dict['num_classes'] class_sizes = results_dict['class_sizes'] confusion_matrix = results_dict['confusion_matrix'] class_order = results_dict['class_set'] feature_importances_rf = results_dict['feature_importances_rf'] feature_names = results_dict['feature_names'] num_times_misclfd = results_dict['num_times_misclfd'] num_times_tested = results_dict['num_times_tested'] feature_importances_available = True if (options_path is not None): user_options = load_options(out_dir, options_path) if (user_options['classifier_name'].lower() not in cfg.clfs_with_feature_importance): feature_importances_available = False else: unusable = [np.all(np.isnan(method_fi.flatten())) for method_fi in feature_importances_rf] feature_importances_available = (not np.all(unusable)) try: balacc_fig_path = pjoin(out_dir, 'balanced_accuracy') visualize.metric_distribution(accuracy_balanced, method_names, balacc_fig_path, class_sizes, num_classes, 'Balanced Accuracy') confmat_fig_path = pjoin(out_dir, 'confusion_matrix') visualize.confusion_matrices(confusion_matrix, class_order, method_names, confmat_fig_path) cmp_misclf_fig_path = pjoin(out_dir, 'compare_misclf_rates') if (num_classes > 2): visualize.compare_misclf_pairwise(confusion_matrix, class_order, method_names, cmp_misclf_fig_path) elif (num_classes == 2): visualize.compare_misclf_pairwise_parallel_coord_plot(confusion_matrix, class_order, method_names, cmp_misclf_fig_path) if feature_importances_available: featimp_fig_path = pjoin(out_dir, 'feature_importance') visualize.feature_importance_map(feature_importances_rf, method_names, featimp_fig_path, feature_names) else: print('\nCurrent predictive model does not provide feature importance values. Skipping them.') misclf_out_path = pjoin(out_dir, 'misclassified_subjects') visualize.freq_hist_misclassifications(num_times_misclfd, num_times_tested, method_names, misclf_out_path) except: traceback.print_exc() warnings.warn('Error generating the visualizations! Skipping ..') plt.close('all') return
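The feature-importance availability check above treats a method as unusable when its entire importance array is NaN. A minimal sketch of just that check (the arrays are made up for illustration):

import numpy as np

# Two methods: the first provides no importances (all NaN), the second does.
feature_importances = [np.full(10, np.nan), np.random.rand(10)]
unusable = [np.all(np.isnan(fi.flatten())) for fi in feature_importances]
feature_importances_available = not np.all(unusable)
print(feature_importances_available)   # True: at least one method has values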
def validate_class_set(classes, subgroups, positive_class=None): 'Ensures class names are valid and sub-groups exist.' class_set = list(set(classes.values())) sub_group_list = list() if (subgroups != 'all'): if isinstance(subgroups, str): subgroups = [subgroups] for comb in subgroups: cls_list = comb.split(',') if (len(set(cls_list)) < 2): raise ValueError('This subgroup {} does not contain two unique classes.'.format(comb)) for cls in cls_list: if (cls not in class_set): raise ValueError('Class {} in combination {} does not exist in meta data.'.format(cls, comb)) sub_group_list.append(cls_list) else: sub_group_list.append(class_set) class_order_in_meta = list() for x in class_set: if (x not in class_order_in_meta): class_order_in_meta.append(x) num_classes = len(class_order_in_meta) if (num_classes < 2): raise ValueError('At least two classes are required for predictive analysis! Only one given ({})'.format(set(classes.values()))) if (num_classes == 2): if not_unspecified(positive_class): if (positive_class not in class_order_in_meta): raise ValueError('Positive class specified does not exist in meta data.\nChoose one of {}'.format(class_order_in_meta)) print('Positive class specified for AUC calculation: {}'.format(positive_class)) else: positive_class = class_order_in_meta[(- 1)] print('Positive class inferred for AUC calculation: {}'.format(positive_class)) return (class_set, sub_group_list, positive_class)
-4,337,507,200,275,579,400
Ensures class names are valid and sub-groups exist.
neuropredict/run_workflow.py
validate_class_set
dinga92/neuropredict
python
def validate_class_set(classes, subgroups, positive_class=None): class_set = list(set(classes.values())) sub_group_list = list() if (subgroups != 'all'): if isinstance(subgroups, str): subgroups = [subgroups] for comb in subgroups: cls_list = comb.split(',') if (len(set(cls_list)) < 2): raise ValueError('This subgroup {} does not contain two unique classes.'.format(comb)) for cls in cls_list: if (cls not in class_set): raise ValueError('Class {} in combination {} does not exist in meta data.'.format(cls, comb)) sub_group_list.append(cls_list) else: sub_group_list.append(class_set) class_order_in_meta = list() for x in class_set: if (x not in class_order_in_meta): class_order_in_meta.append(x) num_classes = len(class_order_in_meta) if (num_classes < 2): raise ValueError('At least two classes are required for predictive analysis! Only one given ({})'.format(set(classes.values()))) if (num_classes == 2): if not_unspecified(positive_class): if (positive_class not in class_order_in_meta): raise ValueError('Positive class specified does not exist in meta data.\nChoose one of {}'.format(class_order_in_meta)) print('Positive class specified for AUC calculation: {}'.format(positive_class)) else: positive_class = class_order_in_meta[(- 1)] print('Positive class inferred for AUC calculation: {}'.format(positive_class)) return (class_set, sub_group_list, positive_class)
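The subgroup validation in validate_class_set parses each --sub_groups token on commas and checks every class against the meta data. A small, self-contained sketch of that logic (sample IDs and class names are made up):

classes = {'sub001': 'CN', 'sub002': 'FTD', 'sub003': 'AD', 'sub004': 'CN'}
class_set = set(classes.values())

subgroups = ['CN,FTD', 'CN,AD']   # as produced by --sub_groups CN,FTD CN,AD
for comb in subgroups:
    cls_list = comb.split(',')
    if len(set(cls_list)) < 2:
        raise ValueError('Subgroup {} does not contain two unique classes.'.format(comb))
    missing = [cls for cls in cls_list if cls not in class_set]
    if missing:
        raise ValueError('Classes {} do not exist in the meta data.'.format(missing))
print('all subgroups valid')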
def import_datasets(method_list, out_dir, subjects, classes, feature_path, feature_type='dir_of_dirs'): "\n Imports all the specified feature sets and organizes them into datasets.\n\n Parameters\n ----------\n method_list : list of callables\n Set of predefined methods returning a vector of features for a given sample id and location\n out_dir : str\n Path to the output folder\n\n subjects : list of str\n List of sample ids\n classes : dict\n Dict identifying the class for each sample id in the dataset.\n feature_path : list of str\n List of paths to the root directory containing the features (pre- or user-defined).\n Must be of same length as method_list\n feature_type : str\n a string identifying the structure of feature set.\n Choices = ('dir_of_dirs', 'data_matrix')\n\n Returns\n -------\n method_names : list of str\n List of method names used for annotation.\n dataset_paths_file : str\n Path to the file containing paths to imported feature sets.\n\n " def clean_str(string): return ' '.join(string.strip().split(' _-:\n\r\t')) method_names = list() outpath_list = list() for (mm, cur_method) in enumerate(method_list): if (cur_method in [get_dir_of_dirs]): method_name = basename(feature_path[mm]) elif (cur_method in [get_data_matrix]): method_name = os.path.splitext(basename(feature_path[mm]))[0] elif (cur_method in [get_pyradigm]): if (feature_type in ['pyradigm']): loaded_dataset = MLDataset(filepath=feature_path[mm]) else: raise ValueError('Invalid state of the program!') if (len(loaded_dataset.description) > 1): method_name = loaded_dataset.description else: method_name = basename(feature_path[mm]) method_names.append(clean_str(method_name)) if saved_dataset_matches(loaded_dataset, subjects, classes): outpath_list.append(feature_path[mm]) continue else: raise ValueError('supplied pyradigm dataset does not match samples in the meta data.') elif (cur_method in [get_arff]): loaded_dataset = MLDataset(arff_path=feature_path[mm]) if (len(loaded_dataset.description) > 1): method_name = loaded_dataset.description else: method_name = basename(feature_path[mm]) method_names.append(clean_str(method_name)) out_name = make_dataset_filename(method_name) outpath_dataset = pjoin(out_dir, out_name) loaded_dataset.save(outpath_dataset) outpath_list.append(outpath_dataset) continue else: method_name = cur_method.__name__ method_names.append(clean_str(method_name)) out_name = make_dataset_filename(method_name) outpath_dataset = pjoin(out_dir, out_name) if (not saved_dataset_matches(outpath_dataset, subjects, classes)): outpath_dataset = get_features(subjects, classes, feature_path[mm], out_dir, out_name, cur_method, feature_type) outpath_list.append(outpath_dataset) combined_name = uniq_combined_name(method_names) dataset_paths_file = pjoin(out_dir, (('datasetlist.' + combined_name) + '.txt')) with open(dataset_paths_file, 'w') as dpf: dpf.writelines('\n'.join(outpath_list)) return (method_names, dataset_paths_file)
5,965,091,228,184,356,000
Imports all the specified feature sets and organizes them into datasets. Parameters ---------- method_list : list of callables Set of predefined methods returning a vector of features for a given sample id and location out_dir : str Path to the output folder subjects : list of str List of sample ids classes : dict Dict identifying the class for each sample id in the dataset. feature_path : list of str List of paths to the root directory containing the features (pre- or user-defined). Must be of same length as method_list feature_type : str a string identifying the structure of feature set. Choices = ('dir_of_dirs', 'data_matrix') Returns ------- method_names : list of str List of method names used for annotation. dataset_paths_file : str Path to the file containing paths to imported feature sets.
neuropredict/run_workflow.py
import_datasets
dinga92/neuropredict
python
def import_datasets(method_list, out_dir, subjects, classes, feature_path, feature_type='dir_of_dirs'): def clean_str(string): return ' '.join(string.strip().split(' _-:\n\r\t')) method_names = list() outpath_list = list() for (mm, cur_method) in enumerate(method_list): if (cur_method in [get_dir_of_dirs]): method_name = basename(feature_path[mm]) elif (cur_method in [get_data_matrix]): method_name = os.path.splitext(basename(feature_path[mm]))[0] elif (cur_method in [get_pyradigm]): if (feature_type in ['pyradigm']): loaded_dataset = MLDataset(filepath=feature_path[mm]) else: raise ValueError('Invalid state of the program!') if (len(loaded_dataset.description) > 1): method_name = loaded_dataset.description else: method_name = basename(feature_path[mm]) method_names.append(clean_str(method_name)) if saved_dataset_matches(loaded_dataset, subjects, classes): outpath_list.append(feature_path[mm]) continue else: raise ValueError('supplied pyradigm dataset does not match samples in the meta data.') elif (cur_method in [get_arff]): loaded_dataset = MLDataset(arff_path=feature_path[mm]) if (len(loaded_dataset.description) > 1): method_name = loaded_dataset.description else: method_name = basename(feature_path[mm]) method_names.append(clean_str(method_name)) out_name = make_dataset_filename(method_name) outpath_dataset = pjoin(out_dir, out_name) loaded_dataset.save(outpath_dataset) outpath_list.append(outpath_dataset) continue else: method_name = cur_method.__name__ method_names.append(clean_str(method_name)) out_name = make_dataset_filename(method_name) outpath_dataset = pjoin(out_dir, out_name) if (not saved_dataset_matches(outpath_dataset, subjects, classes)): outpath_dataset = get_features(subjects, classes, feature_path[mm], out_dir, out_name, cur_method, feature_type) outpath_list.append(outpath_dataset) combined_name = uniq_combined_name(method_names) dataset_paths_file = pjoin(out_dir, (('datasetlist.' + combined_name) + '.txt')) with open(dataset_paths_file, 'w') as dpf: dpf.writelines('\n'.join(outpath_list)) return (method_names, dataset_paths_file)
def make_method_list(fs_subject_dir, user_feature_paths, user_feature_type='dir_of_dirs'): '\n Returns an organized list of feature paths and methods to read in features.\n\n Parameters\n ----------\n fs_subject_dir : str\n user_feature_paths : list of str\n user_feature_type : str\n\n Returns\n -------\n feature_dir : list\n method_list : list\n\n\n ' freesurfer_readers = [aseg_stats_subcortical, aseg_stats_whole_brain] userdefined_readers = {'dir_of_dirs': get_dir_of_dirs, 'data_matrix': get_data_matrix, 'pyradigm': get_pyradigm, 'arff': get_arff} feature_dir = list() method_list = list() if not_unspecified(user_feature_paths): if (user_feature_type not in userdefined_readers): raise NotImplementedError('Invalid feature type or its reader is not implemented yet!') for upath in user_feature_paths: feature_dir.append(upath) method_list.append(userdefined_readers[user_feature_type]) if not_unspecified(fs_subject_dir): for fsrdr in freesurfer_readers: feature_dir.append(fs_subject_dir) method_list.append(fsrdr) if (len(method_list) != len(feature_dir)): raise ValueError('Invalid specification for features!') if (len(method_list) < 1): raise ValueError('At least one feature set must be specified.') print('\nRequested features for analysis:') for (mm, method) in enumerate(method_list): print('{} from {}'.format(method.__name__, feature_dir[mm])) return (feature_dir, method_list)
-3,986,442,342,340,710,400
Returns an organized list of feature paths and methods to read in features. Parameters ---------- fs_subject_dir : str user_feature_paths : list of str user_feature_type : str Returns ------- feature_dir : list method_list : list
neuropredict/run_workflow.py
make_method_list
dinga92/neuropredict
python
def make_method_list(fs_subject_dir, user_feature_paths, user_feature_type='dir_of_dirs'): freesurfer_readers = [aseg_stats_subcortical, aseg_stats_whole_brain] userdefined_readers = {'dir_of_dirs': get_dir_of_dirs, 'data_matrix': get_data_matrix, 'pyradigm': get_pyradigm, 'arff': get_arff} feature_dir = list() method_list = list() if not_unspecified(user_feature_paths): if (user_feature_type not in userdefined_readers): raise NotImplementedError('Invalid feature type or its reader is not implemented yet!') for upath in user_feature_paths: feature_dir.append(upath) method_list.append(userdefined_readers[user_feature_type]) if not_unspecified(fs_subject_dir): for fsrdr in freesurfer_readers: feature_dir.append(fs_subject_dir) method_list.append(fsrdr) if (len(method_list) != len(feature_dir)): raise ValueError('Invalid specification for features!') if (len(method_list) < 1): raise ValueError('At least one feature set must be specified.') print('\nRequested features for analysis:') for (mm, method) in enumerate(method_list): print('{} from {}'.format(method.__name__, feature_dir[mm])) return (feature_dir, method_list)
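make_method_list dispatches on user_feature_type through a dict of reader callables, which reduces adding a new format to a one-line change. A minimal sketch of the pattern (the reader stubs are placeholders, not neuropredict's implementations):

def get_dir_of_dirs(path): ...   # hypothetical reader stubs for illustration
def get_data_matrix(path): ...

userdefined_readers = {'dir_of_dirs': get_dir_of_dirs,
                       'data_matrix': get_data_matrix}

user_feature_type = 'data_matrix'
if user_feature_type not in userdefined_readers:
    raise NotImplementedError('Invalid feature type or its reader is not implemented yet!')
reader = userdefined_readers[user_feature_type]
print(reader.__name__)   # -> get_data_matrix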
def prepare_and_run(subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method): 'Organizes the inputs and prepares them for CV' (feature_dir, method_list) = make_method_list(fs_subject_dir, user_feature_paths, user_feature_type) (method_names, dataset_paths_file) = import_datasets(method_list, out_dir, subjects, classes, feature_dir, user_feature_type) for sub_group in sub_group_list: print('{}\nProcessing subgroup : {}\n{}'.format(('-' * 80), sub_group, ('-' * 80))) out_dir_sg = pjoin(out_dir, sub_group_identifier(sub_group)) results_file_path = rhst.run(dataset_paths_file, method_names, out_dir_sg, train_perc=train_perc, num_repetitions=num_rep_cv, positive_class=positive_class, sub_group=sub_group, feat_sel_size=feature_selection_size, num_procs=num_procs, grid_search_level=grid_search_level, classifier_name=classifier, feat_select_method=feat_select_method, options_path=options_path) print('\n\nSaving the visualizations to \n{}'.format(out_dir)) make_visualizations(results_file_path, out_dir_sg, options_path) print('\n') return
-2,289,500,217,651,069,400
Organizes the inputs and prepares them for CV
neuropredict/run_workflow.py
prepare_and_run
dinga92/neuropredict
python
def prepare_and_run(subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method): (feature_dir, method_list) = make_method_list(fs_subject_dir, user_feature_paths, user_feature_type) (method_names, dataset_paths_file) = import_datasets(method_list, out_dir, subjects, classes, feature_dir, user_feature_type) for sub_group in sub_group_list: print('{}\nProcessing subgroup : {}\n{}'.format(('-' * 80), sub_group, ('-' * 80))) out_dir_sg = pjoin(out_dir, sub_group_identifier(sub_group)) results_file_path = rhst.run(dataset_paths_file, method_names, out_dir_sg, train_perc=train_perc, num_repetitions=num_rep_cv, positive_class=positive_class, sub_group=sub_group, feat_sel_size=feature_selection_size, num_procs=num_procs, grid_search_level=grid_search_level, classifier_name=classifier, feat_select_method=feat_select_method, options_path=options_path) print('\n\nSaving the visualizations to \n{}'.format(out_dir)) make_visualizations(results_file_path, out_dir_sg, options_path) print('\n') return
def cli(): '\n Main entry point.\n\n ' (subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method) = parse_args() print('Running neuropredict {}'.format(__version__)) prepare_and_run(subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method) return
2,863,102,169,206,160,000
Main entry point.
neuropredict/run_workflow.py
cli
dinga92/neuropredict
python
def cli(): (subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method) = parse_args() print('Running neuropredict {}'.format(__version__)) prepare_and_run(subjects, classes, out_dir, options_path, user_feature_paths, user_feature_type, fs_subject_dir, train_perc, num_rep_cv, positive_class, sub_group_list, feature_selection_size, num_procs, grid_search_level, classifier, feat_select_method) return
def run(feature_sets, feature_type=cfg.default_feature_type, meta_data=None, output_dir=None, pipeline=None, train_perc=0.5, num_repetitions=200, positive_class=None, feat_sel_size=cfg.default_num_features_to_select, sub_groups='all', grid_search_level=cfg.GRIDSEARCH_LEVEL_DEFAULT, num_procs=2): '\n Generate comprehensive report on the predictive performance for different feature sets and statistically compare them.\n\n Main entry point for API access.\n\n Parameters\n ----------\n feature_sets : list\n The input can be specified in any of the following ways:\n - list of paths to pyradigm datasets saved on disk\n - path to a file containing list of paths (each line containing path to a valid MLDataset)\n - list of MLDatasets that are already loaded\n - list of tuples (to specify multiple features), each element containing (X, y) i.e. data and target labels\n - a single tuple containing (X, y) i.e. data and target labels\n - list of paths to CSV files, each containing one type of features.\n\n When specifying multiple sets of input features, ensure:\n - all of them contain the same number of samples\n - each sample belongs to same class across all feature sets.\n\n feature_type : str\n String identifying the type of features as described above. It could be:\n \'list_of_pyradigm_paths\', \'pyradigm_list\',\n \'list_of_tuples\', \'tuple\', \'list_of_csv_paths\'\n\n meta_data : multiple\n The meta data can be specified in any of the following ways:\n\n - a path to a meta data file (see :doc:`features` page)\n - a dict keyed in by sample IDs with values representing their classes.\n - None, if meta data is already specified in ``feature_sets`` input (e.g. with pyradigms).\n\n pipeline : str or object\n If a string, it identifies one of the implemented classifiers e.g. \'RandomForestClassifier\' or \'ExtraTreesClassifier\'\n If an object, it must be a scikit-learn pipeline describing the sequence of steps.\n This is typically a set of feature selections or dimensionality reduction steps followed by an estimator (classifier).\n\n See http://scikit-learn.org/stable/modules/pipeline.html#pipeline for more details.\n\n Default: None, which leads to the selection of a Random Forest classifier,\n with robust scaling, followed by removal of low variance features.\n\n method_names : list\n A list of names to denote the different feature sets\n\n output_dir : str\n Path to output directory to save the cross validation results to.\n If not specified, a new directory named \'neuropredict\' will be created in the current directory.\n\n train_perc : float, optional\n Percentage of subjects to train the classifier on.\n The percentage is applied to the size of the smallest class to estimate\n the number of subjects from each class to be reserved for training.\n The smallest class is chosen to avoid class-imbalance in the training set.\n Default: 0.5 (50%).\n\n positive_class : str\n Name of the class to be treated as positive in calculation of AUC\n\n feat_sel_size : str or int\n Number of features to select as part of feature selection. Options:\n\n - \'tenth\'\n - \'sqrt\'\n - \'log2\'\n - \'all\'\n\n Default: \'tenth\' of the number of samples in the training set. For example, if your dataset has 90 samples and you chose 50 percent for training (default), then the training set will have 90*.5=45 samples, leading to 5 features to be selected for training. 
If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features.\n\n num_repetitions : int, optional\n Number of repetitions of cross-validation estimation. Default: 200.\n\n num_procs : int, optional\n Number of CPUs to use to parallelize CV repetitions.\n\n Default : 2. Number of CPUs will be capped at the number available on the machine if higher is requested.\n\n sub_groups : list\n This option allows the user to study different combinations of classes in a multi-class (N>2) dataset. For example, in a dataset with 3 classes CN, FTD and AD, two studies of pair-wise combinations can be studied separately with the following flag ``--sub_groups CN,FTD CN,AD``. This allows the user to focus on a few interesting subgroups depending on their dataset/goal.\n\n Format: Different subgroups must be separated by space, and each sub-group must be a comma-separated list of class names defined in the meta data file. Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters, and ideally just alphanumeric characters separated by underscores. Any number of subgroups can be specified, but each subgroup must have at least two distinct classes.\n\n Default: ``\'all\'``, leading to inclusion of all available classes in an all-vs-all multi-class setting.\n\n grid_search_level : str\n Flag to specify the level of grid search during hyper-parameter optimization on the training set.\n Allowed options are : \'none\', \'light\' and \'exhaustive\', in increasing order of how many parameters/values will be optimized.\n\n More parameters and more values demand more resources and much longer time for optimization.\n\n The \'light\' option uses "folk wisdom" to try the fewest values (no more than one or two)\n for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest optimization).\n The \'light\' option will be the fastest and should give a "rough idea" of predictive performance.\n The \'exhaustive\' option will try the most parameter values for the most parameters that can be optimized.\n\n Returns\n -------\n results_path : str\n Path to pickle file containing full set of CV results.\n\n ' raise NotImplementedError return
7,251,242,653,249,876,000
Generate a comprehensive report on the predictive performance of different feature sets and statistically compare them. Main entry point for API access. Parameters ---------- feature_sets : list The input can be specified in either of the following ways: - list of paths to pyradigm datasets saved on disk - path to a file containing a list of paths (each line containing the path to a valid MLDataset) - list of MLDatasets that are already loaded - list of tuples (to specify multiple features), each element containing (X, y) i.e. data and target labels - a single tuple containing (X, y) i.e. data and target labels - list of paths to CSV files, each containing one type of features. When specifying multiple sets of input features, ensure: - all of them contain the same number of samples - each sample belongs to the same class across all feature sets. feature_type : str String identifying the type of features as described above. It could be: 'list_of_pyradigm_paths', 'pyradigm_list', 'list_of_tuples', 'tuple', 'list_of_csv_paths' meta_data : multiple The meta data can be specified in either of the following ways: - a path to a meta data file (see :doc:`features` page) - a dict keyed by sample IDs with values representing their classes. - None, if meta data is already specified in the ``feature_sets`` input (e.g. with pyradigms). pipeline : str or object If a string, it identifies one of the implemented classifiers e.g. 'RandomForestClassifier' or 'ExtraTreesClassifier'. If an object, it must be a scikit-learn pipeline describing the sequence of steps. This is typically a set of feature selection or dimensionality reduction steps followed by an estimator (classifier). See http://scikit-learn.org/stable/modules/pipeline.html#pipeline for more details. Default: None, which leads to the selection of a Random Forest classifier, with robust scaling, followed by removal of low-variance features. method_names : list A list of names to denote the different feature sets output_dir : str Path to the output directory where the cross-validation results will be saved. If not specified, a new directory named 'neuropredict' will be created in the current directory. train_perc : float, optional Percentage of subjects to train the classifier on. The percentage is applied to the size of the smallest class to estimate the number of subjects from each class to be reserved for training. The smallest class is chosen to avoid class imbalance in the training set. Default: 0.5 (50%). positive_class : str Name of the class to be treated as positive in the calculation of AUC feat_sel_size : str or int Number of features to select as part of feature selection. Options: - 'tenth' - 'sqrt' - 'log2' - 'all' Default: 'tenth' of the number of samples in the training set. For example, if your dataset has 90 samples and you choose 50 percent for training (the default), you will have 90*0.5=45 samples in the training set, leading to 5 features being selected for training. If you choose a fixed integer, ensure all the feature sets under evaluation have at least that many features. num_repetitions : int, optional Number of repetitions of cross-validation estimation. Default: 200. num_procs : int, optional Number of CPUs to use to parallelize CV repetitions. Default: 2. The number of CPUs will be capped at the number available on the machine if a higher number is requested. sub_groups : list This option allows the user to study different combinations of classes in a multi-class (N>2) dataset. For example, in a dataset with 3 classes CN, FTD and AD, two pair-wise combinations can be studied separately with the following flag: ``--sub_groups CN,FTD CN,AD``. This allows the user to focus on a few interesting subgroups depending on their dataset/goal. Format: Different subgroups must be separated by spaces, and each sub-group must be a comma-separated list of class names defined in the meta data file. Hence it is strongly recommended to use class names without any spaces, commas, hyphens or special characters, ideally just alphanumeric characters separated by underscores. Any number of subgroups can be specified, but each subgroup must have at least two distinct classes. Default: ``'all'``, leading to the inclusion of all available classes in an all-vs-all multi-class setting. grid_search_level : str Flag to specify the level of grid search during hyper-parameter optimization on the training set. Allowed options are: 'none', 'light' and 'exhaustive', in increasing order of how many parameters/values will be optimized. More parameters and more values demand more resources and much longer time for optimization. The 'light' option uses "folk wisdom" to try the fewest values (no more than one or two) for the parameters of the given classifier (e.g. a large number, say 500 trees, for a random forest). The 'light' option will be the fastest and should give a "rough idea" of predictive performance. The 'exhaustive' option will try the most parameter values for the most parameters that can be optimized. Returns ------- results_path : str Path to the pickle file containing the full set of CV results.
neuropredict/run_workflow.py
run
dinga92/neuropredict
python
def run(feature_sets, feature_type=cfg.default_feature_type, meta_data=None, output_dir=None, pipeline=None, train_perc=0.5, num_repetitions=200, positive_class=None, feat_sel_size=cfg.default_num_features_to_select, sub_groups='all', grid_search_level=cfg.GRIDSEARCH_LEVEL_DEFAULT, num_procs=2): raise NotImplementedError
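A minimal usage sketch for the `run` API documented above. The dataset paths and metadata file below are hypothetical placeholders, and this snapshot of the function raises NotImplementedError, so the call is illustrative only.

from neuropredict.run_workflow import run

# Hypothetical feature sets saved as pyradigm datasets on disk (paths are assumptions)
feature_sets = ['/data/features_thickness.MLDataset.pkl',
                '/data/features_volume.MLDataset.pkl']

results_path = run(feature_sets,
                   feature_type='list_of_pyradigm_paths',
                   meta_data='/data/meta.csv',           # sample ID -> class mapping (assumed path)
                   output_dir='/data/neuropredict_out',
                   train_perc=0.5,                       # 50% of the smallest class used for training
                   num_repetitions=200,                  # CV repetitions
                   grid_search_level='light')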
def portfolio_computeKnm_np(X, Xbar, l, sigma): '\n X: n x d\n Xbar: m x d\n l: d (per-dimension lengthscales)\n sigma: scalar signal variance\n ' n = np.shape(X)[0] m = np.shape(Xbar)[0] xdim = np.shape(X)[1] l = l.reshape(1, xdim) X = (X / l) Xbar = (Xbar / l) Q = np.tile(np.sum((X * X), axis=1, keepdims=True), reps=(1, m)) Qbar = np.tile(np.sum((Xbar * Xbar), axis=1, keepdims=True).T, reps=(n, 1)) dist = ((Qbar + Q) - (2 * X.dot(Xbar.T))) knm = (sigma * np.exp(((- 0.5) * dist))) return knm
9,051,418,591,690,908,000
X: n x d Xbar: m x d l: d (per-dimension lengthscales) sigma: scalar signal variance
functions.py
portfolio_computeKnm_np
qphong/BayesOpt-LV
python
def portfolio_computeKnm_np(X, Xbar, l, sigma): n = np.shape(X)[0] m = np.shape(Xbar)[0] xdim = np.shape(X)[1] l = l.reshape(1, xdim) X = (X / l) Xbar = (Xbar / l) Q = np.tile(np.sum((X * X), axis=1, keepdims=True), reps=(1, m)) Qbar = np.tile(np.sum((Xbar * Xbar), axis=1, keepdims=True).T, reps=(n, 1)) dist = ((Qbar + Q) - (2 * X.dot(Xbar.T))) knm = (sigma * np.exp(((- 0.5) * dist))) return knm
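A small self-contained check of the squared-exponential kernel above, assuming portfolio_computeKnm_np from functions.py is in scope: entry (i, j) of the returned matrix should equal sigma * exp(-0.5 * ||(x_i - xbar_j) / l||^2).

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((4, 3))      # n=4 samples, d=3 dimensions
Xbar = rng.standard_normal((2, 3))   # m=2 reference points
l = np.array([0.5, 1.0, 2.0])        # per-dimension lengthscales
sigma = 1.5                          # signal variance

knm = portfolio_computeKnm_np(X, Xbar, l, sigma)

# Entry-wise reference computation of the same kernel
ref = np.empty((4, 2))
for i in range(4):
    for j in range(2):
        diff = (X[i] - Xbar[j]) / l
        ref[i, j] = sigma * np.exp(-0.5 * diff @ diff)

assert np.allclose(knm, ref)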
def portfolio_computeKnm(X, Xbar, l, sigma, dtype=tf.float32): '\n X: n x d\n Xbar: m x d\n l: d (per-dimension lengthscales)\n sigma: scalar signal variance\n ' n = tf.shape(X)[0] m = tf.shape(Xbar)[0] X = (X / l) Xbar = (Xbar / l) Q = tf.tile(tf.reduce_sum(tf.square(X), axis=1, keepdims=True), multiples=(1, m)) Qbar = tf.tile(tf.transpose(tf.reduce_sum(tf.square(Xbar), axis=1, keepdims=True)), multiples=(n, 1)) dist = ((Qbar + Q) - ((2 * X) @ tf.transpose(Xbar))) knm = (sigma * tf.exp(((- 0.5) * dist))) return knm
195,986,522,214,792,770
X: n x d Xbar: m x d l: d (per-dimension lengthscales) sigma: scalar signal variance
functions.py
portfolio_computeKnm
qphong/BayesOpt-LV
python
def portfolio_computeKnm(X, Xbar, l, sigma, dtype=tf.float32): n = tf.shape(X)[0] m = tf.shape(Xbar)[0] X = (X / l) Xbar = (Xbar / l) Q = tf.tile(tf.reduce_sum(tf.square(X), axis=1, keepdims=True), multiples=(1, m)) Qbar = tf.tile(tf.transpose(tf.reduce_sum(tf.square(Xbar), axis=1, keepdims=True)), multiples=(n, 1)) dist = ((Qbar + Q) - ((2 * X) @ tf.transpose(Xbar))) knm = (sigma * tf.exp(((- 0.5) * dist))) return knm
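The TensorFlow variant should produce the same kernel matrix as the NumPy version; a minimal parity check, assuming TensorFlow 2.x eager execution and both functions in scope (note the dtype keyword is unused by the function body).

import numpy as np
import tensorflow as tf

rng = np.random.default_rng(1)
X = rng.standard_normal((5, 2))
Xbar = rng.standard_normal((3, 2))
l = np.array([0.7, 1.3])
sigma = 2.0

knm_np = portfolio_computeKnm_np(X, Xbar, l, sigma)
knm_tf = portfolio_computeKnm(tf.constant(X), tf.constant(Xbar), tf.constant(l), sigma)

# Both implementations compute sigma * exp(-0.5 * squared scaled distance)
assert np.allclose(knm_np, knm_tf.numpy())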
def __init__(self, intermediate_directory='intermediates'): '\n :param intermediate_directory: Directory where the\n intermediate pandas dataframe should be persisted.\n ' super(NumpyNullPreprocessor, self).__init__() self._intermediate_directory = intermediate_directory self._cached = False self._cached_object = None
-2,495,241,162,938,111,500
:param intermediate_directory: Directory where the intermediate pandas dataframe should be persisted.
brewPipe/preprocess/numpy_null.py
__init__
meyerd/brewPipe
python
def __init__(self, intermediate_directory='intermediates'): super(NumpyNullPreprocessor, self).__init__() self._intermediate_directory = intermediate_directory self._cached = False self._cached_object = None
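A one-line instantiation sketch for the preprocessor above; the import path mirrors brewPipe/preprocess/numpy_null.py and should be treated as an assumption.

from brewPipe.preprocess.numpy_null import NumpyNullPreprocessor

# Persist intermediate dataframes under a custom directory instead of the default 'intermediates'
pre = NumpyNullPreprocessor(intermediate_directory='my_intermediates')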
def spatial_variable(self, symbol): '\n Convert a :class:`pybamm.SpatialVariable` node to a linear algebra object that\n can be evaluated (here, a :class:`pybamm.Vector` on either the nodes or the\n edges).\n\n Parameters\n ----------\n symbol : :class:`pybamm.SpatialVariable`\n The spatial variable to be discretised.\n\n Returns\n -------\n :class:`pybamm.Vector`\n Contains the discretised spatial variable\n ' symbol_mesh = self.mesh.combine_submeshes(*symbol.domain) if symbol.name.endswith('_edge'): return pybamm.Vector(symbol_mesh[0].edges, domain=symbol.domain) else: return pybamm.Vector(symbol_mesh[0].nodes, domain=symbol.domain)
2,688,663,816,349,106,000
Convert a :class:`pybamm.SpatialVariable` node to a linear algebra object that can be evaluated (here, a :class:`pybamm.Vector` on either the nodes or the edges). Parameters ---------- symbol : :class:`pybamm.SpatialVariable` The spatial variable to be discretised. Returns ------- :class:`pybamm.Vector` Contains the discretised spatial variable
pybamm/spatial_methods/spatial_method.py
spatial_variable
jedgedrudd/PyBaMM
python
def spatial_variable(self, symbol): symbol_mesh = self.mesh.combine_submeshes(*symbol.domain) if symbol.name.endswith('_edge'): return pybamm.Vector(symbol_mesh[0].edges, domain=symbol.domain) else: return pybamm.Vector(symbol_mesh[0].nodes, domain=symbol.domain)
def broadcast(self, symbol, domain, auxiliary_domains, broadcast_type): "\n Broadcast symbol to a specified domain.\n\n Parameters\n ----------\n symbol : :class:`pybamm.Symbol`\n The symbol to be broadcasted\n domain : iterable of strings\n The domain to broadcast to\n auxiliary_domains : dict of strings\n The auxiliary domains to assign to the broadcasted symbol\n broadcast_type : str\n The type of broadcast, either: 'primary' or 'full'\n\n Returns\n -------\n broadcasted_symbol : :class:`pybamm.Symbol`\n The discretised symbol of the correct size for the spatial method\n " primary_pts_for_broadcast = sum((self.mesh[dom][0].npts_for_broadcast for dom in domain)) full_pts_for_broadcast = sum((subdom.npts_for_broadcast for dom in domain for subdom in self.mesh[dom])) if (broadcast_type == 'primary'): out = pybamm.Outer(symbol, pybamm.Vector(np.ones(primary_pts_for_broadcast), domain=domain)) out.auxiliary_domains = auxiliary_domains elif (broadcast_type == 'full'): out = (symbol * pybamm.Vector(np.ones(full_pts_for_broadcast), domain=domain)) return out
2,225,620,983,754,394,400
Broadcast symbol to a specified domain. Parameters ---------- symbol : :class:`pybamm.Symbol` The symbol to be broadcasted domain : iterable of strings The domain to broadcast to auxiliary_domains : dict of strings The auxiliary domains to assign to the broadcasted symbol broadcast_type : str The type of broadcast, either: 'primary' or 'full' Returns ------- broadcasted_symbol : :class:`pybamm.Symbol` The discretised symbol of the correct size for the spatial method
pybamm/spatial_methods/spatial_method.py
broadcast
jedgedrudd/PyBaMM
python
def broadcast(self, symbol, domain, auxiliary_domains, broadcast_type): primary_pts_for_broadcast = sum((self.mesh[dom][0].npts_for_broadcast for dom in domain)) full_pts_for_broadcast = sum((subdom.npts_for_broadcast for dom in domain for subdom in self.mesh[dom])) if (broadcast_type == 'primary'): out = pybamm.Outer(symbol, pybamm.Vector(np.ones(primary_pts_for_broadcast), domain=domain)) out.auxiliary_domains = auxiliary_domains elif (broadcast_type == 'full'): out = (symbol * pybamm.Vector(np.ones(full_pts_for_broadcast), domain=domain)) return out
def gradient(self, symbol, discretised_symbol, boundary_conditions): '\n Implements the gradient for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol that we will take the gradient of.\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of acting the discretised gradient on\n the child discretised_symbol\n ' raise NotImplementedError
-1,960,366,641,476,553,500
Implements the gradient for a spatial method. Parameters ---------- symbol: :class:`pybamm.Symbol` The symbol that we will take the gradient of. discretised_symbol: :class:`pybamm.Symbol` The discretised symbol of the correct size boundary_conditions : dict The boundary conditions of the model ({symbol.id: {"left": left bc, "right": right bc}}) Returns ------- :class:`pybamm.Array` Contains the result of acting the discretised gradient on the child discretised_symbol
pybamm/spatial_methods/spatial_method.py
gradient
jedgedrudd/PyBaMM
python
def gradient(self, symbol, discretised_symbol, boundary_conditions): raise NotImplementedError
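gradient above is an abstract hook: concrete spatial methods override it to return a discretised operator acting on the child. A hypothetical, pybamm-independent sketch of the underlying idea, using a two-point finite-difference matrix on a 1-D grid (names and shapes are assumptions, not PyBaMM's actual implementation):

import numpy as np

def finite_difference_gradient_matrix(nodes):
    # Two-point difference matrix mapping n node values to n-1 edge values
    n = len(nodes)
    dx = np.diff(nodes)
    mat = np.zeros((n - 1, n))
    for i in range(n - 1):
        mat[i, i] = -1.0 / dx[i]
        mat[i, i + 1] = 1.0 / dx[i]
    return mat

nodes = np.linspace(0.0, 1.0, 11)
y = nodes ** 2                                   # discretised symbol: y(x) = x^2 at the nodes
grad = finite_difference_gradient_matrix(nodes) @ y
# For y = x^2 the two-point difference is exactly x_i + x_{i+1}, i.e. 2x at the edge midpoints
assert np.allclose(grad, nodes[:-1] + nodes[1:])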
def divergence(self, symbol, discretised_symbol, boundary_conditions): '\n Implements the divergence for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol that we will take the divergence of.\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of acting the discretised divergence on\n the child discretised_symbol\n ' raise NotImplementedError
-467,223,934,223,671,550
Implements the divergence for a spatial method. Parameters ---------- symbol: :class:`pybamm.Symbol` The symbol that we will take the divergence of. discretised_symbol: :class:`pybamm.Symbol` The discretised symbol of the correct size boundary_conditions : dict The boundary conditions of the model ({symbol.id: {"left": left bc, "right": right bc}}) Returns ------- :class:`pybamm.Array` Contains the result of acting the discretised divergence on the child discretised_symbol
pybamm/spatial_methods/spatial_method.py
divergence
jedgedrudd/PyBaMM
python
def divergence(self, symbol, discretised_symbol, boundary_conditions): raise NotImplementedError
def laplacian(self, symbol, discretised_symbol, boundary_conditions): '\n Implements the laplacian for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol that we will take the laplacian of.\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of acting the discretised laplacian on\n the child discretised_symbol\n ' raise NotImplementedError
-2,857,456,298,622,612,000
Implements the laplacian for a spatial method. Parameters ---------- symbol: :class:`pybamm.Symbol` The symbol that we will take the laplacian of. discretised_symbol: :class:`pybamm.Symbol` The discretised symbol of the correct size boundary_conditions : dict The boundary conditions of the model ({symbol.id: {"left": left bc, "right": right bc}}) Returns ------- :class:`pybamm.Array` Contains the result of acting the discretised laplacian on the child discretised_symbol
pybamm/spatial_methods/spatial_method.py
laplacian
jedgedrudd/PyBaMM
python
def laplacian(self, symbol, discretised_symbol, boundary_conditions): raise NotImplementedError
def gradient_squared(self, symbol, discretised_symbol, boundary_conditions): '\n Implements the inner product of the gradient with itself for a spatial method.\n\n Parameters\n ----------\n symbol: :class:`pybamm.Symbol`\n The symbol that we will take the gradient of.\n discretised_symbol: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n\n boundary_conditions : dict\n The boundary conditions of the model\n ({symbol.id: {"left": left bc, "right": right bc}})\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of taking the inner product of the result of acting\n the discretised gradient on the child discretised_symbol with itself\n ' raise NotImplementedError
5,460,788,753,370,440,000
Implements the inner product of the gradient with itself for a spatial method. Parameters ---------- symbol: :class:`pybamm.Symbol` The symbol that we will take the gradient of. discretised_symbol: :class:`pybamm.Symbol` The discretised symbol of the correct size boundary_conditions : dict The boundary conditions of the model ({symbol.id: {"left": left bc, "right": right bc}}) Returns ------- :class:`pybamm.Array` Contains the result of taking the inner product of the result of acting the discretised gradient on the child discretised_symbol with itself
pybamm/spatial_methods/spatial_method.py
gradient_squared
jedgedrudd/PyBaMM
python
def gradient_squared(self, symbol, discretised_symbol, boundary_conditions): raise NotImplementedError
def integral(self, child, discretised_child): '\n Implements the integral for a spatial method.\n\n Parameters\n ----------\n child: :class:`pybamm.Symbol`\n The symbol to be integrated\n discretised_child: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of acting the discretised integral on\n the child discretised_symbol\n ' raise NotImplementedError
326,992,160,910,767,740
Implements the integral for a spatial method. Parameters ---------- child: :class:`pybamm.Symbol` The symbol to be integrated discretised_child: :class:`pybamm.Symbol` The discretised symbol of the correct size Returns ------- :class:`pybamm.Array` Contains the result of acting the discretised integral on the child discretised_symbol
pybamm/spatial_methods/spatial_method.py
integral
jedgedrudd/PyBaMM
python
def integral(self, child, discretised_child): raise NotImplementedError
def indefinite_integral(self, child, discretised_child): '\n Implements the indefinite integral for a spatial method.\n\n Parameters\n ----------\n child: :class:`pybamm.Symbol`\n The symbol to be integrated\n discretised_child: :class:`pybamm.Symbol`\n The discretised symbol of the correct size\n\n Returns\n -------\n :class:`pybamm.Array`\n Contains the result of acting the discretised indefinite integral on\n the child discretised_symbol\n ' raise NotImplementedError
-4,873,417,814,637,923,000
Implements the indefinite integral for a spatial method. Parameters ---------- child: :class:`pybamm.Symbol` The symbol to be integrated discretised_child: :class:`pybamm.Symbol` The discretised symbol of the correct size Returns ------- :class:`pybamm.Array` Contains the result of acting the discretised indefinite integral on the child discretised_symbol
pybamm/spatial_methods/spatial_method.py
indefinite_integral
jedgedrudd/PyBaMM
python
def indefinite_integral(self, child, discretised_child): raise NotImplementedError