Dataset schema (column name, type, and value range as reported by the dataset viewer):

    body                      string, 26 to 98.2k characters
    body_hash                 int64, -9,222,864,604,528,158,000 to 9,221,803,474B
    docstring                 string, 1 to 16.8k characters
    path                      string, 5 to 230 characters
    name                      string, 1 to 96 characters
    repository_name           string, 7 to 89 characters
    lang                      string, 1 distinct value
    body_without_docstring    string, 20 to 98.2k characters
def __init__(self, doc):
    """
    Initialize from a database document.

    :param dict doc: A database document.
    """
    self._enabled = doc.get('enabled', True)
    self._strip_parameters = doc.get('strip_parameters', list())
4,003,996,736,965,378,600
Initialize from a database document. :param dict doc: A database document.
starbelly/policy.py
__init__
HyperionGray/starbelly
python
def __init__(self, doc):
    """
    Initialize from a database document.

    :param dict doc: A database document.
    """
    self._enabled = doc.get('enabled', True)
    self._strip_parameters = doc.get('strip_parameters', list())
def normalize(self, url):
    """
    Normalize ``url`` according to policy.

    :param str url: The URL to be normalized.
    :returns: The normalized URL.
    :rtype str:
    """
    if self._enabled:
        if self._strip_parameters:
            url = w3lib.url.url_query_cleaner(url, remove=True, unique=False, parameterlist=self._strip_parameters)
        url = w3lib.url.canonicalize_url(url)
    return url
-8,645,073,213,677,712,000
Normalize ``url`` according to policy. :param str url: The URL to be normalized. :returns: The normalized URL. :rtype str:
starbelly/policy.py
normalize
HyperionGray/starbelly
python
def normalize(self, url):
    """
    Normalize ``url`` according to policy.

    :param str url: The URL to be normalized.
    :returns: The normalized URL.
    :rtype str:
    """
    if self._enabled:
        if self._strip_parameters:
            url = w3lib.url.url_query_cleaner(url, remove=True, unique=False, parameterlist=self._strip_parameters)
        url = w3lib.url.canonicalize_url(url)
    return url
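The normalize() method above delegates both steps to w3lib. A minimal standalone sketch of the same two calls, with a made-up URL and strip list (the printed result is the expected canonical form, shown for illustration only):

import w3lib.url

url = 'http://example.com/page?b=2&utm_source=feed&a=1'
# drop the stripped parameters first, as normalize() does when strip_parameters is set
url = w3lib.url.url_query_cleaner(url, remove=True, unique=False, parameterlist=['utm_source'])
# then canonicalize (sorts the remaining query arguments, normalizes escaping)
print(w3lib.url.canonicalize_url(url))  # expected: http://example.com/page?a=1&b=2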
@staticmethod
def convert_doc_to_pb(doc, pb):
    """
    Convert from database document to protobuf.

    :param dict doc: Database document.
    :param pb: An empty protobuf.
    :type pb: starbelly.starbelly_pb2.PolicyUrlRules
    """
    for doc_url in doc:
        pb_url = pb.add()
        if ('pattern' in doc_url):
            pb_url.pattern = doc_url['pattern']
        if ('match' in doc_url):
            pb_url.match = MATCH_ENUM.Value(doc_url['match'])
        if ('action' in doc_url):
            pb_url.action = ACTION_ENUM.Value(doc_url['action'])
        if ('amount' in doc_url):
            pb_url.amount = doc_url['amount']
-6,116,275,236,407,626,000
Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUrlRules
starbelly/policy.py
convert_doc_to_pb
HyperionGray/starbelly
python
@staticmethod
def convert_doc_to_pb(doc, pb):
    """
    Convert from database document to protobuf.

    :param dict doc: Database document.
    :param pb: An empty protobuf.
    :type pb: starbelly.starbelly_pb2.PolicyUrlRules
    """
    for doc_url in doc:
        pb_url = pb.add()
        if ('pattern' in doc_url):
            pb_url.pattern = doc_url['pattern']
        if ('match' in doc_url):
            pb_url.match = MATCH_ENUM.Value(doc_url['match'])
        if ('action' in doc_url):
            pb_url.action = ACTION_ENUM.Value(doc_url['action'])
        if ('amount' in doc_url):
            pb_url.amount = doc_url['amount']
@staticmethod
def convert_pb_to_doc(pb, doc):
    """
    Convert protobuf to database document.

    :param pb: A protobuf
    :type pb: starbelly.starbelly_pb2.PolicyUrlRules
    :returns: Database document.
    :rtype: dict
    """
    for pb_url in pb:
        doc_url = dict()
        if pb_url.HasField('pattern'):
            doc_url['pattern'] = pb_url.pattern
        if pb_url.HasField('match'):
            doc_url['match'] = MATCH_ENUM.Name(pb_url.match)
        if pb_url.HasField('action'):
            doc_url['action'] = ACTION_ENUM.Name(pb_url.action)
        if pb_url.HasField('amount'):
            doc_url['amount'] = pb_url.amount
        doc.append(doc_url)
2,046,348,049,237,025,000
Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUrlRules :returns: Database document. :rtype: dict
starbelly/policy.py
convert_pb_to_doc
HyperionGray/starbelly
python
@staticmethod
def convert_pb_to_doc(pb, doc):
    """
    Convert protobuf to database document.

    :param pb: A protobuf
    :type pb: starbelly.starbelly_pb2.PolicyUrlRules
    :returns: Database document.
    :rtype: dict
    """
    for pb_url in pb:
        doc_url = dict()
        if pb_url.HasField('pattern'):
            doc_url['pattern'] = pb_url.pattern
        if pb_url.HasField('match'):
            doc_url['match'] = MATCH_ENUM.Name(pb_url.match)
        if pb_url.HasField('action'):
            doc_url['action'] = ACTION_ENUM.Name(pb_url.action)
        if pb_url.HasField('amount'):
            doc_url['amount'] = pb_url.amount
        doc.append(doc_url)
def __init__(self, docs, seeds):
    """
    Initialize from database documents.

    :param docs: Database document.
    :type docs: list[dict]
    :param seeds: Seed URLs, used for computing the costs for crawled links.
    :type seeds: list[str]
    """
    if (not docs):
        _invalid('At least one URL rule is required')
    self._rules = list()
    max_index = (len(docs) - 1)
    seed_domains = {URL(seed).host for seed in seeds}
    for (index, url_rule) in enumerate(docs):
        if (index < max_index):
            location = 'URL rule #{}'.format((index + 1))
            if (url_rule.get('pattern', '').strip() == ''):
                _invalid('Pattern is required', location)
            if ('match' not in url_rule):
                _invalid('Match selector is required', location)
            if ('action' not in url_rule):
                _invalid('Action selector is required', location)
            if ('amount' not in url_rule):
                _invalid('Amount is required', location)
            try:
                pattern_re = re.compile(url_rule['pattern'].format(SEED_DOMAINS='|'.join(seed_domains)))
            except:
                _invalid('Invalid regular expression', location)
            self._rules.append((pattern_re, url_rule['match'], url_rule['action'], url_rule['amount']))
        else:
            location = 'last URL rule'
            if ('pattern' in url_rule):
                _invalid('Pattern is not allowed', location)
            if ('match' in url_rule):
                _invalid('Match is not allowed', location)
            if ('action' not in url_rule):
                _invalid('Action is required', location)
            if ('amount' not in url_rule):
                _invalid('Amount is required', location)
            self._rules.append((None, None, url_rule['action'], url_rule['amount']))
-1,408,077,322,427,772,000
Initialize from database documents. :param docs: Database document. :type docs: list[dict] :param seeds: Seed URLs, used for computing the costs for crawled links. :type seeds: list[str]
starbelly/policy.py
__init__
HyperionGray/starbelly
python
def __init__(self, docs, seeds):
    """
    Initialize from database documents.

    :param docs: Database document.
    :type docs: list[dict]
    :param seeds: Seed URLs, used for computing the costs for crawled links.
    :type seeds: list[str]
    """
    if (not docs):
        _invalid('At least one URL rule is required')
    self._rules = list()
    max_index = (len(docs) - 1)
    seed_domains = {URL(seed).host for seed in seeds}
    for (index, url_rule) in enumerate(docs):
        if (index < max_index):
            location = 'URL rule #{}'.format((index + 1))
            if (url_rule.get('pattern', '').strip() == ''):
                _invalid('Pattern is required', location)
            if ('match' not in url_rule):
                _invalid('Match selector is required', location)
            if ('action' not in url_rule):
                _invalid('Action selector is required', location)
            if ('amount' not in url_rule):
                _invalid('Amount is required', location)
            try:
                pattern_re = re.compile(url_rule['pattern'].format(SEED_DOMAINS='|'.join(seed_domains)))
            except:
                _invalid('Invalid regular expression', location)
            self._rules.append((pattern_re, url_rule['match'], url_rule['action'], url_rule['amount']))
        else:
            location = 'last URL rule'
            if ('pattern' in url_rule):
                _invalid('Pattern is not allowed', location)
            if ('match' in url_rule):
                _invalid('Match is not allowed', location)
            if ('action' not in url_rule):
                _invalid('Action is required', location)
            if ('amount' not in url_rule):
                _invalid('Amount is required', location)
            self._rules.append((None, None, url_rule['action'], url_rule['amount']))
def get_cost(self, parent_cost, url):
    """
    Return the cost for a URL.

    :param float parent_cost: The cost of the resource which yielded this
        URL.
    :param str url: The URL to compute cost for.
    :returns: Cost of ``url``.
    :rtype: float
    """
    for (pattern, match, action, amount) in self._rules:
        if (pattern is None):
            break
        else:
            result = (pattern.search(url) is not None)
            if (match == 'DOES_NOT_MATCH'):
                result = (not result)
            if result:
                break
    if (action == 'ADD'):
        return (parent_cost + amount)
    return (parent_cost * amount)
-8,746,397,229,955,853,000
Return the cost for a URL. :param float parent_cost: The cost of the resource which yielded this URL. :param str url: The URL to compute cost for. :returns: Cost of ``url``. :rtype: float
starbelly/policy.py
get_cost
HyperionGray/starbelly
python
def get_cost(self, parent_cost, url):
    """
    Return the cost for a URL.

    :param float parent_cost: The cost of the resource which yielded this
        URL.
    :param str url: The URL to compute cost for.
    :returns: Cost of ``url``.
    :rtype: float
    """
    for (pattern, match, action, amount) in self._rules:
        if (pattern is None):
            break
        else:
            result = (pattern.search(url) is not None)
            if (match == 'DOES_NOT_MATCH'):
                result = (not result)
            if result:
                break
    if (action == 'ADD'):
        return (parent_cost + amount)
    return (parent_cost * amount)
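The rule table built in __init__ above is what get_cost() walks. A standalone sketch of that walk with two hypothetical rules (a 'login' pattern that multiplies the parent cost by zero, plus the required pattern-less catch-all rule that adds 1.0):

import re

rules = [
    (re.compile('login'), 'MATCHES', 'MULTIPLY', 0.0),  # hypothetical rule
    (None, None, 'ADD', 1.0),                           # final catch-all rule
]

def get_cost(parent_cost, url):
    # mirrors the loop in UrlRules.get_cost() above
    for pattern, match, action, amount in rules:
        if pattern is None:
            break
        result = pattern.search(url) is not None
        if match == 'DOES_NOT_MATCH':
            result = not result
        if result:
            break
    if action == 'ADD':
        return parent_cost + amount
    return parent_cost * amount

print(get_cost(1.0, 'https://example.com/about'))  # 2.0
print(get_cost(1.0, 'https://example.com/login'))  # 0.0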
@staticmethod
def convert_doc_to_pb(doc, pb):
    """
    Convert from database document to protobuf.

    :param dict doc: Database document.
    :param pb: An empty protobuf.
    :type pb: starbelly.starbelly_pb2.PolicyUserAgents
    """
    for doc_user_agent in doc:
        pb_user_agent = pb.add()
        pb_user_agent.name = doc_user_agent['name']
-4,759,767,526,905,407,000
Convert from database document to protobuf. :param dict doc: Database document. :param pb: An empty protobuf. :type pb: starbelly.starbelly_pb2.PolicyUserAgents
starbelly/policy.py
convert_doc_to_pb
HyperionGray/starbelly
python
@staticmethod
def convert_doc_to_pb(doc, pb):
    """
    Convert from database document to protobuf.

    :param dict doc: Database document.
    :param pb: An empty protobuf.
    :type pb: starbelly.starbelly_pb2.PolicyUserAgents
    """
    for doc_user_agent in doc:
        pb_user_agent = pb.add()
        pb_user_agent.name = doc_user_agent['name']
@staticmethod
def convert_pb_to_doc(pb, doc):
    """
    Convert protobuf to database document.

    :param pb: A protobuf
    :type pb: starbelly.starbelly_pb2.PolicyUserAgents
    :returns: Database document.
    :rtype: dict
    """
    for user_agent in pb:
        doc.append({'name': user_agent.name})
608,808,429,641,883,000
Convert protobuf to database document. :param pb: A protobuf :type pb: starbelly.starbelly_pb2.PolicyUserAgents :returns: Database document. :rtype: dict
starbelly/policy.py
convert_pb_to_doc
HyperionGray/starbelly
python
@staticmethod
def convert_pb_to_doc(pb, doc):
    """
    Convert protobuf to database document.

    :param pb: A protobuf
    :type pb: starbelly.starbelly_pb2.PolicyUserAgents
    :returns: Database document.
    :rtype: dict
    """
    for user_agent in pb:
        doc.append({'name': user_agent.name})
def __init__(self, docs, version):
    """
    Initialize from database documents.

    :param docs: Database document.
    :type docs: list[dict]
    :param str version: The version number interpolated into ``{VERSION}``.
    """
    if (not docs):
        _invalid('At least one user agent is required')
    self._user_agents = list()
    for (index, user_agent) in enumerate(docs):
        location = 'User agent #{}'.format((index + 1))
        if (user_agent.get('name', '').strip() == ''):
            _invalid('Name is required', location)
        self._user_agents.append(user_agent['name'].format(VERSION=version))
-6,679,003,698,236,295,000
Initialize from database documents. :param docs: Database document. :type docs: list[dict] :param str version: The version number interpolated into ``{VERSION}``.
starbelly/policy.py
__init__
HyperionGray/starbelly
python
def __init__(self, docs, version):
    """
    Initialize from database documents.

    :param docs: Database document.
    :type docs: list[dict]
    :param str version: The version number interpolated into ``{VERSION}``.
    """
    if (not docs):
        _invalid('At least one user agent is required')
    self._user_agents = list()
    for (index, user_agent) in enumerate(docs):
        location = 'User agent #{}'.format((index + 1))
        if (user_agent.get('name', '').strip() == ''):
            _invalid('Name is required', location)
        self._user_agents.append(user_agent['name'].format(VERSION=version))
def get_first_user_agent(self):
    """
    :returns: Return the first user agent.
    :rtype: str
    """
    return self._user_agents[0]
1,953,470,778,544,375,300
:returns: Return the first user agent. :rtype: str
starbelly/policy.py
get_first_user_agent
HyperionGray/starbelly
python
def get_first_user_agent(self):
    """
    :returns: Return the first user agent.
    :rtype: str
    """
    return self._user_agents[0]
def get_user_agent(self):
    """
    :returns: A randomly selected user agent string.
    :rtype: str
    """
    return random.choice(self._user_agents)
-8,234,317,892,050,347,000
:returns: A randomly selected user agent string. :rtype: str
starbelly/policy.py
get_user_agent
HyperionGray/starbelly
python
def get_user_agent(self):
    """
    :returns: A randomly selected user agent string.
    :rtype: str
    """
    return random.choice(self._user_agents)
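Taken together, the three user-agent methods above amount to template interpolation at construction time plus selection at request time. A short sketch with hypothetical database values:

import random

version = '2.0.0'
docs = [{'name': 'Starbelly/{VERSION}'}, {'name': 'TestCrawler/{VERSION}'}]  # hypothetical documents
user_agents = [doc['name'].format(VERSION=version) for doc in docs]
print(user_agents[0])              # first user agent, as in get_first_user_agent()
print(random.choice(user_agents))  # random choice, as in get_user_agent()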
@transaction.atomic
def merge_users(main_user, other_user, preview=False):
    """Merges other_user into main_user"""
    merged_user = {}
    merged_user['is_active'] = (main_user.is_active or other_user.is_active)
    merged_user['title'] = (main_user.title or other_user.title or '')
    merged_user['first_name'] = (main_user.first_name or other_user.first_name or '')
    merged_user['last_name'] = (main_user.last_name or other_user.last_name or '')
    merged_user['email'] = (main_user.email or other_user.email or None)
    merged_user['groups'] = Group.objects.filter(user__in=[main_user, other_user]).distinct()
    merged_user['is_superuser'] = (main_user.is_superuser or other_user.is_superuser)
    merged_user['is_proxy_user'] = (main_user.is_proxy_user or other_user.is_proxy_user)
    merged_user['delegates'] = UserProfile.objects.filter(represented_users__in=[main_user, other_user]).distinct()
    merged_user['represented_users'] = UserProfile.objects.filter(delegates__in=[main_user, other_user]).distinct()
    merged_user['cc_users'] = UserProfile.objects.filter(ccing_users__in=[main_user, other_user]).distinct()
    merged_user['ccing_users'] = UserProfile.objects.filter(cc_users__in=[main_user, other_user]).distinct()
    errors = []
    warnings = []
    courses_main_user_is_responsible_for = main_user.get_sorted_courses_responsible_for()
    if any(((course in courses_main_user_is_responsible_for) for course in other_user.get_sorted_courses_responsible_for())):
        errors.append('courses_responsible_for')
    if any(((contribution.evaluation in [contribution.evaluation for contribution in main_user.get_sorted_contributions()]) for contribution in other_user.get_sorted_contributions())):
        errors.append('contributions')
    if any(((evaluation in main_user.get_sorted_evaluations_participating_in()) for evaluation in other_user.get_sorted_evaluations_participating_in())):
        errors.append('evaluations_participating_in')
    if any(((evaluation in main_user.get_sorted_evaluations_voted_for()) for evaluation in other_user.get_sorted_evaluations_voted_for())):
        errors.append('evaluations_voted_for')
    if (main_user.reward_point_grantings.all().exists() and other_user.reward_point_grantings.all().exists()):
        warnings.append('rewards')
    merged_user['courses_responsible_for'] = Course.objects.filter(responsibles__in=[main_user, other_user]).order_by('semester__created_at', 'name_de')
    merged_user['contributions'] = Contribution.objects.filter(contributor__in=[main_user, other_user]).order_by('evaluation__course__semester__created_at', 'evaluation__name_de')
    merged_user['evaluations_participating_in'] = Evaluation.objects.filter(participants__in=[main_user, other_user]).order_by('course__semester__created_at', 'name_de')
    merged_user['evaluations_voted_for'] = Evaluation.objects.filter(voters__in=[main_user, other_user]).order_by('course__semester__created_at', 'name_de')
    merged_user['reward_point_grantings'] = (main_user.reward_point_grantings.all() or other_user.reward_point_grantings.all())
    merged_user['reward_point_redemptions'] = (main_user.reward_point_redemptions.all() or other_user.reward_point_redemptions.all())
    if (preview or errors):
        return (merged_user, errors, warnings)
    for course in Course.objects.filter(responsibles__in=[other_user]):
        responsibles = list(course.responsibles.all())
        responsibles.remove(other_user)
        responsibles.append(main_user)
        course.responsibles.set(responsibles)
    GradeDocument.objects.filter(last_modified_user=other_user).update(last_modified_user=main_user)
    other_user.email = ''
    other_user.save()
    for (key, value) in merged_user.items():
        attr = getattr(main_user, key)
        if hasattr(attr, 'set'):
            attr.set(value)
        else:
            setattr(main_user, key, value)
    main_user.save()
    other_user.reward_point_grantings.all().delete()
    other_user.reward_point_redemptions.all().delete()
    LogEntry.objects.filter(user=other_user).update(user=main_user)
    evaluations = Evaluation.objects.filter(contributions__contributor=main_user, state__in=STATES_WITH_RESULTS_CACHING).distinct()
    for evaluation in evaluations:
        cache_results(evaluation)
    other_user.delete()
    return (merged_user, errors, warnings)
-4,810,132,564,737,749,000
Merges other_user into main_user
evap/staff/tools.py
merge_users
lill28/EvaP
python
@transaction.atomic def merge_users(main_user, other_user, preview=False): merged_user = {} merged_user['is_active'] = (main_user.is_active or other_user.is_active) merged_user['title'] = (main_user.title or other_user.title or ) merged_user['first_name'] = (main_user.first_name or other_user.first_name or ) merged_user['last_name'] = (main_user.last_name or other_user.last_name or ) merged_user['email'] = (main_user.email or other_user.email or None) merged_user['groups'] = Group.objects.filter(user__in=[main_user, other_user]).distinct() merged_user['is_superuser'] = (main_user.is_superuser or other_user.is_superuser) merged_user['is_proxy_user'] = (main_user.is_proxy_user or other_user.is_proxy_user) merged_user['delegates'] = UserProfile.objects.filter(represented_users__in=[main_user, other_user]).distinct() merged_user['represented_users'] = UserProfile.objects.filter(delegates__in=[main_user, other_user]).distinct() merged_user['cc_users'] = UserProfile.objects.filter(ccing_users__in=[main_user, other_user]).distinct() merged_user['ccing_users'] = UserProfile.objects.filter(cc_users__in=[main_user, other_user]).distinct() errors = [] warnings = [] courses_main_user_is_responsible_for = main_user.get_sorted_courses_responsible_for() if any(((course in courses_main_user_is_responsible_for) for course in other_user.get_sorted_courses_responsible_for())): errors.append('courses_responsible_for') if any(((contribution.evaluation in [contribution.evaluation for contribution in main_user.get_sorted_contributions()]) for contribution in other_user.get_sorted_contributions())): errors.append('contributions') if any(((evaluation in main_user.get_sorted_evaluations_participating_in()) for evaluation in other_user.get_sorted_evaluations_participating_in())): errors.append('evaluations_participating_in') if any(((evaluation in main_user.get_sorted_evaluations_voted_for()) for evaluation in other_user.get_sorted_evaluations_voted_for())): errors.append('evaluations_voted_for') if (main_user.reward_point_grantings.all().exists() and other_user.reward_point_grantings.all().exists()): warnings.append('rewards') merged_user['courses_responsible_for'] = Course.objects.filter(responsibles__in=[main_user, other_user]).order_by('semester__created_at', 'name_de') merged_user['contributions'] = Contribution.objects.filter(contributor__in=[main_user, other_user]).order_by('evaluation__course__semester__created_at', 'evaluation__name_de') merged_user['evaluations_participating_in'] = Evaluation.objects.filter(participants__in=[main_user, other_user]).order_by('course__semester__created_at', 'name_de') merged_user['evaluations_voted_for'] = Evaluation.objects.filter(voters__in=[main_user, other_user]).order_by('course__semester__created_at', 'name_de') merged_user['reward_point_grantings'] = (main_user.reward_point_grantings.all() or other_user.reward_point_grantings.all()) merged_user['reward_point_redemptions'] = (main_user.reward_point_redemptions.all() or other_user.reward_point_redemptions.all()) if (preview or errors): return (merged_user, errors, warnings) for course in Course.objects.filter(responsibles__in=[other_user]): responsibles = list(course.responsibles.all()) responsibles.remove(other_user) responsibles.append(main_user) course.responsibles.set(responsibles) GradeDocument.objects.filter(last_modified_user=other_user).update(last_modified_user=main_user) other_user.email = other_user.save() for (key, value) in merged_user.items(): attr = getattr(main_user, key) if hasattr(attr, 'set'): 
attr.set(value) else: setattr(main_user, key, value) main_user.save() other_user.reward_point_grantings.all().delete() other_user.reward_point_redemptions.all().delete() LogEntry.objects.filter(user=other_user).update(user=main_user) evaluations = Evaluation.objects.filter(contributions__contributor=main_user, state__in=STATES_WITH_RESULTS_CACHING).distinct() for evaluation in evaluations: cache_results(evaluation) other_user.delete() return (merged_user, errors, warnings)
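The attribute-merge loop near the end of merge_users() handles plain model fields and related managers with one dispatch: anything exposing .set() gets the merged collection, everything else is assigned directly. A toy, Django-free sketch of that dispatch (classes and values are hypothetical stand-ins):

class RelatedManager:
    # stand-in for a Django related manager, which exposes .set()
    def __init__(self):
        self.items = []
    def set(self, values):
        self.items = list(values)

class User:
    def __init__(self):
        self.first_name = ''
        self.groups = RelatedManager()

main_user = User()
merged = {'first_name': 'Alice', 'groups': ['reviewers', 'editors']}  # hypothetical merged values
for key, value in merged.items():
    attr = getattr(main_user, key)
    if hasattr(attr, 'set'):
        attr.set(value)                  # relation: replace its contents
    else:
        setattr(main_user, key, value)   # plain field: overwrite
print(main_user.first_name, main_user.groups.items)  # Alice ['reviewers', 'editors']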
def timer_callback(self):
    ' Calculate Mx1, My1, ...... Mx6, My6 '
    Mx1 = (self.x2 - self.x1)
    My1 = (self.y2 - self.y1)
    Mx2 = (((self.x1 - self.x2) + (self.x3 - self.x2)) / 2)
    My2 = (((self.y1 - self.y2) + (self.y3 - self.y2)) / 2)
    Mx3 = (((self.x2 - self.x3) + (self.x4 - self.x3)) / 2)
    My3 = (((self.y2 - self.y3) + (self.y4 - self.y3)) / 2)
    Mx4 = (((self.x3 - self.x4) + (self.x5 - self.x4)) / 2)
    My4 = (((self.y4 - self.y4) + (self.y5 - self.y4)) / 2)
    Mx5 = (((self.x4 - self.x5) + (self.x6 - self.x5)) / 2)
    My5 = (((self.y4 - self.y5) + (self.y6 - self.y5)) / 2)
    Mx6 = (self.x5 - self.x6)
    My6 = (self.y5 - self.y6)
    ' Use MLP to Predict control inputs '
    relative_pose_1 = [Mx1, My1]
    relative_pose_2 = [Mx2, My2]
    relative_pose_3 = [Mx3, My3]
    relative_pose_4 = [Mx4, My4]
    relative_pose_5 = [Mx5, My5]
    relative_pose_6 = [Mx6, My6]
    u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model)
    u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model)
    u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model)
    u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model)
    u5_predicted = MLP_Model.predict(relative_pose_5, loaded_model)
    u6_predicted = MLP_Model.predict(relative_pose_6, loaded_model)
    u1_predicted_np = np.array([[u1_predicted[0][0]], [u1_predicted[0][1]]])
    u2_predicted_np = np.array([[u2_predicted[0][0]], [u2_predicted[0][1]]])
    u3_predicted_np = np.array([[u3_predicted[0][0]], [u3_predicted[0][1]]])
    u4_predicted_np = np.array([[u4_predicted[0][0]], [u4_predicted[0][1]]])
    u5_predicted_np = np.array([[u5_predicted[0][0]], [u5_predicted[0][1]]])
    u6_predicted_np = np.array([[u6_predicted[0][0]], [u6_predicted[0][1]]])
    ' Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 '
    S1 = np.array([[self.v1], [self.w1]])
    G1 = np.array([[1, 0], [0, (1 / L)]])
    R1 = np.array([[math.cos(self.Theta1), math.sin(self.Theta1)], [(- math.sin(self.Theta1)), math.cos(self.Theta1)]])
    S1 = np.dot(np.dot(G1, R1), u1_predicted_np)
    S2 = np.array([[self.v2], [self.w2]])
    G2 = np.array([[1, 0], [0, (1 / L)]])
    R2 = np.array([[math.cos(self.Theta2), math.sin(self.Theta2)], [(- math.sin(self.Theta2)), math.cos(self.Theta2)]])
    S2 = np.dot(np.dot(G2, R2), u2_predicted_np)
    S3 = np.array([[self.v3], [self.w3]])
    G3 = np.array([[1, 0], [0, (1 / L)]])
    R3 = np.array([[math.cos(self.Theta3), math.sin(self.Theta3)], [(- math.sin(self.Theta3)), math.cos(self.Theta3)]])
    S3 = np.dot(np.dot(G3, R3), u3_predicted_np)
    S4 = np.array([[self.v4], [self.w4]])
    G4 = np.array([[1, 0], [0, (1 / L)]])
    R4 = np.array([[math.cos(self.Theta4), math.sin(self.Theta4)], [(- math.sin(self.Theta4)), math.cos(self.Theta4)]])
    S4 = np.dot(np.dot(G4, R4), u4_predicted_np)
    S5 = np.array([[self.v5], [self.w5]])
    G5 = np.array([[1, 0], [0, (1 / L)]])
    R5 = np.array([[math.cos(self.Theta5), math.sin(self.Theta5)], [(- math.sin(self.Theta5)), math.cos(self.Theta5)]])
    S5 = np.dot(np.dot(G5, R5), u5_predicted_np)
    S6 = np.array([[self.v6], [self.w6]])
    G6 = np.array([[1, 0], [0, (1 / L)]])
    R6 = np.array([[math.cos(self.Theta6), math.sin(self.Theta6)], [(- math.sin(self.Theta6)), math.cos(self.Theta6)]])
    S6 = np.dot(np.dot(G6, R6), u6_predicted_np)
    ' Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 '
    D = np.array([[(1 / 2), (1 / 2)], [((- 1) / (2 * d)), (1 / (2 * d))]])
    Di = np.linalg.inv(D)
    Speed_L1 = np.array([[self.vL1], [self.vR1]])
    Speed_L2 = np.array([[self.vL2], [self.vR2]])
    Speed_L3 = np.array([[self.vL3], [self.vR3]])
    Speed_L4 = np.array([[self.vL4], [self.vR4]])
    Speed_L5 = np.array([[self.vL5], [self.vR5]])
    Speed_L6 = np.array([[self.vL6], [self.vR6]])
    M1 = np.array([[S1[0]], [S1[1]]]).reshape(2, 1)
    M2 = np.array([[S2[0]], [S2[1]]]).reshape(2, 1)
    M3 = np.array([[S3[0]], [S3[1]]]).reshape(2, 1)
    M4 = np.array([[S4[0]], [S4[1]]]).reshape(2, 1)
    M5 = np.array([[S5[0]], [S5[1]]]).reshape(2, 1)
    M6 = np.array([[S6[0]], [S6[1]]]).reshape(2, 1)
    Speed_L1 = np.dot(Di, M1)
    Speed_L2 = np.dot(Di, M2)
    Speed_L3 = np.dot(Di, M3)
    Speed_L4 = np.dot(Di, M4)
    Speed_L5 = np.dot(Di, M5)
    Speed_L6 = np.dot(Di, M6)
    VL1 = float(Speed_L1[0])
    VR1 = float(Speed_L1[1])
    VL2 = float(Speed_L2[0])
    VR2 = float(Speed_L2[1])
    VL3 = float(Speed_L3[0])
    VR3 = float(Speed_L3[1])
    VL4 = float(Speed_L4[0])
    VR4 = float(Speed_L4[1])
    VL5 = float(Speed_L5[0])
    VR5 = float(Speed_L5[1])
    VL6 = float(Speed_L6[0])
    VR6 = float(Speed_L6[1])
    ' Publish Speed Commands to Robot 1 '
    msgl1 = Float32()
    msgr1 = Float32()
    msgl1.data = VL1
    msgr1.data = VR1
    self.publisher_l1.publish(msgl1)
    self.publisher_r1.publish(msgr1)
    ' Publish Speed Commands to Robot 2 '
    msgl2 = Float32()
    msgr2 = Float32()
    msgl2.data = VL2
    msgr2.data = VR2
    self.publisher_l2.publish(msgl2)
    self.publisher_r2.publish(msgr2)
    ' Publish Speed Commands to Robot 3 '
    msgl3 = Float32()
    msgr3 = Float32()
    msgl3.data = VL3
    msgr3.data = VR3
    self.publisher_l3.publish(msgl3)
    self.publisher_r3.publish(msgr3)
    ' Publish Speed Commands to Robot 4 '
    msgl4 = Float32()
    msgr4 = Float32()
    msgl4.data = VL4
    msgr4.data = VR4
    self.publisher_l4.publish(msgl4)
    self.publisher_r4.publish(msgr4)
    ' Publish Speed Commands to Robot 5 '
    msgl5 = Float32()
    msgr5 = Float32()
    msgl5.data = VL5
    msgr5.data = VR5
    self.publisher_l5.publish(msgl5)
    self.publisher_r5.publish(msgr5)
    ' Publish Speed Commands to Robot 6 '
    msgl6 = Float32()
    msgr6 = Float32()
    msgl6.data = VL6
    msgr6.data = VR6
    self.publisher_l6.publish(msgl6)
    self.publisher_r6.publish(msgr6)
    self.i += 1
6,601,636,101,486,824,000
Calculate Mx1, My1, ...... Mx6, My6
Real Topology Graph/GNN Model 1/Cyclic Graph/Main_MLP_line.py
timer_callback
HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots
python
def timer_callback(self): ' ' Mx1 = (self.x2 - self.x1) My1 = (self.y2 - self.y1) Mx2 = (((self.x1 - self.x2) + (self.x3 - self.x2)) / 2) My2 = (((self.y1 - self.y2) + (self.y3 - self.y2)) / 2) Mx3 = (((self.x2 - self.x3) + (self.x4 - self.x3)) / 2) My3 = (((self.y2 - self.y3) + (self.y4 - self.y3)) / 2) Mx4 = (((self.x3 - self.x4) + (self.x5 - self.x4)) / 2) My4 = (((self.y4 - self.y4) + (self.y5 - self.y4)) / 2) Mx5 = (((self.x4 - self.x5) + (self.x6 - self.x5)) / 2) My5 = (((self.y4 - self.y5) + (self.y6 - self.y5)) / 2) Mx6 = (self.x5 - self.x6) My6 = (self.y5 - self.y6) ' Use MLP to Predict control inputs ' relative_pose_1 = [Mx1, My1] relative_pose_2 = [Mx2, My2] relative_pose_3 = [Mx3, My3] relative_pose_4 = [Mx4, My4] relative_pose_5 = [Mx5, My5] relative_pose_6 = [Mx6, My6] u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model) u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model) u5_predicted = MLP_Model.predict(relative_pose_5, loaded_model) u6_predicted = MLP_Model.predict(relative_pose_6, loaded_model) u1_predicted_np = np.array([[u1_predicted[0][0]], [u1_predicted[0][1]]]) u2_predicted_np = np.array([[u2_predicted[0][0]], [u2_predicted[0][1]]]) u3_predicted_np = np.array([[u3_predicted[0][0]], [u3_predicted[0][1]]]) u4_predicted_np = np.array([[u4_predicted[0][0]], [u4_predicted[0][1]]]) u5_predicted_np = np.array([[u5_predicted[0][0]], [u5_predicted[0][1]]]) u6_predicted_np = np.array([[u6_predicted[0][0]], [u6_predicted[0][1]]]) ' Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 ' S1 = np.array([[self.v1], [self.w1]]) G1 = np.array([[1, 0], [0, (1 / L)]]) R1 = np.array([[math.cos(self.Theta1), math.sin(self.Theta1)], [(- math.sin(self.Theta1)), math.cos(self.Theta1)]]) S1 = np.dot(np.dot(G1, R1), u1_predicted_np) S2 = np.array([[self.v2], [self.w2]]) G2 = np.array([[1, 0], [0, (1 / L)]]) R2 = np.array([[math.cos(self.Theta2), math.sin(self.Theta2)], [(- math.sin(self.Theta2)), math.cos(self.Theta2)]]) S2 = np.dot(np.dot(G2, R2), u2_predicted_np) S3 = np.array([[self.v3], [self.w3]]) G3 = np.array([[1, 0], [0, (1 / L)]]) R3 = np.array([[math.cos(self.Theta3), math.sin(self.Theta3)], [(- math.sin(self.Theta3)), math.cos(self.Theta3)]]) S3 = np.dot(np.dot(G3, R3), u3_predicted_np) S4 = np.array([[self.v4], [self.w4]]) G4 = np.array([[1, 0], [0, (1 / L)]]) R4 = np.array([[math.cos(self.Theta4), math.sin(self.Theta4)], [(- math.sin(self.Theta4)), math.cos(self.Theta4)]]) S4 = np.dot(np.dot(G4, R4), u4_predicted_np) S5 = np.array([[self.v5], [self.w5]]) G5 = np.array([[1, 0], [0, (1 / L)]]) R5 = np.array([[math.cos(self.Theta5), math.sin(self.Theta5)], [(- math.sin(self.Theta5)), math.cos(self.Theta5)]]) S5 = np.dot(np.dot(G5, R5), u5_predicted_np) S6 = np.array([[self.v6], [self.w6]]) G6 = np.array([[1, 0], [0, (1 / L)]]) R6 = np.array([[math.cos(self.Theta6), math.sin(self.Theta6)], [(- math.sin(self.Theta6)), math.cos(self.Theta6)]]) S6 = np.dot(np.dot(G6, R6), u6_predicted_np) ' Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 ' D = np.array([[(1 / 2), (1 / 2)], [((- 1) / (2 * d)), (1 / (2 * d))]]) Di = np.linalg.inv(D) Speed_L1 = np.array([[self.vL1], [self.vR1]]) Speed_L2 = np.array([[self.vL2], [self.vR2]]) Speed_L3 = np.array([[self.vL3], [self.vR3]]) Speed_L4 = np.array([[self.vL4], [self.vR4]]) Speed_L5 = np.array([[self.vL5], [self.vR5]]) Speed_L6 = np.array([[self.vL6], [self.vR6]]) M1 = np.array([[S1[0]], 
[S1[1]]]).reshape(2, 1) M2 = np.array([[S2[0]], [S2[1]]]).reshape(2, 1) M3 = np.array([[S3[0]], [S3[1]]]).reshape(2, 1) M4 = np.array([[S4[0]], [S4[1]]]).reshape(2, 1) M5 = np.array([[S5[0]], [S5[1]]]).reshape(2, 1) M6 = np.array([[S6[0]], [S6[1]]]).reshape(2, 1) Speed_L1 = np.dot(Di, M1) Speed_L2 = np.dot(Di, M2) Speed_L3 = np.dot(Di, M3) Speed_L4 = np.dot(Di, M4) Speed_L5 = np.dot(Di, M5) Speed_L6 = np.dot(Di, M6) VL1 = float(Speed_L1[0]) VR1 = float(Speed_L1[1]) VL2 = float(Speed_L2[0]) VR2 = float(Speed_L2[1]) VL3 = float(Speed_L3[0]) VR3 = float(Speed_L3[1]) VL4 = float(Speed_L4[0]) VR4 = float(Speed_L4[1]) VL5 = float(Speed_L5[0]) VR5 = float(Speed_L5[1]) VL6 = float(Speed_L6[0]) VR6 = float(Speed_L6[1]) ' Publish Speed Commands to Robot 1 ' msgl1 = Float32() msgr1 = Float32() msgl1.data = VL1 msgr1.data = VR1 self.publisher_l1.publish(msgl1) self.publisher_r1.publish(msgr1) ' Publish Speed Commands to Robot 2 ' msgl2 = Float32() msgr2 = Float32() msgl2.data = VL2 msgr2.data = VR2 self.publisher_l2.publish(msgl2) self.publisher_r2.publish(msgr2) ' Publish Speed Commands to Robot 3 ' msgl3 = Float32() msgr3 = Float32() msgl3.data = VL3 msgr3.data = VR3 self.publisher_l3.publish(msgl3) self.publisher_r3.publish(msgr3) ' Publish Speed Commands to Robot 4 ' msgl4 = Float32() msgr4 = Float32() msgl4.data = VL4 msgr4.data = VR4 self.publisher_l4.publish(msgl4) self.publisher_r4.publish(msgr4) ' Publish Speed Commands to Robot 5 ' msgl5 = Float32() msgr5 = Float32() msgl5.data = VL5 msgr5.data = VR5 self.publisher_l5.publish(msgl5) self.publisher_r5.publish(msgr5) ' Publish Speed Commands to Robot 6 ' msgl6 = Float32() msgr6 = Float32() msgl6.data = VL6 msgr6.data = VR6 self.publisher_l6.publish(msgl6) self.publisher_r6.publish(msgr6) self.i += 1
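timer_callback() applies the same two linear maps to every robot: predicted planar input to (v, w) via G·R, then (v, w) to wheel speeds via the inverse of D. A standalone numerical sketch of those two steps, with assumed values for L (offset distance) and d (half the wheel separation), and a hypothetical heading and control input:

import numpy as np

L, d = 0.1, 0.05                   # assumed geometry constants
theta, ux, uy = 0.3, 0.2, 0.1      # hypothetical heading and predicted control input

G = np.array([[1, 0], [0, 1 / L]])
R = np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])
v, w = (G @ R @ np.array([[ux], [uy]])).flatten()   # as S = G · R · u above

D = np.array([[1 / 2, 1 / 2], [-1 / (2 * d), 1 / (2 * d)]])
vL, vR = (np.linalg.inv(D) @ np.array([[v], [w]])).flatten()  # as Speed = D^-1 · S above
print(v, w, vL, vR)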
def fOBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, minNvisits=750):
    """Metrics for calculating fO.

    Parameters
    ----------
    colmap : dict or None, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    nside : int, optional
        Nside for the healpix slicer. Default 64.
    extraSql : str or None, optional
        Additional sql constraint to apply to all metrics.
    extraMetadata : str or None, optional
        Additional metadata to apply to all results.

    Returns
    -------
    metricBundleDict
    """
    if (colmap is None):
        colmap = ColMapDict('fbs')
    bundleList = []
    sql = ''
    metadata = 'All visits'
    if ((extraSql is not None) and (len(extraSql) > 0)):
        sql = extraSql
        if (extraMetadata is None):
            metadata = extraSql.replace('filter =', '').replace('filter=', '')
            metadata = metadata.replace('"', '').replace("'", '')
    if (extraMetadata is not None):
        metadata = extraMetadata
    subgroup = metadata
    (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(None, colmap, None)
    metadata = combineMetadata(metadata, ditherMeta)
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees)
    displayDict = {'group': 'SRD FO metrics', 'subgroup': subgroup, 'order': 0}
    metric = metrics.CountExplimMetric(col=colmap['mjd'], metricName='fO', expCol=colmap['exptime'])
    plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkArea, 'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500}
    summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea', Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fOArea(nside=nside, norm=True, metricName='fOArea/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=False, metricName='fONv', Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=True, metricName='fONv/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fOArea(nside=nside, norm=False, metricName=f'fOArea_{minNvisits}', Asky=benchmarkArea, Nvisit=minNvisits)]
    caption = 'The FO metric evaluates the overall efficiency of observing. '
    caption += ('foNv: out of %.2f sq degrees, the area receives at least X and a median of Y visits (out of %d, if compared to benchmark). ' % (benchmarkArea, benchmarkNvisits))
    caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared to benchmark) receives at least %d visits. ' % (benchmarkArea, benchmarkNvisits))
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, stackerList=[ditherStacker], displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()], metadata=metadata)
    bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
-1,700,880,196,380,505,000
Metrics for calculating fO. Parameters ---------- colmap : dict or None, optional A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, optional The name of the simulated survey. Default is "opsim". nside : int, optional Nside for the healpix slicer. Default 64. extraSql : str or None, optional Additional sql constraint to apply to all metrics. extraMetadata : str or None, optional Additional metadata to apply to all results. Returns ------- metricBundleDict
rubin_sim/maf/batches/srdBatch.py
fOBatch
RileyWClarke/Flarubin
python
def fOBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, minNvisits=750): 'Metrics for calculating fO.\n\n Parameters\n ----------\n colmap : dict or None, optional\n A dictionary with a mapping of column names. Default will use OpsimV4 column names.\n runName : str, optional\n The name of the simulated survey. Default is "opsim".\n nside : int, optional\n Nside for the healpix slicer. Default 64.\n extraSql : str or None, optional\n Additional sql constraint to apply to all metrics.\n extraMetadata : str or None, optional\n Additional metadata to apply to all results.\n\n Returns\n -------\n metricBundleDict\n ' if (colmap is None): colmap = ColMapDict('fbs') bundleList = [] sql = metadata = 'All visits' if ((extraSql is not None) and (len(extraSql) > 0)): sql = extraSql if (extraMetadata is None): metadata = extraSql.replace('filter =', ).replace('filter=', ) metadata = metadata.replace('"', ).replace("'", ) if (extraMetadata is not None): metadata = extraMetadata subgroup = metadata (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(None, colmap, None) metadata = combineMetadata(metadata, ditherMeta) slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees) displayDict = {'group': 'SRD FO metrics', 'subgroup': subgroup, 'order': 0} metric = metrics.CountExplimMetric(col=colmap['mjd'], metricName='fO', expCol=colmap['exptime']) plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkArea, 'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500} summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fOArea(nside=nside, norm=True, metricName='fOArea/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=False, metricName='fONv', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=True, metricName='fONv/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fOArea(nside=nside, norm=False, metricName=f'fOArea_{minNvisits}', Asky=benchmarkArea, Nvisit=minNvisits)] caption = 'The FO metric evaluates the overall efficiency of observing. ' caption += ('foNv: out of %.2f sq degrees, the area receives at least X and a median of Y visits (out of %d, if compared to benchmark). ' % (benchmarkArea, benchmarkNvisits)) caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared to benchmark) receives at least %d visits. ' % (benchmarkArea, benchmarkNvisits)) displayDict['caption'] = caption bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, stackerList=[ditherStacker], displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()], metadata=metadata) bundleList.append(bundle) for b in bundleList: b.setRunName(runName) return mb.makeBundlesDictFromList(bundleList)
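A hedged usage sketch for the batch above, assuming rubin_sim is installed and that fOBatch is importable from the module path shown in the record (rubin_sim/maf/batches/srdBatch.py); the run name is hypothetical:

from rubin_sim.maf.batches.srdBatch import fOBatch

bdict = fOBatch(runName='my_run', nside=32)   # dict of MetricBundle objects, one per metric
print(list(bdict.keys()))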
def astrometryBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, ditherStacker=None, ditherkwargs=None):
    """Metrics for evaluating proper motion and parallax.

    Parameters
    ----------
    colmap : dict or None, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    nside : int, optional
        Nside for the healpix slicer. Default 64.
    extraSql : str or None, optional
        Additional sql constraint to apply to all metrics.
    extraMetadata : str or None, optional
        Additional metadata to apply to all results.
    ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, optional
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if (colmap is None):
        colmap = ColMapDict('fbs')
    bundleList = []
    sql = ''
    metadata = 'All visits'
    if ((extraSql is not None) and (len(extraSql) > 0)):
        sql = extraSql
        if (extraMetadata is None):
            metadata = extraSql.replace('filter =', '').replace('filter=', '')
            metadata = metadata.replace('"', '').replace("'", '')
    if (extraMetadata is not None):
        metadata = extraMetadata
    subgroup = metadata
    (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(ditherStacker, colmap, ditherkwargs)
    metadata = combineMetadata(metadata, ditherMeta)
    rmags_para = [22.4, 24.0]
    rmags_pm = [20.5, 24.0]
    parallaxStacker = stackers.ParallaxFactorStacker(raCol=raCol, decCol=decCol, dateCol=colmap['mjd'], degrees=degrees)
    dcrStacker = stackers.DcrStacker(filterCol=colmap['filter'], altCol=colmap['alt'], degrees=degrees, raCol=raCol, decCol=decCol, lstCol=colmap['lst'], site='LSST', mjdCol=colmap['mjd'])
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    displayDict = {'group': 'SRD Parallax', 'subgroup': subgroup, 'order': 0, 'caption': None}
    plotmaxVals = (2.0, 15.0)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Parallax Error (18k)')]
    summary.append(metrics.PercentileMetric(percentile=95, metricName='95th Percentile Parallax Error'))
    summary.extend(standardSummary())
    for (rmag, plotmax) in zip(rmags_para, plotmaxVals):
        plotDict = {'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax}
        metric = metrics.ParallaxMetric(metricName=('Parallax Error @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False)
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    for rmag in rmags_para:
        metric = metrics.ParallaxMetric(metricName=('Normalized Parallax @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=True)
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    for rmag in rmags_para:
        metric = metrics.ParallaxCoverageMetric(metricName=('Parallax Coverage @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'])
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    for rmag in rmags_para:
        metric = metrics.ParallaxDcrDegenMetric(metricName=('Parallax-DCR degeneracy @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'])
        caption = ('Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % rmag)
        caption += ' (0 is good, near -1 or 1 is bad).'
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[dcrStacker, parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    displayDict = {'group': 'SRD Proper Motion', 'subgroup': subgroup, 'order': 0, 'caption': None}
    plotmaxVals = (1.0, 5.0)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Proper Motion Error (18k)')]
    summary.append(metrics.PercentileMetric(metricName='95th Percentile Proper Motion Error'))
    summary.extend(standardSummary())
    for (rmag, plotmax) in zip(rmags_pm, plotmaxVals):
        plotDict = {'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax}
        metric = metrics.ProperMotionMetric(metricName=('Proper Motion Error @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False)
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    for rmag in rmags_pm:
        metric = metrics.ProperMotionMetric(metricName=('Normalized Proper Motion @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=True)
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots)
        bundleList.append(bundle)
        displayDict['order'] += 1
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
879,015,945,918,942,300
Metrics for evaluating proper motion and parallax. Parameters ---------- colmap : dict or None, optional A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, optional The name of the simulated survey. Default is "opsim". nside : int, optional Nside for the healpix slicer. Default 64. extraSql : str or None, optional Additional sql constraint to apply to all metrics. extraMetadata : str or None, optional Additional metadata to apply to all results. ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker Optional dither stacker to use to define ra/dec columns. ditherkwargs: dict, optional Optional dictionary of kwargs for the dither stacker. Returns ------- metricBundleDict
rubin_sim/maf/batches/srdBatch.py
astrometryBatch
RileyWClarke/Flarubin
python
def astrometryBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, ditherStacker=None, ditherkwargs=None): 'Metrics for evaluating proper motion and parallax.\n\n Parameters\n ----------\n colmap : dict or None, optional\n A dictionary with a mapping of column names. Default will use OpsimV4 column names.\n runName : str, optional\n The name of the simulated survey. Default is "opsim".\n nside : int, optional\n Nside for the healpix slicer. Default 64.\n extraSql : str or None, optional\n Additional sql constraint to apply to all metrics.\n extraMetadata : str or None, optional\n Additional metadata to apply to all results.\n ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker\n Optional dither stacker to use to define ra/dec columns.\n ditherkwargs: dict, optional\n Optional dictionary of kwargs for the dither stacker.\n\n Returns\n -------\n metricBundleDict\n ' if (colmap is None): colmap = ColMapDict('fbs') bundleList = [] sql = metadata = 'All visits' if ((extraSql is not None) and (len(extraSql) > 0)): sql = extraSql if (extraMetadata is None): metadata = extraSql.replace('filter =', ).replace('filter=', ) metadata = metadata.replace('"', ).replace("'", ) if (extraMetadata is not None): metadata = extraMetadata subgroup = metadata (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(ditherStacker, colmap, ditherkwargs) metadata = combineMetadata(metadata, ditherMeta) rmags_para = [22.4, 24.0] rmags_pm = [20.5, 24.0] parallaxStacker = stackers.ParallaxFactorStacker(raCol=raCol, decCol=decCol, dateCol=colmap['mjd'], degrees=degrees) dcrStacker = stackers.DcrStacker(filterCol=colmap['filter'], altCol=colmap['alt'], degrees=degrees, raCol=raCol, decCol=decCol, lstCol=colmap['lst'], site='LSST', mjdCol=colmap['mjd']) slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': 'SRD Parallax', 'subgroup': subgroup, 'order': 0, 'caption': None} plotmaxVals = (2.0, 15.0) summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Parallax Error (18k)')] summary.append(metrics.PercentileMetric(percentile=95, metricName='95th Percentile Parallax Error')) summary.extend(standardSummary()) for (rmag, plotmax) in zip(rmags_para, plotmaxVals): plotDict = {'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax} metric = metrics.ParallaxMetric(metricName=('Parallax Error @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for rmag in rmags_para: metric = metrics.ParallaxMetric(metricName=('Normalized Parallax @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for rmag in rmags_para: metric = metrics.ParallaxCoverageMetric(metricName=('Parallax Coverage @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], 
mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom']) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for rmag in rmags_para: metric = metrics.ParallaxDcrDegenMetric(metricName=('Parallax-DCR degeneracy @ %.1f' % rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth']) caption = ('Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % rmag) caption += ' (0 is good, near -1 or 1 is bad).' bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[dcrStacker, parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 displayDict = {'group': 'SRD Proper Motion', 'subgroup': subgroup, 'order': 0, 'caption': None} plotmaxVals = (1.0, 5.0) summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Proper Motion Error (18k)')] summary.append(metrics.PercentileMetric(metricName='95th Percentile Proper Motion Error')) summary.extend(standardSummary()) for (rmag, plotmax) in zip(rmags_pm, plotmaxVals): plotDict = {'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax} metric = metrics.ProperMotionMetric(metricName=('Proper Motion Error @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for rmag in rmags_pm: metric = metrics.ProperMotionMetric(metricName=('Normalized Proper Motion @ %.1f' % rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) return mb.makeBundlesDictFromList(bundleList)
def rapidRevisitBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, ditherStacker=None, ditherkwargs=None):
    """Metrics for evaluating proper motion and parallax.

    Parameters
    ----------
    colmap : dict or None, optional
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, optional
        The name of the simulated survey. Default is "opsim".
    nside : int, optional
        Nside for the healpix slicer. Default 64.
    extraSql : str or None, optional
        Additional sql constraint to apply to all metrics.
    extraMetadata : str or None, optional
        Additional metadata to apply to all results.
    ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, optional
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if (colmap is None):
        colmap = ColMapDict('fbs')
    bundleList = []
    sql = ''
    metadata = 'All visits'
    if ((extraSql is not None) and (len(extraSql) > 0)):
        sql = extraSql
        if (extraMetadata is None):
            metadata = extraSql.replace('filter =', '').replace('filter=', '')
            metadata = metadata.replace('"', '').replace("'", '')
    if (extraMetadata is not None):
        metadata = extraMetadata
    subgroup = metadata
    (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(ditherStacker, colmap, ditherkwargs)
    metadata = combineMetadata(metadata, ditherMeta)
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    displayDict = {'group': 'SRD Rapid Revisits', 'subgroup': subgroup, 'order': 0, 'caption': None}
    dTmax = 30
    m2 = metrics.NRevisitsMetric(dT=dTmax, mjdCol=colmap['mjd'], normed=False, metricName='NumberOfQuickRevisits')
    plotDict = {'colorMin': 400, 'colorMax': 2000, 'xMin': 400, 'xMax': 2000}
    caption = ('Number of consecutive visits with return times faster than %.1f minutes, ' % dTmax)
    caption += 'in any filter, all proposals. '
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(m2, slicer, sql, plotDict=plotDict, plotFuncs=subsetPlots, stackerList=[ditherStacker], metadata=metadata, displayDict=displayDict, summaryMetrics=standardSummary(withCount=False))
    bundleList.append(bundle)
    displayDict['order'] += 1
    dTmin = (40.0 / 60.0)
    dTpairs = 20.0
    dTmax = 30.0
    nOne = 82
    nTwo = 28
    pixArea = float(hp.nside2pixarea(nside, degrees=True))
    scale = (pixArea * hp.nside2npix(nside))
    m1 = metrics.RapidRevisitMetric(metricName='RapidRevisits', mjdCol=colmap['mjd'], dTmin=(((dTmin / 60.0) / 60.0) / 24.0), dTpairs=((dTpairs / 60.0) / 24.0), dTmax=((dTmax / 60.0) / 24.0), minN1=nOne, minN2=nTwo)
    plotDict = {'xMin': 0, 'xMax': 1, 'colorMin': 0, 'colorMax': 1, 'logScale': False}
    cutoff1 = 0.9
    summaryStats = [metrics.FracAboveMetric(cutoff=cutoff1, scale=scale, metricName='Area (sq deg)')]
    caption = ('Rapid Revisit: area that receives at least %d visits between %.3f and %.1f minutes, ' % (nOne, dTmin, dTmax))
    caption += ('with at least %d of those visits falling between %.3f and %.1f minutes. ' % (nTwo, dTmin, dTpairs))
    caption += 'Summary statistic "Area" indicates the area on the sky which meets this requirement. (SRD design specification is 2000 sq deg).'
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(m1, slicer, sql, plotDict=plotDict, plotFuncs=subsetPlots, stackerList=[ditherStacker], metadata=metadata, displayDict=displayDict, summaryMetrics=summaryStats)
    bundleList.append(bundle)
    displayDict['order'] += 1
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
2,913,542,167,369,276,400
Metrics for evaluating proper motion and parallax. Parameters ---------- colmap : dict or None, optional A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, optional The name of the simulated survey. Default is "opsim". nside : int, optional Nside for the healpix slicer. Default 64. extraSql : str or None, optional Additional sql constraint to apply to all metrics. extraMetadata : str or None, optional Additional metadata to apply to all results. ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker Optional dither stacker to use to define ra/dec columns. ditherkwargs: dict, optional Optional dictionary of kwargs for the dither stacker. Returns ------- metricBundleDict
rubin_sim/maf/batches/srdBatch.py
rapidRevisitBatch
RileyWClarke/Flarubin
python
def rapidRevisitBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, ditherStacker=None, ditherkwargs=None): 'Metrics for evaluating proper motion and parallax.\n\n Parameters\n ----------\n colmap : dict or None, optional\n A dictionary with a mapping of column names. Default will use OpsimV4 column names.\n runName : str, optional\n The name of the simulated survey. Default is "opsim".\n nside : int, optional\n Nside for the healpix slicer. Default 64.\n extraSql : str or None, optional\n Additional sql constraint to apply to all metrics.\n extraMetadata : str or None, optional\n Additional metadata to apply to all results.\n ditherStacker: str or rubin_sim.maf.stackers.BaseDitherStacker\n Optional dither stacker to use to define ra/dec columns.\n ditherkwargs: dict, optional\n Optional dictionary of kwargs for the dither stacker.\n\n Returns\n -------\n metricBundleDict\n ' if (colmap is None): colmap = ColMapDict('fbs') bundleList = [] sql = metadata = 'All visits' if ((extraSql is not None) and (len(extraSql) > 0)): sql = extraSql if (extraMetadata is None): metadata = extraSql.replace('filter =', ).replace('filter=', ) metadata = metadata.replace('"', ).replace("'", ) if (extraMetadata is not None): metadata = extraMetadata subgroup = metadata (raCol, decCol, degrees, ditherStacker, ditherMeta) = radecCols(ditherStacker, colmap, ditherkwargs) metadata = combineMetadata(metadata, ditherMeta) slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': 'SRD Rapid Revisits', 'subgroup': subgroup, 'order': 0, 'caption': None} dTmax = 30 m2 = metrics.NRevisitsMetric(dT=dTmax, mjdCol=colmap['mjd'], normed=False, metricName='NumberOfQuickRevisits') plotDict = {'colorMin': 400, 'colorMax': 2000, 'xMin': 400, 'xMax': 2000} caption = ('Number of consecutive visits with return times faster than %.1f minutes, ' % dTmax) caption += 'in any filter, all proposals. ' displayDict['caption'] = caption bundle = mb.MetricBundle(m2, slicer, sql, plotDict=plotDict, plotFuncs=subsetPlots, stackerList=[ditherStacker], metadata=metadata, displayDict=displayDict, summaryMetrics=standardSummary(withCount=False)) bundleList.append(bundle) displayDict['order'] += 1 dTmin = (40.0 / 60.0) dTpairs = 20.0 dTmax = 30.0 nOne = 82 nTwo = 28 pixArea = float(hp.nside2pixarea(nside, degrees=True)) scale = (pixArea * hp.nside2npix(nside)) m1 = metrics.RapidRevisitMetric(metricName='RapidRevisits', mjdCol=colmap['mjd'], dTmin=(((dTmin / 60.0) / 60.0) / 24.0), dTpairs=((dTpairs / 60.0) / 24.0), dTmax=((dTmax / 60.0) / 24.0), minN1=nOne, minN2=nTwo) plotDict = {'xMin': 0, 'xMax': 1, 'colorMin': 0, 'colorMax': 1, 'logScale': False} cutoff1 = 0.9 summaryStats = [metrics.FracAboveMetric(cutoff=cutoff1, scale=scale, metricName='Area (sq deg)')] caption = ('Rapid Revisit: area that receives at least %d visits between %.3f and %.1f minutes, ' % (nOne, dTmin, dTmax)) caption += ('with at least %d of those visits falling between %.3f and %.1f minutes. ' % (nTwo, dTmin, dTpairs)) caption += 'Summary statistic "Area" indicates the area on the sky which meets this requirement. (SRD design specification is 2000 sq deg).' 
displayDict['caption'] = caption bundle = mb.MetricBundle(m1, slicer, sql, plotDict=plotDict, plotFuncs=subsetPlots, stackerList=[ditherStacker], metadata=metadata, displayDict=displayDict, summaryMetrics=summaryStats) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) return mb.makeBundlesDictFromList(bundleList)
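A minimal usage sketch for the batch above, assuming rapidRevisitBatch is re-exported from rubin_sim.maf.batches as the batch modules there usually are; the run name is illustrative, and only the function defined in this record plus ordinary dict iteration are used.
import rubin_sim.maf.batches as batches

# Build the SRD rapid-revisit bundles for a hypothetical run and list what was produced.
bundleDict = batches.rapidRevisitBatch(runName='example_run', nside=64)
for name, bundle in bundleDict.items():
    print(name)  # e.g. the NumberOfQuickRevisits and RapidRevisits healpix bundles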
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
1,702,168,743,392,494,600
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
intersight/model/compute_vmedia_relationship.py
additional_properties_type
CiscoDevNet/intersight-python
python
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'class_id': (str,), 'moid': (str,), 'selector': (str,), 'link': (str,), 'account_moid': (str,), 'create_time': (datetime,), 'domain_group_moid': (str,), 'mod_time': (datetime,), 'owners': ([str], none_type), 'shared_scope': (str,), 'tags': ([MoTag], none_type), 'version_context': (MoVersionContext,), 'ancestors': ([MoBaseMoRelationship], none_type), 'parent': (MoBaseMoRelationship,), 'permission_resources': ([MoBaseMoRelationship], none_type), 'display_names': (DisplayNames,), 'device_mo_id': (str,), 'dn': (str,), 'rn': (str,), 'enabled': (bool,), 'encryption': (bool,), 'low_power_usb': (bool,), 'compute_physical_unit': (ComputePhysicalRelationship,), 'inventory_device_info': (InventoryDeviceInfoRelationship,), 'mappings': ([ComputeMappingRelationship], none_type), 'registered_device': (AssetDeviceRegistrationRelationship,), 'object_type': (str,)}
6,070,496,696,298,718,000
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
intersight/model/compute_vmedia_relationship.py
openapi_types
CiscoDevNet/intersight-python
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'class_id': (str,), 'moid': (str,), 'selector': (str,), 'link': (str,), 'account_moid': (str,), 'create_time': (datetime,), 'domain_group_moid': (str,), 'mod_time': (datetime,), 'owners': ([str], none_type), 'shared_scope': (str,), 'tags': ([MoTag], none_type), 'version_context': (MoVersionContext,), 'ancestors': ([MoBaseMoRelationship], none_type), 'parent': (MoBaseMoRelationship,), 'permission_resources': ([MoBaseMoRelationship], none_type), 'display_names': (DisplayNames,), 'device_mo_id': (str,), 'dn': (str,), 'rn': (str,), 'enabled': (bool,), 'encryption': (bool,), 'low_power_usb': (bool,), 'compute_physical_unit': (ComputePhysicalRelationship,), 'inventory_device_info': (InventoryDeviceInfoRelationship,), 'mappings': ([ComputeMappingRelationship], none_type), 'registered_device': (AssetDeviceRegistrationRelationship,), 'object_type': (str,)}
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'ComputeVmediaRelationship - a model defined in OpenAPI\n\n Args:\n\n Keyword Args:\n class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "mo.MoRef", must be one of ["mo.MoRef", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n moid (str): The Moid of the referenced REST resource.. [optional] # noqa: E501\n selector (str): An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of \'moid\' by clients. 1. If \'moid\' is set this field is ignored. 1. If \'selector\' is set and \'moid\' is empty/absent from the request, Intersight determines the Moid of the resource matching the filter expression and populates it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq \'3AA8B7T11\'.. [optional] # noqa: E501\n link (str): A URL to an instance of the \'mo.MoRef\' class.. [optional] # noqa: E501\n account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501\n create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501\n domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501\n mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501\n owners ([str], none_type): [optional] # noqa: E501\n shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a \'shared\' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. 
[optional] # noqa: E501\n tags ([MoTag], none_type): [optional] # noqa: E501\n version_context (MoVersionContext): [optional] # noqa: E501\n ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501\n parent (MoBaseMoRelationship): [optional] # noqa: E501\n permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501\n display_names (DisplayNames): [optional] # noqa: E501\n device_mo_id (str): The database identifier of the registered device of an object.. [optional] # noqa: E501\n dn (str): The Distinguished Name unambiguously identifies an object in the system.. [optional] # noqa: E501\n rn (str): The Relative Name uniquely identifies an object within a given context.. [optional] # noqa: E501\n enabled (bool): State of the Virtual Media service on the server.. [optional] if omitted the server will use the default value of True # noqa: E501\n encryption (bool): If enabled, allows encryption of all Virtual Media communications.. [optional] # noqa: E501\n low_power_usb (bool): If enabled, the virtual drives appear on the boot selection menu after mapping the image and rebooting the host.. [optional] if omitted the server will use the default value of True # noqa: E501\n compute_physical_unit (ComputePhysicalRelationship): [optional] # noqa: E501\n inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501\n mappings ([ComputeMappingRelationship], none_type): An array of relationships to computeMapping resources.. [optional] # noqa: E501\n registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501\n object_type (str): The fully-qualified name of the remote type referred by this relationship.. [optional] # noqa: E501\n ' class_id = kwargs.get('class_id', 'mo.MoRef') _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' 
% (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes} required_args = {'class_id': class_id} model_args = {} model_args.update(required_args) model_args.update(kwargs) composed_info = validate_get_composed_info(constant_args, model_args, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] unused_args = composed_info[3] for (var_name, var_value) in required_args.items(): setattr(self, var_name, var_value) for (var_name, var_value) in kwargs.items(): if ((var_name in unused_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (not self._additional_properties_model_instances)): continue setattr(self, var_name, var_value)
-6,755,999,235,934,246,000
ComputeVmediaRelationship - a model defined in OpenAPI Args: Keyword Args: class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "mo.MoRef", must be one of ["mo.MoRef", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) moid (str): The Moid of the referenced REST resource.. [optional] # noqa: E501 selector (str): An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. 1. If 'moid' is set this field is ignored. 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the resource matching the filter expression and populates it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.. [optional] # noqa: E501 link (str): A URL to an instance of the 'mo.MoRef' class.. [optional] # noqa: E501 account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501 create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501 domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501 mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501 owners ([str], none_type): [optional] # noqa: E501 shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. 
[optional] # noqa: E501 tags ([MoTag], none_type): [optional] # noqa: E501 version_context (MoVersionContext): [optional] # noqa: E501 ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501 parent (MoBaseMoRelationship): [optional] # noqa: E501 permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501 display_names (DisplayNames): [optional] # noqa: E501 device_mo_id (str): The database identifier of the registered device of an object.. [optional] # noqa: E501 dn (str): The Distinguished Name unambiguously identifies an object in the system.. [optional] # noqa: E501 rn (str): The Relative Name uniquely identifies an object within a given context.. [optional] # noqa: E501 enabled (bool): State of the Virtual Media service on the server.. [optional] if omitted the server will use the default value of True # noqa: E501 encryption (bool): If enabled, allows encryption of all Virtual Media communications.. [optional] # noqa: E501 low_power_usb (bool): If enabled, the virtual drives appear on the boot selection menu after mapping the image and rebooting the host.. [optional] if omitted the server will use the default value of True # noqa: E501 compute_physical_unit (ComputePhysicalRelationship): [optional] # noqa: E501 inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501 mappings ([ComputeMappingRelationship], none_type): An array of relationships to computeMapping resources.. [optional] # noqa: E501 registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501 object_type (str): The fully-qualified name of the remote type referred by this relationship.. [optional] # noqa: E501
intersight/model/compute_vmedia_relationship.py
__init__
CiscoDevNet/intersight-python
python
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'ComputeVmediaRelationship - a model defined in OpenAPI\n\n Args:\n\n Keyword Args:\n class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "mo.MoRef", must be one of ["mo.MoRef", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n moid (str): The Moid of the referenced REST resource.. [optional] # noqa: E501\n selector (str): An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of \'moid\' by clients. 1. If \'moid\' is set this field is ignored. 1. If \'selector\' is set and \'moid\' is empty/absent from the request, Intersight determines the Moid of the resource matching the filter expression and populates it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq \'3AA8B7T11\'.. [optional] # noqa: E501\n link (str): A URL to an instance of the \'mo.MoRef\' class.. [optional] # noqa: E501\n account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501\n create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501\n domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501\n mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501\n owners ([str], none_type): [optional] # noqa: E501\n shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a \'shared\' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. 
[optional] # noqa: E501\n tags ([MoTag], none_type): [optional] # noqa: E501\n version_context (MoVersionContext): [optional] # noqa: E501\n ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501\n parent (MoBaseMoRelationship): [optional] # noqa: E501\n permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501\n display_names (DisplayNames): [optional] # noqa: E501\n device_mo_id (str): The database identifier of the registered device of an object.. [optional] # noqa: E501\n dn (str): The Distinguished Name unambiguously identifies an object in the system.. [optional] # noqa: E501\n rn (str): The Relative Name uniquely identifies an object within a given context.. [optional] # noqa: E501\n enabled (bool): State of the Virtual Media service on the server.. [optional] if omitted the server will use the default value of True # noqa: E501\n encryption (bool): If enabled, allows encryption of all Virtual Media communications.. [optional] # noqa: E501\n low_power_usb (bool): If enabled, the virtual drives appear on the boot selection menu after mapping the image and rebooting the host.. [optional] if omitted the server will use the default value of True # noqa: E501\n compute_physical_unit (ComputePhysicalRelationship): [optional] # noqa: E501\n inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501\n mappings ([ComputeMappingRelationship], none_type): An array of relationships to computeMapping resources.. [optional] # noqa: E501\n registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501\n object_type (str): The fully-qualified name of the remote type referred by this relationship.. [optional] # noqa: E501\n ' class_id = kwargs.get('class_id', 'mo.MoRef') _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' 
% (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes} required_args = {'class_id': class_id} model_args = {} model_args.update(required_args) model_args.update(kwargs) composed_info = validate_get_composed_info(constant_args, model_args, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] unused_args = composed_info[3] for (var_name, var_value) in required_args.items(): setattr(self, var_name, var_value) for (var_name, var_value) in kwargs.items(): if ((var_name in unused_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (not self._additional_properties_model_instances)): continue setattr(self, var_name, var_value)
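A hedged construction sketch based only on the keyword arguments documented above; the moid and object_type values are placeholders rather than real Intersight resources, and class_id falls back to its documented default of 'mo.MoRef'.
from intersight.model.compute_vmedia_relationship import ComputeVmediaRelationship

# Every field other than class_id is optional per the docstring.
rel = ComputeVmediaRelationship(
    moid='000000000000000000000000',   # placeholder Moid, not a real resource
    object_type='compute.Vmedia',      # assumed type string for this relationship
)
print(rel.moid)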
def conv3x3(in_planes, out_planes, stride=1): '3x3 convolution with padding' return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
-798,009,169,856,366,800
3x3 convolution with padding
ever/module/_hrnet.py
conv3x3
Bobholamovic/ever
python
def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
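Because padding=1 with a 3x3 kernel and stride 1 preserves spatial size, the helper can be sanity-checked in a few lines; the tensor shapes below are arbitrary.
import torch
import torch.nn as nn  # conv3x3 above is defined in terms of nn.Conv2d

conv = conv3x3(in_planes=64, out_planes=128, stride=1)
x = torch.randn(1, 64, 32, 32)
print(conv(x).shape)  # torch.Size([1, 128, 32, 32]); stride=2 would halve the 32x32 plane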
def read_go_deps(main_packages, build_tags): '\n read_go_deps returns a list of module dependencies in JSON format.\n Main modules are excluded; only dependencies are returned.\n\n Unlike `go list -m all`, this function excludes modules that are only\n required for running tests.\n ' go_list_args = ['go', 'list', '-deps', '-json'] if build_tags: go_list_args.extend(['-tags', build_tags]) output = subprocess.check_output((go_list_args + main_packages)).decode('utf-8') modules = {} decoder = json.JSONDecoder() while True: output = output.strip() if (not output): break (pkg, end) = decoder.raw_decode(output) output = output[end:] if ('Standard' in pkg): continue module = pkg['Module'] if ('Main' not in module): modules[module['Path']] = module return sorted(modules.values(), key=(lambda module: module['Path']))
-277,839,500,052,257,000
read_go_deps returns a list of module dependencies in JSON format. Main modules are excluded; only dependencies are returned. Unlike `go list -m all`, this function excludes modules that are only required for running tests.
script/generate_notice.py
read_go_deps
cyrille-leclerc/apm-server
python
def read_go_deps(main_packages, build_tags): '\n read_go_deps returns a list of module dependencies in JSON format.\n Main modules are excluded; only dependencies are returned.\n\n Unlike `go list -m all`, this function excludes modules that are only\n required for running tests.\n ' go_list_args = ['go', 'list', '-deps', '-json'] if build_tags: go_list_args.extend(['-tags', build_tags]) output = subprocess.check_output((go_list_args + main_packages)).decode('utf-8') modules = {} decoder = json.JSONDecoder() while True: output = output.strip() if (not output): break (pkg, end) = decoder.raw_decode(output) output = output[end:] if ('Standard' in pkg): continue module = pkg['Module'] if ('Main' not in module): modules[module['Path']] = module return sorted(modules.values(), key=(lambda module: module['Path']))
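A usage sketch under the assumptions stated in the docstring; the package path is purely illustrative and must point at a real main package inside a Go module for the `go list` subprocess to succeed.
# Print each dependency module and its version (test-only modules are excluded).
deps = read_go_deps(['./cmd/example'], build_tags=None)
for module in deps:
    print(module['Path'], module.get('Version', ''))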
def get_service(hass, config, discovery_info=None): 'Get the Facebook notification service.' return FacebookNotificationService(config[CONF_PAGE_ACCESS_TOKEN])
-8,924,997,349,872,512,000
Get the Facebook notification service.
homeassistant/components/notify/facebook.py
get_service
Anthonymcqueen21/home-assistant
python
def get_service(hass, config, discovery_info=None): return FacebookNotificationService(config[CONF_PAGE_ACCESS_TOKEN])
def __init__(self, access_token): 'Initialize the service.' self.page_access_token = access_token
5,738,748,362,617,202,000
Initialize the service.
homeassistant/components/notify/facebook.py
__init__
Anthonymcqueen21/home-assistant
python
def __init__(self, access_token): self.page_access_token = access_token
def send_message(self, message='', **kwargs): 'Send some message.' payload = {'access_token': self.page_access_token} targets = kwargs.get(ATTR_TARGET) data = kwargs.get(ATTR_DATA) body_message = {'text': message} if (data is not None): body_message.update(data) if ('attachment' in body_message): body_message.pop('text') if (not targets): _LOGGER.error('At least 1 target is required') return for target in targets: if target.startswith('+'): recipient = {'phone_number': target} else: recipient = {'id': target} body = {'recipient': recipient, 'message': body_message} import json resp = requests.post(BASE_URL, data=json.dumps(body), params=payload, headers={CONTENT_TYPE: CONTENT_TYPE_JSON}, timeout=10) if (resp.status_code != 200): obj = resp.json() error_message = obj['error']['message'] error_code = obj['error']['code'] _LOGGER.error('Error %s : %s (Code %s)', resp.status_code, error_message, error_code)
1,549,348,951,645,210,600
Send some message.
homeassistant/components/notify/facebook.py
send_message
Anthonymcqueen21/home-assistant
python
def send_message(self, message='', **kwargs): payload = {'access_token': self.page_access_token} targets = kwargs.get(ATTR_TARGET) data = kwargs.get(ATTR_DATA) body_message = {'text': message} if (data is not None): body_message.update(data) if ('attachment' in body_message): body_message.pop('text') if (not targets): _LOGGER.error('At least 1 target is required') return for target in targets: if target.startswith('+'): recipient = {'phone_number': target} else: recipient = {'id': target} body = {'recipient': recipient, 'message': body_message} import json resp = requests.post(BASE_URL, data=json.dumps(body), params=payload, headers={CONTENT_TYPE: CONTENT_TYPE_JSON}, timeout=10) if (resp.status_code != 200): obj = resp.json() error_message = obj['error']['message'] error_code = obj['error']['code'] _LOGGER.error('Error %s : %s (Code %s)', resp.status_code, error_message, error_code)
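A sketch of how this notify service would be exercised directly; the page access token and recipient PSID are placeholders, and ATTR_TARGET/ATTR_DATA resolve to the 'target'/'data' kwargs used below.
service = FacebookNotificationService('EAAB-placeholder-page-token')
# One PSID target and no extra data: the request body becomes {'text': 'Front door opened'}.
service.send_message('Front door opened', target=['1234567890123456'], data=None)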
def test_ppv(): 'Verifies correctness of the PPV calculation' nose.tools.eq_(ppv([1], [1]), 1.0) nose.tools.eq_(ppv([1, 1], [1, 0]), 1.0) nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 1, 1]), 0.5) nose.tools.eq_(ppv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0) nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 0, 1]), (2.0 / 3)) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 1], [1, 0]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 2], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 1], [1, 0, 2])))
-8,664,074,173,561,047,000
Verifies correctness of the PPV calculation
tests/test_metrics.py
test_ppv
MGHComputationalPathology/CalicoML
python
def test_ppv(): nose.tools.eq_(ppv([1], [1]), 1.0) nose.tools.eq_(ppv([1, 1], [1, 0]), 1.0) nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 1, 1]), 0.5) nose.tools.eq_(ppv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0) nose.tools.eq_(ppv([1, 0, 0, 1], [1, 1, 0, 1]), (2.0 / 3)) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2) nose.tools.eq_(ppv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 1], [1, 0]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 2], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : ppv([1, 0, 1], [1, 0, 2])))
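The assertions above pin down the usual definition PPV = TP / (TP + FP) over the predicted positives (NPV in the next record mirrors it with TN / (TN + FN)). A tiny reference implementation consistent with these expected values, not the library's own code, would be:
def ppv_reference(y_true, y_pred):
    # Positive predictive value: fraction of predicted positives that are truly positive.
    assert len(y_true) == len(y_pred)
    tp = sum(1 for t, p in zip(y_true, y_pred) if p == 1 and t == 1)
    fp = sum(1 for t, p in zip(y_true, y_pred) if p == 1 and t == 0)
    return tp / (tp + fp)

assert abs(ppv_reference([1, 0, 0, 1], [1, 1, 0, 1]) - 2.0 / 3) < 1e-9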
def test_npv(): 'Verifies correctness of the NPV calculation' nose.tools.eq_(npv([0], [0]), 1.0) nose.tools.eq_(npv([0, 0], [0, 1]), 1.0) nose.tools.eq_(npv([0, 1], [0, 0]), 0.5) nose.tools.eq_(npv([1, 0, 0, 1], [0, 0, 0, 0]), 0.5) nose.tools.eq_(npv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0) nose.tools.eq_(npv([0, 1, 1, 0], [0, 0, 1, 0]), (2.0 / 3)) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 1], [1, 0]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 2], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 1], [1, 0, 2])))
1,187,938,013,456,644,900
Verifies correctness of the NPV calculation
tests/test_metrics.py
test_npv
MGHComputationalPathology/CalicoML
python
def test_npv(): nose.tools.eq_(npv([0], [0]), 1.0) nose.tools.eq_(npv([0, 0], [0, 1]), 1.0) nose.tools.eq_(npv([0, 1], [0, 0]), 0.5) nose.tools.eq_(npv([1, 0, 0, 1], [0, 0, 0, 0]), 0.5) nose.tools.eq_(npv([1, 0, 0, 1], [0, 1, 1, 0]), 0.0) nose.tools.eq_(npv([0, 1, 1, 0], [0, 0, 1, 0]), (2.0 / 3)) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]), 1.0) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0, 1]), 0.8) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 1]), 0.6) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 1, 0, 1]), 0.4) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1]), 0.2) nose.tools.eq_(npv([1, 0, 1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]), 0.0) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 1], [1, 0]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 2], [1, 0, 1]))) nose.tools.assert_raises(AssertionError, (lambda : npv([1, 0, 1], [1, 0, 2])))
def test_roc(): 'Tests the ROC class' def checkme(y_true, y_pred, expected_auc): 'Tests the ROC for a single set of predictions. Mostly sanity checks since all the computation is done\n by scikit, which we assume is correct' roc = ROC.from_scores(y_true, y_pred) nose.tools.assert_almost_equal(roc.auc, expected_auc) nose.tools.ok_(all(((0 <= fpr_val <= 1) for fpr_val in roc.fpr))) nose.tools.ok_(all(((0 <= tpr_val <= 1) for tpr_val in roc.tpr))) nose.tools.assert_list_equal(list(roc.dataframe['tpr']), list(roc.tpr)) nose.tools.assert_list_equal(list(roc.dataframe['thresholds']), list(roc.thresholds)) for prop in ['fpr', 'tpr', 'thresholds']: nose.tools.assert_list_equal(list(roc.dataframe[prop]), list(getattr(roc, prop))) nose.tools.assert_greater_equal(len(roc.dataframe[prop]), 2) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], 1.0)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1], 0.0)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0], 0.5)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0, 0, 0], 0.75))
8,086,216,524,832,628,000
Tests the ROC class
tests/test_metrics.py
test_roc
MGHComputationalPathology/CalicoML
python
def test_roc(): def checkme(y_true, y_pred, expected_auc): 'Tests the ROC for a single set of predictions. Mostly sanity checks since all the computation is done\n by scikit, which we assume is correct' roc = ROC.from_scores(y_true, y_pred) nose.tools.assert_almost_equal(roc.auc, expected_auc) nose.tools.ok_(all(((0 <= fpr_val <= 1) for fpr_val in roc.fpr))) nose.tools.ok_(all(((0 <= tpr_val <= 1) for tpr_val in roc.tpr))) nose.tools.assert_list_equal(list(roc.dataframe['tpr']), list(roc.tpr)) nose.tools.assert_list_equal(list(roc.dataframe['thresholds']), list(roc.thresholds)) for prop in ['fpr', 'tpr', 'thresholds']: nose.tools.assert_list_equal(list(roc.dataframe[prop]), list(getattr(roc, prop))) nose.tools.assert_greater_equal(len(roc.dataframe[prop]), 2) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], 1.0)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1], 0.0)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0], 0.5)) (yield (checkme, [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 0, 0, 0], 0.75))
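Since the nested checkme docstring notes that ROC.from_scores delegates the curve computation to scikit-learn, the expected AUC values can be reproduced directly:
from sklearn.metrics import roc_auc_score

y_true = [1, 1, 1, 1, 0, 0, 0, 0]
print(roc_auc_score(y_true, [1, 1, 1, 1, 0, 0, 0, 0]))  # 1.0
print(roc_auc_score(y_true, [1, 0, 1, 0, 1, 0, 1, 0]))  # 0.5
print(roc_auc_score(y_true, [1, 1, 1, 0, 1, 0, 0, 0]))  # 0.75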
def test_auc_ci(): "Validates the AUC confidence interval by comparing with R's pROC" def checkme(y_true, y_pred): 'Test utility' roc = ROC.from_scores(y_true, y_pred) print(roc.auc_ci) np.testing.assert_allclose(roc.auc_ci.estimate, roc.auc, atol=0.01) proc = importr('pROC') r_ci_obj = proc.ci(proc.roc(FloatVector(y_true), FloatVector(y_pred), ci=True), method='bootstrap') r_ci_dict = dict(list(r_ci_obj.items())) np.testing.assert_allclose(r_ci_dict['2.5%'], roc.auc_ci.low, atol=0.02) np.testing.assert_allclose(r_ci_dict['97.5%'], roc.auc_ci.high, atol=0.02) np.random.seed(12648430) (yield (checkme, ([1, 1, 1, 1, 0, 0, 0, 0] * 10), ([1, 1, 1, 1, 0, 0, 0, 0] * 10))) (yield (checkme, ([1, 1, 1, 1, 0, 0, 0, 0] * 10), ([1, 0, 1, 0, 1, 0, 1, 0] * 10))) for _ in range(5): y_true = np.random.choice([0, 1], size=100) y_pred = np.random.normal(size=100) y_pred[(y_true == 1)] += np.abs(np.random.normal()) (yield (checkme, y_true, y_pred))
8,535,585,425,928,689,000
Validates the AUC confidence interval by comparing with R's pROC
tests/test_metrics.py
test_auc_ci
MGHComputationalPathology/CalicoML
python
def test_auc_ci(): def checkme(y_true, y_pred): 'Test utility' roc = ROC.from_scores(y_true, y_pred) print(roc.auc_ci) np.testing.assert_allclose(roc.auc_ci.estimate, roc.auc, atol=0.01) proc = importr('pROC') r_ci_obj = proc.ci(proc.roc(FloatVector(y_true), FloatVector(y_pred), ci=True), method='bootstrap') r_ci_dict = dict(list(r_ci_obj.items())) np.testing.assert_allclose(r_ci_dict['2.5%'], roc.auc_ci.low, atol=0.02) np.testing.assert_allclose(r_ci_dict['97.5%'], roc.auc_ci.high, atol=0.02) np.random.seed(12648430) (yield (checkme, ([1, 1, 1, 1, 0, 0, 0, 0] * 10), ([1, 1, 1, 1, 0, 0, 0, 0] * 10))) (yield (checkme, ([1, 1, 1, 1, 0, 0, 0, 0] * 10), ([1, 0, 1, 0, 1, 0, 1, 0] * 10))) for _ in range(5): y_true = np.random.choice([0, 1], size=100) y_pred = np.random.normal(size=100) y_pred[(y_true == 1)] += np.abs(np.random.normal()) (yield (checkme, y_true, y_pred))
def test_compute_averaged_metrics(): ' Tests compute_averaged_metrics function' y_truth = [0, 1, 2, 0, 1, 2] scores1 = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]] result1 = compute_averaged_metrics(y_truth, scores1, roc_auc_function) nose.tools.assert_almost_equal(1.0, result1, delta=1e-06) scores2 = [[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1]] result2 = compute_averaged_metrics(y_truth, scores2, roc_auc_function) nose.tools.assert_almost_equal(0.375, result2, delta=1e-06)
-7,717,467,559,827,050,000
Tests compute_averaged_metrics function
tests/test_metrics.py
test_compute_averaged_metrics
MGHComputationalPathology/CalicoML
python
def test_compute_averaged_metrics(): ' ' y_truth = [0, 1, 2, 0, 1, 2] scores1 = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]] result1 = compute_averaged_metrics(y_truth, scores1, roc_auc_function) nose.tools.assert_almost_equal(1.0, result1, delta=1e-06) scores2 = [[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1]] result2 = compute_averaged_metrics(y_truth, scores2, roc_auc_function) nose.tools.assert_almost_equal(0.375, result2, delta=1e-06)
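The expected value of 0.375 is consistent with a macro-averaged one-vs-rest AUC, which is presumably what compute_averaged_metrics does when handed roc_auc_function; that reading can be cross-checked with scikit-learn alone:
import numpy as np
from sklearn.metrics import roc_auc_score

y = [0, 1, 2, 0, 1, 2]
s = np.array([[0.1, 0.1, 0.8], [0.7, 0.2, 0.1], [0.1, 0.7, 0.2],
              [0.2, 0.1, 0.7], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1]])
# One-vs-rest AUC per class, then the unweighted mean across the three classes.
aucs = [roc_auc_score([int(t == c) for t in y], s[:, c]) for c in range(3)]
print(np.mean(aucs))  # 0.375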
def test_pearson(): ' Validate pearson correlation' X = np.asarray([[1, 2], [(- 2), 8], [3, 5]]) y = np.asarray([(- 1), (- 2), 0]) (rs_pearson, ps_pearson) = f_pearson(X, y) nose.tools.assert_almost_equal(0.07318639504032803, ps_pearson[0], delta=1e-06) nose.tools.assert_almost_equal(0.6666666666666666, ps_pearson[1], delta=1e-06) nose.tools.assert_almost_equal(0.993399267799, rs_pearson[0], delta=1e-06) nose.tools.assert_almost_equal((- 0.5), rs_pearson[1], delta=1e-06)
1,935,036,754,687,980,000
Validate pearson correlation
tests/test_metrics.py
test_pearson
MGHComputationalPathology/CalicoML
python
def test_pearson(): ' ' X = np.asarray([[1, 2], [(- 2), 8], [3, 5]]) y = np.asarray([(- 1), (- 2), 0]) (rs_pearson, ps_pearson) = f_pearson(X, y) nose.tools.assert_almost_equal(0.07318639504032803, ps_pearson[0], delta=1e-06) nose.tools.assert_almost_equal(0.6666666666666666, ps_pearson[1], delta=1e-06) nose.tools.assert_almost_equal(0.993399267799, rs_pearson[0], delta=1e-06) nose.tools.assert_almost_equal((- 0.5), rs_pearson[1], delta=1e-06)
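The expected r and p values for the first feature column can be reproduced with scipy's pearsonr, which is consistent with f_pearson applying that correlation column-wise:
from scipy.stats import pearsonr

r, p = pearsonr([1, -2, 3], [-1, -2, 0])  # first column of X against y
print(r, p)  # roughly 0.9934 and 0.0732, matching the values asserted above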
def test_accuracy_from_confusion_matrix(): ' test accuracy computations from confusion matrix ' y_truth = [0, 1, 2, 0, 1, 2] y_score = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.1, 0.8, 0.1], [0.1, 0.8, 0.1], [0.8, 0.1, 0.1]] y_pred = [0, 1, 2, 1, 1, 0] computed_confusion_matrix = confusion_matrix(y_truth, y_pred) accuracy = accuracy_from_confusion_matrix(y_truth, y_score, computed_confusion_matrix) nose.tools.assert_almost_equal(0.6666667, accuracy, delta=1e-06)
1,542,621,636,828,830,700
test accuracy computations from confusion matrix
tests/test_metrics.py
test_accuracy_from_confusion_matrix
MGHComputationalPathology/CalicoML
python
def test_accuracy_from_confusion_matrix(): ' ' y_truth = [0, 1, 2, 0, 1, 2] y_score = [[0.7, 0.2, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7], [0.1, 0.8, 0.1], [0.1, 0.8, 0.1], [0.8, 0.1, 0.1]] y_pred = [0, 1, 2, 1, 1, 0] computed_confusion_matrix = confusion_matrix(y_truth, y_pred) accuracy = accuracy_from_confusion_matrix(y_truth, y_score, computed_confusion_matrix) nose.tools.assert_almost_equal(0.6666667, accuracy, delta=1e-06)
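Whatever accuracy_from_confusion_matrix does with y_score internally, the expected 0.6666667 here is simply the diagonal mass of the confusion matrix: four of the six predictions are correct. A hedged cross-check:
import numpy as np
from sklearn.metrics import confusion_matrix

cm = confusion_matrix([0, 1, 2, 0, 1, 2], [0, 1, 2, 1, 1, 0])
print(np.trace(cm) / cm.sum())  # 0.666..., four correct predictions out of six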
def test_conditional_means_selector(): ' test ConditionalMeansSelector class ' cms = ConditionalMeansSelector(f_pearson) test_y = np.asarray([3, 2, 1, 0, 3, 2, 1, 0]) test_x = np.asarray([[0, 3], [5, 2], [9, 1], [13, 0], [0, 3], [5, 2], [9, 1], [13, 0]]) (rs_cond_means, ps_cond_means) = cms.selector_function(test_x, test_y) nose.tools.assert_almost_equal(1.0, rs_cond_means[0], delta=1e-06) nose.tools.assert_almost_equal(1.0, rs_cond_means[1], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means[0], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means[1], delta=1e-06) (rs_cond_means_wrong, _) = f_pearson(test_x, test_y) nose.tools.assert_not_almost_equal(1.0, rs_cond_means_wrong[0], delta=1e-06) cms_pairwise = ConditionalMeansSelector(pearsonr, True) (rs_cond_means_pw, ps_cond_means_pw) = cms_pairwise.selector_function(test_x, test_y) nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[0], delta=1e-06) nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[1], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[0], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[1], delta=1e-06)
5,418,517,911,469,496,000
test ConditionalMeansSelector class
tests/test_metrics.py
test_conditional_means_selector
MGHComputationalPathology/CalicoML
python
def test_conditional_means_selector(): ' ' cms = ConditionalMeansSelector(f_pearson) test_y = np.asarray([3, 2, 1, 0, 3, 2, 1, 0]) test_x = np.asarray([[0, 3], [5, 2], [9, 1], [13, 0], [0, 3], [5, 2], [9, 1], [13, 0]]) (rs_cond_means, ps_cond_means) = cms.selector_function(test_x, test_y) nose.tools.assert_almost_equal(1.0, rs_cond_means[0], delta=1e-06) nose.tools.assert_almost_equal(1.0, rs_cond_means[1], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means[0], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means[1], delta=1e-06) (rs_cond_means_wrong, _) = f_pearson(test_x, test_y) nose.tools.assert_not_almost_equal(1.0, rs_cond_means_wrong[0], delta=1e-06) cms_pairwise = ConditionalMeansSelector(pearsonr, True) (rs_cond_means_pw, ps_cond_means_pw) = cms_pairwise.selector_function(test_x, test_y) nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[0], delta=1e-06) nose.tools.assert_almost_equal(1.0, rs_cond_means_pw[1], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[0], delta=1e-06) nose.tools.assert_almost_equal(0.0, ps_cond_means_pw[1], delta=1e-06)
def checkme(y_true, y_pred, expected_auc): 'Tests the ROC for a single set of predictions. Mostly sanity checks since all the computation is done\n by scikit, which we assume is correct' roc = ROC.from_scores(y_true, y_pred) nose.tools.assert_almost_equal(roc.auc, expected_auc) nose.tools.ok_(all(((0 <= fpr_val <= 1) for fpr_val in roc.fpr))) nose.tools.ok_(all(((0 <= tpr_val <= 1) for tpr_val in roc.tpr))) nose.tools.assert_list_equal(list(roc.dataframe['tpr']), list(roc.tpr)) nose.tools.assert_list_equal(list(roc.dataframe['thresholds']), list(roc.thresholds)) for prop in ['fpr', 'tpr', 'thresholds']: nose.tools.assert_list_equal(list(roc.dataframe[prop]), list(getattr(roc, prop))) nose.tools.assert_greater_equal(len(roc.dataframe[prop]), 2)
-8,855,786,280,949,161,000
Tests the ROC for a single set of predictions. Mostly sanity checks since all the computation is done by scikit, which we assume is correct
tests/test_metrics.py
checkme
MGHComputationalPathology/CalicoML
python
def checkme(y_true, y_pred, expected_auc): 'Tests the ROC for a single set of predictions. Mostly sanity checks since all the computation is done\n by scikit, which we assume is correct' roc = ROC.from_scores(y_true, y_pred) nose.tools.assert_almost_equal(roc.auc, expected_auc) nose.tools.ok_(all(((0 <= fpr_val <= 1) for fpr_val in roc.fpr))) nose.tools.ok_(all(((0 <= tpr_val <= 1) for tpr_val in roc.tpr))) nose.tools.assert_list_equal(list(roc.dataframe['tpr']), list(roc.tpr)) nose.tools.assert_list_equal(list(roc.dataframe['thresholds']), list(roc.thresholds)) for prop in ['fpr', 'tpr', 'thresholds']: nose.tools.assert_list_equal(list(roc.dataframe[prop]), list(getattr(roc, prop))) nose.tools.assert_greater_equal(len(roc.dataframe[prop]), 2)
def checkme(y_true, y_pred): 'Test utility' roc = ROC.from_scores(y_true, y_pred) print(roc.auc_ci) np.testing.assert_allclose(roc.auc_ci.estimate, roc.auc, atol=0.01) proc = importr('pROC') r_ci_obj = proc.ci(proc.roc(FloatVector(y_true), FloatVector(y_pred), ci=True), method='bootstrap') r_ci_dict = dict(list(r_ci_obj.items())) np.testing.assert_allclose(r_ci_dict['2.5%'], roc.auc_ci.low, atol=0.02) np.testing.assert_allclose(r_ci_dict['97.5%'], roc.auc_ci.high, atol=0.02)
8,098,000,074,996,210,000
Test utility
tests/test_metrics.py
checkme
MGHComputationalPathology/CalicoML
python
def checkme(y_true, y_pred): roc = ROC.from_scores(y_true, y_pred) print(roc.auc_ci) np.testing.assert_allclose(roc.auc_ci.estimate, roc.auc, atol=0.01) proc = importr('pROC') r_ci_obj = proc.ci(proc.roc(FloatVector(y_true), FloatVector(y_pred), ci=True), method='bootstrap') r_ci_dict = dict(list(r_ci_obj.items())) np.testing.assert_allclose(r_ci_dict['2.5%'], roc.auc_ci.low, atol=0.02) np.testing.assert_allclose(r_ci_dict['97.5%'], roc.auc_ci.high, atol=0.02)
def request_hook(self, method, path, data, params, **kwargs): '\n Used by Jira Client to apply the jira-cloud authentication\n ' url_params = dict(parse_qs(urlsplit(path).query)) url_params.update((params or {})) path = path.split('?')[0] jwt_payload = {'iss': JIRA_KEY, 'iat': datetime.datetime.utcnow(), 'exp': (datetime.datetime.utcnow() + datetime.timedelta(seconds=(5 * 60))), 'qsh': get_query_hash(path, method.upper(), url_params)} encoded_jwt = jwt.encode(jwt_payload, self.shared_secret) params = dict(jwt=encoded_jwt, **(url_params or {})) request_spec = kwargs.copy() request_spec.update(dict(method=method, path=path, data=data, params=params)) return request_spec
631,998,840,742,974,600
Used by Jira Client to apply the jira-cloud authentication
src/sentry/integrations/jira/client.py
request_hook
YtvwlD/sentry
python
def request_hook(self, method, path, data, params, **kwargs): '\n \n ' url_params = dict(parse_qs(urlsplit(path).query)) url_params.update((params or {})) path = path.split('?')[0] jwt_payload = {'iss': JIRA_KEY, 'iat': datetime.datetime.utcnow(), 'exp': (datetime.datetime.utcnow() + datetime.timedelta(seconds=(5 * 60))), 'qsh': get_query_hash(path, method.upper(), url_params)} encoded_jwt = jwt.encode(jwt_payload, self.shared_secret) params = dict(jwt=encoded_jwt, **(url_params or {})) request_spec = kwargs.copy() request_spec.update(dict(method=method, path=path, data=data, params=params)) return request_spec
def request(self, method, path, data=None, params=None, **kwargs): '\n Use the request_hook method for our specific style of Jira to\n add authentication data and transform parameters.\n ' request_spec = self.jira_style.request_hook(method, path, data, params, **kwargs) return self._request(**request_spec)
3,131,367,285,858,472,400
Use the request_hook method for our specific style of Jira to add authentication data and transform parameters.
src/sentry/integrations/jira/client.py
request
YtvwlD/sentry
python
def request(self, method, path, data=None, params=None, **kwargs): '\n Use the request_hook method for our specific style of Jira to\n add authentication data and transform parameters.\n ' request_spec = self.jira_style.request_hook(method, path, data, params, **kwargs) return self._request(**request_spec)
def get_cached(self, url, params=None): '\n Basic Caching mechanism for Jira metadata which changes infrequently\n ' query = '' if params: query = json.dumps(params, sort_keys=True) key = (self.jira_style.cache_prefix + md5(url, query, self.base_url).hexdigest()) cached_result = cache.get(key) if (not cached_result): cached_result = self.get(url, params=params) cache.set(key, cached_result, 240) return cached_result
-6,390,656,553,114,833,000
Basic Caching mechanism for Jira metadata which changes infrequently
src/sentry/integrations/jira/client.py
get_cached
YtvwlD/sentry
python
def get_cached(self, url, params=None): '\n \n ' query = '' if params: query = json.dumps(params, sort_keys=True) key = (self.jira_style.cache_prefix + md5(url, query, self.base_url).hexdigest()) cached_result = cache.get(key) if (not cached_result): cached_result = self.get(url, params=params) cache.set(key, cached_result, 240) return cached_result
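The cache key above combines a per-style prefix with an md5 over the URL, the canonicalized params, and the base URL. A standalone sketch of the same keying idea, assuming the project's md5 helper concatenates its arguments the way hashlib is used below:
import hashlib
import json

def make_cache_key(prefix, url, params, base_url):
    # Sort keys so that logically identical param dicts map to the same cache key.
    query = json.dumps(params, sort_keys=True) if params else ''
    digest = hashlib.md5((url + query + base_url).encode('utf-8')).hexdigest()
    return prefix + digest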
def make_keras_optimizer_class(cls): 'Constructs a DP Keras optimizer class from an existing one.' class DPOptimizerClass(cls): 'Differentially private subclass of given class cls.\n\n The class tf.keras.optimizers.Optimizer has two methods to compute\n gradients, `_compute_gradients` and `get_gradients`. The first works\n with eager execution, while the second runs in graph mode and is used\n by canned estimators.\n\n Internally, DPOptimizerClass stores hyperparameters both individually\n and encapsulated in a `GaussianSumQuery` object for these two use cases.\n However, this should be invisible to users of this class.\n ' def __init__(self, l2_norm_clip, noise_multiplier, changing_clipping=False, num_microbatches=None, gradient_norm=None, *args, **kwargs): 'Initialize the DPOptimizerClass.\n\n Args:\n l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)\n noise_multiplier: Ratio of the standard deviation to the clipping norm\n num_microbatches: The number of microbatches into which each minibatch\n is split.\n ' super(DPOptimizerClass, self).__init__(*args, **kwargs) self._l2_norm_clip = l2_norm_clip self._norm_clip = tf.Variable(l2_norm_clip) self._noise_multiplier = noise_multiplier self._num_microbatches = num_microbatches self._dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip, (l2_norm_clip * noise_multiplier)) self._global_state = None self._was_dp_gradients_called = False self._changing_clipping = changing_clipping self.gradient_norm = gradient_norm def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): 'DP version of superclass method.' self._was_dp_gradients_called = True if ((not callable(loss)) and (tape is None)): raise ValueError('`tape` is required when a `Tensor` loss is passed.') tape = (tape if (tape is not None) else tf.GradientTape()) if callable(loss): with tape: if (not callable(var_list)): tape.watch(var_list) if callable(loss): loss = loss() microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) if callable(var_list): var_list = var_list() else: with tape: microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) var_list = tf.nest.flatten(var_list) with tf.keras.backend.name_scope((self._name + '/gradients')): jacobian = tape.jacobian(microbatch_losses, var_list) if self._changing_clipping: if False: self._norm_clip.assign_add(0.002) tf.print('cur C:', self._norm_clip.value(), output_stream=sys.stdout) else: gr_norm = jacobian.copy() for i in range(len(gr_norm)): gr_norm[i] = tf.norm(gr_norm[i]) gr_mean = tf.math.reduce_mean(gr_norm) self._norm_clip.assign(gr_mean) C = self._norm_clip.value() tf.print('cur C:', C, output_stream=sys.stdout) def clip_gradients(g): return tf.clip_by_global_norm(g, self._norm_clip.value())[0] clipped_gradients = tf.map_fn(clip_gradients, jacobian) def reduce_noise_normalize_batch(g): summed_gradient = tf.reduce_sum(g, axis=0) noise_stddev = (self._l2_norm_clip * self._noise_multiplier) noise = tf.random.normal(tf.shape(input=summed_gradient), stddev=noise_stddev) noised_gradient = tf.add(summed_gradient, noise) return tf.truediv(noised_gradient, self._num_microbatches) final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch, clipped_gradients) return list(zip(final_gradients, var_list)) def get_gradients(self, loss, params): if self._changing_clipping: self._l2_norm_clip *= 0.99 tf.print('cur C:', self._l2_norm_clip, output_stream=sys.stdout) 'DP version of superclass method.' 
self._was_dp_gradients_called = True if (self._global_state is None): self._global_state = self._dp_sum_query.initial_global_state() microbatch_losses = tf.reshape(loss, [self._num_microbatches, (- 1)]) sample_params = self._dp_sum_query.derive_sample_params(self._global_state) def process_microbatch(i, sample_state): 'Process one microbatch (record) with privacy helper.' mean_loss = tf.reduce_mean(input_tensor=tf.gather(microbatch_losses, [i])) grads = tf.gradients(mean_loss, params) sample_state = self._dp_sum_query.accumulate_record(sample_params, sample_state, grads) return sample_state sample_state = self._dp_sum_query.initial_sample_state(params) for idx in range(self._num_microbatches): sample_state = process_microbatch(idx, sample_state) (grad_sums, self._global_state) = self._dp_sum_query.get_noised_result(sample_state, self._global_state) def normalize(v): try: return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32)) except TypeError: return None final_grads = tf.nest.map_structure(normalize, grad_sums) return final_grads def apply_gradients(self, grads_and_vars, global_step=None, name=None): assert self._was_dp_gradients_called, 'Neither _compute_gradients() or get_gradients() on the differentially private optimizer was called. This means the training is not differentially private. It may be the case that you need to upgrade to TF 2.4 or higher to use this particular optimizer.' return super(DPOptimizerClass, self).apply_gradients(grads_and_vars, global_step, name) return DPOptimizerClass
-3,486,794,200,675,963,400
Constructs a DP Keras optimizer class from an existing one.
tutorials/dp_optimizer_adp.py
make_keras_optimizer_class
Jerry-li-uw/privacy
python
def make_keras_optimizer_class(cls): class DPOptimizerClass(cls): 'Differentially private subclass of given class cls.\n\n The class tf.keras.optimizers.Optimizer has two methods to compute\n gradients, `_compute_gradients` and `get_gradients`. The first works\n with eager execution, while the second runs in graph mode and is used\n by canned estimators.\n\n Internally, DPOptimizerClass stores hyperparameters both individually\n and encapsulated in a `GaussianSumQuery` object for these two use cases.\n However, this should be invisible to users of this class.\n ' def __init__(self, l2_norm_clip, noise_multiplier, changing_clipping=False, num_microbatches=None, gradient_norm=None, *args, **kwargs): 'Initialize the DPOptimizerClass.\n\n Args:\n l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)\n noise_multiplier: Ratio of the standard deviation to the clipping norm\n num_microbatches: The number of microbatches into which each minibatch\n is split.\n ' super(DPOptimizerClass, self).__init__(*args, **kwargs) self._l2_norm_clip = l2_norm_clip self._norm_clip = tf.Variable(l2_norm_clip) self._noise_multiplier = noise_multiplier self._num_microbatches = num_microbatches self._dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip, (l2_norm_clip * noise_multiplier)) self._global_state = None self._was_dp_gradients_called = False self._changing_clipping = changing_clipping self.gradient_norm = gradient_norm def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): 'DP version of superclass method.' self._was_dp_gradients_called = True if ((not callable(loss)) and (tape is None)): raise ValueError('`tape` is required when a `Tensor` loss is passed.') tape = (tape if (tape is not None) else tf.GradientTape()) if callable(loss): with tape: if (not callable(var_list)): tape.watch(var_list) if callable(loss): loss = loss() microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) if callable(var_list): var_list = var_list() else: with tape: microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) var_list = tf.nest.flatten(var_list) with tf.keras.backend.name_scope((self._name + '/gradients')): jacobian = tape.jacobian(microbatch_losses, var_list) if self._changing_clipping: if False: self._norm_clip.assign_add(0.002) tf.print('cur C:', self._norm_clip.value(), output_stream=sys.stdout) else: gr_norm = jacobian.copy() for i in range(len(gr_norm)): gr_norm[i] = tf.norm(gr_norm[i]) gr_mean = tf.math.reduce_mean(gr_norm) self._norm_clip.assign(gr_mean) C = self._norm_clip.value() tf.print('cur C:', C, output_stream=sys.stdout) def clip_gradients(g): return tf.clip_by_global_norm(g, self._norm_clip.value())[0] clipped_gradients = tf.map_fn(clip_gradients, jacobian) def reduce_noise_normalize_batch(g): summed_gradient = tf.reduce_sum(g, axis=0) noise_stddev = (self._l2_norm_clip * self._noise_multiplier) noise = tf.random.normal(tf.shape(input=summed_gradient), stddev=noise_stddev) noised_gradient = tf.add(summed_gradient, noise) return tf.truediv(noised_gradient, self._num_microbatches) final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch, clipped_gradients) return list(zip(final_gradients, var_list)) def get_gradients(self, loss, params): if self._changing_clipping: self._l2_norm_clip *= 0.99 tf.print('cur C:', self._l2_norm_clip, output_stream=sys.stdout) 'DP version of superclass method.' 
self._was_dp_gradients_called = True if (self._global_state is None): self._global_state = self._dp_sum_query.initial_global_state() microbatch_losses = tf.reshape(loss, [self._num_microbatches, (- 1)]) sample_params = self._dp_sum_query.derive_sample_params(self._global_state) def process_microbatch(i, sample_state): 'Process one microbatch (record) with privacy helper.' mean_loss = tf.reduce_mean(input_tensor=tf.gather(microbatch_losses, [i])) grads = tf.gradients(mean_loss, params) sample_state = self._dp_sum_query.accumulate_record(sample_params, sample_state, grads) return sample_state sample_state = self._dp_sum_query.initial_sample_state(params) for idx in range(self._num_microbatches): sample_state = process_microbatch(idx, sample_state) (grad_sums, self._global_state) = self._dp_sum_query.get_noised_result(sample_state, self._global_state) def normalize(v): try: return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32)) except TypeError: return None final_grads = tf.nest.map_structure(normalize, grad_sums) return final_grads def apply_gradients(self, grads_and_vars, global_step=None, name=None): assert self._was_dp_gradients_called, 'Neither _compute_gradients() or get_gradients() on the differentially private optimizer was called. This means the training is not differentially private. It may be the case that you need to upgrade to TF 2.4 or higher to use this particular optimizer.' return super(DPOptimizerClass, self).apply_gradients(grads_and_vars, global_step, name) return DPOptimizerClass
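A minimal usage sketch for the optimizer factory documented above — not part of the record; it assumes TensorFlow 2.x, that make_keras_optimizer_class is importable from the tutorial module (tutorials/dp_optimizer_adp.py), and that the batch size is divisible by num_microbatches; the hyperparameter values are arbitrary:

    import tensorflow as tf

    # Wrap a standard Keras optimizer with the DP behaviour shown above.
    DPKerasSGD = make_keras_optimizer_class(tf.keras.optimizers.SGD)
    optimizer = DPKerasSGD(
        l2_norm_clip=1.0,         # max L2 norm of each microbatch gradient
        noise_multiplier=1.1,     # noise stddev = noise_multiplier * l2_norm_clip
        num_microbatches=250,     # each minibatch is split into this many microbatches
        changing_clipping=True,   # enable the adaptive clipping norm used in this tutorial
        learning_rate=0.15,       # forwarded to tf.keras.optimizers.SGD
    )
    # The loss must stay unreduced (one value per example) so the optimizer
    # can reshape it into microbatches.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction=tf.keras.losses.Reduction.NONE)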
def __init__(self, l2_norm_clip, noise_multiplier, changing_clipping=False, num_microbatches=None, gradient_norm=None, *args, **kwargs): 'Initialize the DPOptimizerClass.\n\n Args:\n l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)\n noise_multiplier: Ratio of the standard deviation to the clipping norm\n num_microbatches: The number of microbatches into which each minibatch\n is split.\n ' super(DPOptimizerClass, self).__init__(*args, **kwargs) self._l2_norm_clip = l2_norm_clip self._norm_clip = tf.Variable(l2_norm_clip) self._noise_multiplier = noise_multiplier self._num_microbatches = num_microbatches self._dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip, (l2_norm_clip * noise_multiplier)) self._global_state = None self._was_dp_gradients_called = False self._changing_clipping = changing_clipping self.gradient_norm = gradient_norm
-7,996,842,349,113,027,000
Initialize the DPOptimizerClass. Args: l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients) noise_multiplier: Ratio of the standard deviation to the clipping norm num_microbatches: The number of microbatches into which each minibatch is split.
tutorials/dp_optimizer_adp.py
__init__
Jerry-li-uw/privacy
python
def __init__(self, l2_norm_clip, noise_multiplier, changing_clipping=False, num_microbatches=None, gradient_norm=None, *args, **kwargs): 'Initialize the DPOptimizerClass.\n\n Args:\n l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)\n noise_multiplier: Ratio of the standard deviation to the clipping norm\n num_microbatches: The number of microbatches into which each minibatch\n is split.\n ' super(DPOptimizerClass, self).__init__(*args, **kwargs) self._l2_norm_clip = l2_norm_clip self._norm_clip = tf.Variable(l2_norm_clip) self._noise_multiplier = noise_multiplier self._num_microbatches = num_microbatches self._dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip, (l2_norm_clip * noise_multiplier)) self._global_state = None self._was_dp_gradients_called = False self._changing_clipping = changing_clipping self.gradient_norm = gradient_norm
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): 'DP version of superclass method.' self._was_dp_gradients_called = True if ((not callable(loss)) and (tape is None)): raise ValueError('`tape` is required when a `Tensor` loss is passed.') tape = (tape if (tape is not None) else tf.GradientTape()) if callable(loss): with tape: if (not callable(var_list)): tape.watch(var_list) if callable(loss): loss = loss() microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) if callable(var_list): var_list = var_list() else: with tape: microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) var_list = tf.nest.flatten(var_list) with tf.keras.backend.name_scope((self._name + '/gradients')): jacobian = tape.jacobian(microbatch_losses, var_list) if self._changing_clipping: if False: self._norm_clip.assign_add(0.002) tf.print('cur C:', self._norm_clip.value(), output_stream=sys.stdout) else: gr_norm = jacobian.copy() for i in range(len(gr_norm)): gr_norm[i] = tf.norm(gr_norm[i]) gr_mean = tf.math.reduce_mean(gr_norm) self._norm_clip.assign(gr_mean) C = self._norm_clip.value() tf.print('cur C:', C, output_stream=sys.stdout) def clip_gradients(g): return tf.clip_by_global_norm(g, self._norm_clip.value())[0] clipped_gradients = tf.map_fn(clip_gradients, jacobian) def reduce_noise_normalize_batch(g): summed_gradient = tf.reduce_sum(g, axis=0) noise_stddev = (self._l2_norm_clip * self._noise_multiplier) noise = tf.random.normal(tf.shape(input=summed_gradient), stddev=noise_stddev) noised_gradient = tf.add(summed_gradient, noise) return tf.truediv(noised_gradient, self._num_microbatches) final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch, clipped_gradients) return list(zip(final_gradients, var_list))
5,076,299,590,624,403,000
DP version of superclass method.
tutorials/dp_optimizer_adp.py
_compute_gradients
Jerry-li-uw/privacy
python
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None): self._was_dp_gradients_called = True if ((not callable(loss)) and (tape is None)): raise ValueError('`tape` is required when a `Tensor` loss is passed.') tape = (tape if (tape is not None) else tf.GradientTape()) if callable(loss): with tape: if (not callable(var_list)): tape.watch(var_list) if callable(loss): loss = loss() microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) if callable(var_list): var_list = var_list() else: with tape: microbatch_losses = tf.reduce_mean(tf.reshape(loss, [self._num_microbatches, (- 1)]), axis=1) var_list = tf.nest.flatten(var_list) with tf.keras.backend.name_scope((self._name + '/gradients')): jacobian = tape.jacobian(microbatch_losses, var_list) if self._changing_clipping: if False: self._norm_clip.assign_add(0.002) tf.print('cur C:', self._norm_clip.value(), output_stream=sys.stdout) else: gr_norm = jacobian.copy() for i in range(len(gr_norm)): gr_norm[i] = tf.norm(gr_norm[i]) gr_mean = tf.math.reduce_mean(gr_norm) self._norm_clip.assign(gr_mean) C = self._norm_clip.value() tf.print('cur C:', C, output_stream=sys.stdout) def clip_gradients(g): return tf.clip_by_global_norm(g, self._norm_clip.value())[0] clipped_gradients = tf.map_fn(clip_gradients, jacobian) def reduce_noise_normalize_batch(g): summed_gradient = tf.reduce_sum(g, axis=0) noise_stddev = (self._l2_norm_clip * self._noise_multiplier) noise = tf.random.normal(tf.shape(input=summed_gradient), stddev=noise_stddev) noised_gradient = tf.add(summed_gradient, noise) return tf.truediv(noised_gradient, self._num_microbatches) final_gradients = tf.nest.map_structure(reduce_noise_normalize_batch, clipped_gradients) return list(zip(final_gradients, var_list))
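A sketch of how the eager-mode path above is normally exercised through minimize(); model, x_batch and y_batch are placeholders for objects defined elsewhere, and optimizer is assumed to be an instance of the DP optimizer class built above:

    import tensorflow as tf

    def vector_loss():
        logits = model(x_batch, training=True)
        # Per-example losses; _compute_gradients reshapes them into microbatches.
        return tf.keras.losses.sparse_categorical_crossentropy(
            y_batch, logits, from_logits=True)

    # minimize() calls _compute_gradients() and then apply_gradients().
    optimizer.minimize(vector_loss, var_list=model.trainable_variables)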
def process_microbatch(i, sample_state): 'Process one microbatch (record) with privacy helper.' mean_loss = tf.reduce_mean(input_tensor=tf.gather(microbatch_losses, [i])) grads = tf.gradients(mean_loss, params) sample_state = self._dp_sum_query.accumulate_record(sample_params, sample_state, grads) return sample_state
-7,686,618,661,728,659,000
Process one microbatch (record) with privacy helper.
tutorials/dp_optimizer_adp.py
process_microbatch
Jerry-li-uw/privacy
python
def process_microbatch(i, sample_state): mean_loss = tf.reduce_mean(input_tensor=tf.gather(microbatch_losses, [i])) grads = tf.gradients(mean_loss, params) sample_state = self._dp_sum_query.accumulate_record(sample_params, sample_state, grads) return sample_state
def _preprocess_graph(G, weight): 'Compute edge weights and eliminate zero-weight edges.\n ' if G.is_directed(): H = nx.MultiGraph() H.add_nodes_from(G) H.add_weighted_edges_from(((u, v, e.get(weight, 1.0)) for (u, v, e) in G.edges(data=True) if (u != v)), weight=weight) G = H if (not G.is_multigraph()): edges = ((u, v, abs(e.get(weight, 1.0))) for (u, v, e) in G.edges(data=True) if (u != v)) else: edges = ((u, v, sum((abs(e.get(weight, 1.0)) for e in G[u][v].values()))) for (u, v) in G.edges() if (u != v)) H = nx.Graph() H.add_nodes_from(G) H.add_weighted_edges_from(((u, v, e) for (u, v, e) in edges if (e != 0))) return H
-7,964,628,692,638,116,000
Compute edge weights and eliminate zero-weight edges.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
_preprocess_graph
AlexKovrigin/facerecognition
python
def _preprocess_graph(G, weight): '\n ' if G.is_directed(): H = nx.MultiGraph() H.add_nodes_from(G) H.add_weighted_edges_from(((u, v, e.get(weight, 1.0)) for (u, v, e) in G.edges(data=True) if (u != v)), weight=weight) G = H if (not G.is_multigraph()): edges = ((u, v, abs(e.get(weight, 1.0))) for (u, v, e) in G.edges(data=True) if (u != v)) else: edges = ((u, v, sum((abs(e.get(weight, 1.0)) for e in G[u][v].values()))) for (u, v) in G.edges() if (u != v)) H = nx.Graph() H.add_nodes_from(G) H.add_weighted_edges_from(((u, v, e) for (u, v, e) in edges if (e != 0))) return H
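An illustrative call for the preprocessing helper above; the graph is made up, and the private helper is assumed to be importable from the module path shown in the record:

    import networkx as nx
    from networkx.linalg.algebraicconnectivity import _preprocess_graph

    G = nx.DiGraph()
    G.add_edge('a', 'b', weight=2.0)
    G.add_edge('b', 'a', weight=3.0)   # antiparallel edge: weights are summed
    G.add_edge('b', 'c', weight=0.0)   # zero-weight edge: dropped
    H = _preprocess_graph(G, 'weight')
    # H is an undirected Graph with H['a']['b']['weight'] == 5.0; 'c' is kept but isolated.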
def _rcm_estimate(G, nodelist): 'Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.\n ' G = G.subgraph(nodelist) order = reverse_cuthill_mckee_ordering(G) n = len(nodelist) index = dict(zip(nodelist, range(n))) x = ndarray(n, dtype=float) for (i, u) in enumerate(order): x[index[u]] = i x -= ((n - 1) / 2.0) return x
1,117,101,656,544,271,700
Estimate the Fiedler vector using the reverse Cuthill-McKee ordering.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
_rcm_estimate
AlexKovrigin/facerecognition
python
def _rcm_estimate(G, nodelist): '\n ' G = G.subgraph(nodelist) order = reverse_cuthill_mckee_ordering(G) n = len(nodelist) index = dict(zip(nodelist, range(n))) x = ndarray(n, dtype=float) for (i, u) in enumerate(order): x[index[u]] = i x -= ((n - 1) / 2.0) return x
def _tracemin_fiedler(L, X, normalized, tol, method): "Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph. This function starts with the Laplacian L, not the Graph.\n\n Parameters\n ----------\n L : Laplacian of a possibly weighted or normalized, but undirected graph\n\n X : Initial guess for a solution. Usually a matrix of random numbers.\n This function allows more than one column in X to identify more than\n one eigenvector if desired.\n\n normalized : bool\n Whether the normalized Laplacian matrix is used.\n\n tol : float\n Tolerance of relative residual in eigenvalue computation.\n Warning: There is no limit on number of iterations.\n\n method : string\n Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'.\n Otherwise exception is raised.\n\n Returns\n -------\n sigma, X : Two NumPy arrays of floats.\n The lowest eigenvalues and corresponding eigenvectors of L.\n The size of input X determines the size of these outputs.\n As this is for Fiedler vectors, the zero eigenvalue (and\n constant eigenvector) are avoided.\n " n = X.shape[0] if normalized: e = sqrt(L.diagonal()) D = spdiags((1.0 / e), [0], n, n, format='csr') L = ((D * L) * D) e *= (1.0 / norm(e, 2)) if normalized: def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (dot(X[:, j], e) * e) else: def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (X[:, j].sum() / n) if (method == 'tracemin_pcg'): D = L.diagonal().astype(float) solver = _PCGSolver((lambda x: (L * x)), (lambda x: (D * x))) elif ((method == 'tracemin_chol') or (method == 'tracemin_lu')): A = csc_matrix(L, dtype=float, copy=True) i = (A.indptr[1:] - A.indptr[:(- 1)]).argmax() A[(i, i)] = float('inf') if (method == 'tracemin_chol'): solver = _CholeskySolver(A) else: solver = _LUSolver(A) else: raise nx.NetworkXError(('Unknown linear system solver: ' + method)) Lnorm = abs(L).sum(axis=1).flatten().max() project(X) W = asmatrix(ndarray(X.shape, order='F')) while True: X = qr(X)[0] W[:, :] = (L * X) H = (X.T * W) (sigma, Y) = eigh(H, overwrite_a=True) X *= Y res = (dasum(((W * asmatrix(Y)[:, 0]) - (sigma[0] * X[:, 0]))) / Lnorm) if (res < tol): break W[:, :] = solver.solve(X, tol) X = (inv((W.T * X)) * W.T).T project(X) return (sigma, asarray(X))
-5,370,466,597,142,446,000
Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm. The Fiedler vector of a connected undirected graph is the eigenvector corresponding to the second smallest eigenvalue of the Laplacian matrix of the graph. This function starts with the Laplacian L, not the Graph. Parameters ---------- L : Laplacian of a possibly weighted or normalized, but undirected graph X : Initial guess for a solution. Usually a matrix of random numbers. This function allows more than one column in X to identify more than one eigenvector if desired. normalized : bool Whether the normalized Laplacian matrix is used. tol : float Tolerance of relative residual in eigenvalue computation. Warning: There is no limit on the number of iterations. method : string Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'. Otherwise an exception is raised. Returns ------- sigma, X : Two NumPy arrays of floats. The lowest eigenvalues and corresponding eigenvectors of L. The size of input X determines the size of these outputs. As this is for Fiedler vectors, the zero eigenvalue (and constant eigenvector) are avoided.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
_tracemin_fiedler
AlexKovrigin/facerecognition
python
def _tracemin_fiedler(L, X, normalized, tol, method): "Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph. This function starts with the Laplacian L, not the Graph.\n\n Parameters\n ----------\n L : Laplacian of a possibly weighted or normalized, but undirected graph\n\n X : Initial guess for a solution. Usually a matrix of random numbers.\n This function allows more than one column in X to identify more than\n one eigenvector if desired.\n\n normalized : bool\n Whether the normalized Laplacian matrix is used.\n\n tol : float\n Tolerance of relative residual in eigenvalue computation.\n Warning: There is no limit on number of iterations.\n\n method : string\n Should be 'tracemin_pcg', 'tracemin_chol' or 'tracemin_lu'.\n Otherwise exception is raised.\n\n Returns\n -------\n sigma, X : Two NumPy arrays of floats.\n The lowest eigenvalues and corresponding eigenvectors of L.\n The size of input X determines the size of these outputs.\n As this is for Fiedler vectors, the zero eigenvalue (and\n constant eigenvector) are avoided.\n " n = X.shape[0] if normalized: e = sqrt(L.diagonal()) D = spdiags((1.0 / e), [0], n, n, format='csr') L = ((D * L) * D) e *= (1.0 / norm(e, 2)) if normalized: def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (dot(X[:, j], e) * e) else: def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (X[:, j].sum() / n) if (method == 'tracemin_pcg'): D = L.diagonal().astype(float) solver = _PCGSolver((lambda x: (L * x)), (lambda x: (D * x))) elif ((method == 'tracemin_chol') or (method == 'tracemin_lu')): A = csc_matrix(L, dtype=float, copy=True) i = (A.indptr[1:] - A.indptr[:(- 1)]).argmax() A[(i, i)] = float('inf') if (method == 'tracemin_chol'): solver = _CholeskySolver(A) else: solver = _LUSolver(A) else: raise nx.NetworkXError(('Unknown linear system solver: ' + method)) Lnorm = abs(L).sum(axis=1).flatten().max() project(X) W = asmatrix(ndarray(X.shape, order='F')) while True: X = qr(X)[0] W[:, :] = (L * X) H = (X.T * W) (sigma, Y) = eigh(H, overwrite_a=True) X *= Y res = (dasum(((W * asmatrix(Y)[:, 0]) - (sigma[0] * X[:, 0]))) / Lnorm) if (res < tol): break W[:, :] = solver.solve(X, tol) X = (inv((W.T * X)) * W.T).T project(X) return (sigma, asarray(X))
def _get_fiedler_func(method): 'Return a function that solves the Fiedler eigenvalue problem.\n ' if (method == 'tracemin'): method = 'tracemin_pcg' if (method in ('tracemin_pcg', 'tracemin_chol', 'tracemin_lu')): def find_fiedler(L, x, normalized, tol): q = (1 if (method == 'tracemin_pcg') else min(4, (L.shape[0] - 1))) X = asmatrix(normal(size=(q, L.shape[0]))).T (sigma, X) = _tracemin_fiedler(L, X, normalized, tol, method) return (sigma[0], X[:, 0]) elif ((method == 'lanczos') or (method == 'lobpcg')): def find_fiedler(L, x, normalized, tol): L = csc_matrix(L, dtype=float) n = L.shape[0] if normalized: D = spdiags((1.0 / sqrt(L.diagonal())), [0], n, n, format='csc') L = ((D * L) * D) if ((method == 'lanczos') or (n < 10)): (sigma, X) = eigsh(L, 2, which='SM', tol=tol, return_eigenvectors=True) return (sigma[1], X[:, 1]) else: X = asarray(asmatrix(x).T) M = spdiags((1.0 / L.diagonal()), [0], n, n) Y = ones(n) if normalized: Y /= D.diagonal() (sigma, X) = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol, maxiter=n, largest=False) return (sigma[0], X[:, 0]) else: raise nx.NetworkXError(("unknown method '%s'." % method)) return find_fiedler
700,050,216,056,586,100
Return a function that solves the Fiedler eigenvalue problem.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
_get_fiedler_func
AlexKovrigin/facerecognition
python
def _get_fiedler_func(method): '\n ' if (method == 'tracemin'): method = 'tracemin_pcg' if (method in ('tracemin_pcg', 'tracemin_chol', 'tracemin_lu')): def find_fiedler(L, x, normalized, tol): q = (1 if (method == 'tracemin_pcg') else min(4, (L.shape[0] - 1))) X = asmatrix(normal(size=(q, L.shape[0]))).T (sigma, X) = _tracemin_fiedler(L, X, normalized, tol, method) return (sigma[0], X[:, 0]) elif ((method == 'lanczos') or (method == 'lobpcg')): def find_fiedler(L, x, normalized, tol): L = csc_matrix(L, dtype=float) n = L.shape[0] if normalized: D = spdiags((1.0 / sqrt(L.diagonal())), [0], n, n, format='csc') L = ((D * L) * D) if ((method == 'lanczos') or (n < 10)): (sigma, X) = eigsh(L, 2, which='SM', tol=tol, return_eigenvectors=True) return (sigma[1], X[:, 1]) else: X = asarray(asmatrix(x).T) M = spdiags((1.0 / L.diagonal()), [0], n, n) Y = ones(n) if normalized: Y /= D.diagonal() (sigma, X) = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol, maxiter=n, largest=False) return (sigma[0], X[:, 0]) else: raise nx.NetworkXError(("unknown method '%s'." % method)) return find_fiedler
@not_implemented_for('directed') def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Return the algebraic connectivity of an undirected graph.\n\n The algebraic connectivity of a connected undirected graph is the second\n smallest eigenvalue of its Laplacian matrix.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n algebraic_connectivity : float\n Algebraic connectivity.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) < 2): raise nx.NetworkXError('graph has less than two nodes.') G = _preprocess_graph(G, weight) if (not nx.is_connected(G)): return 0.0 L = nx.laplacian_matrix(G) if (L.shape[0] == 2): return ((2.0 * L[(0, 0)]) if (not normalized) else 2.0) find_fiedler = _get_fiedler_func(method) x = (None if (method != 'lobpcg') else _rcm_estimate(G, G)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) return sigma
-3,385,227,640,535,646,000
Return the algebraic connectivity of an undirected graph. The algebraic connectivity of a connected undirected graph is the second smallest eigenvalue of its Laplacian matrix. Parameters ---------- G : NetworkX graph An undirected graph. weight : object, optional (default: None) The data key used to determine the weight of each edge. If None, then each edge has unit weight. normalized : bool, optional (default: False) Whether the normalized Laplacian matrix is used. tol : float, optional (default: 1e-8) Tolerance of relative residual in eigenvalue computation. method : string, optional (default: 'tracemin_pcg') Method of eigenvalue computation. It must be one of the tracemin options shown below (TraceMIN), 'lanczos' (Lanczos iteration) or 'lobpcg' (LOBPCG). The TraceMIN algorithm uses a linear system solver. The following values allow specifying the solver to be used. =============== ======================================== Value Solver =============== ======================================== 'tracemin_pcg' Preconditioned conjugate gradient method 'tracemin_chol' Cholesky factorization 'tracemin_lu' LU factorization =============== ======================================== Returns ------- algebraic_connectivity : float Algebraic connectivity. Raises ------ NetworkXNotImplemented If G is directed. NetworkXError If G has less than two nodes. Notes ----- Edge weights are interpreted by their absolute values. For MultiGraph's, weights of parallel edges are summed. Zero-weighted edges are ignored. To use Cholesky factorization in the TraceMIN algorithm, the :samp:`scikits.sparse` package must be installed. See Also -------- laplacian_matrix
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
algebraic_connectivity
AlexKovrigin/facerecognition
python
@not_implemented_for('directed') def algebraic_connectivity(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Return the algebraic connectivity of an undirected graph.\n\n The algebraic connectivity of a connected undirected graph is the second\n smallest eigenvalue of its Laplacian matrix.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n algebraic_connectivity : float\n Algebraic connectivity.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) < 2): raise nx.NetworkXError('graph has less than two nodes.') G = _preprocess_graph(G, weight) if (not nx.is_connected(G)): return 0.0 L = nx.laplacian_matrix(G) if (L.shape[0] == 2): return ((2.0 * L[(0, 0)]) if (not normalized) else 2.0) find_fiedler = _get_fiedler_func(method) x = (None if (method != 'lobpcg') else _rcm_estimate(G, G)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) return sigma
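A small example of the public API documented above (requires SciPy; the value for a path graph is known analytically, so it makes a handy sanity check):

    import networkx as nx
    ac = nx.algebraic_connectivity(nx.path_graph(4), method='lanczos')
    # For the 4-node path this equals 2 - sqrt(2), roughly 0.586.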
@not_implemented_for('directed') def fiedler_vector(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Return the Fiedler vector of a connected undirected graph.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n fiedler_vector : NumPy array of floats.\n Fiedler vector.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes or is not connected.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) < 2): raise nx.NetworkXError('graph has less than two nodes.') G = _preprocess_graph(G, weight) if (not nx.is_connected(G)): raise nx.NetworkXError('graph is not connected.') if (len(G) == 2): return array([1.0, (- 1.0)]) find_fiedler = _get_fiedler_func(method) L = nx.laplacian_matrix(G) x = (None if (method != 'lobpcg') else _rcm_estimate(G, G)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) return fiedler
8,916,865,751,598,344,000
Return the Fiedler vector of a connected undirected graph. The Fiedler vector of a connected undirected graph is the eigenvector corresponding to the second smallest eigenvalue of the Laplacian matrix of the graph. Parameters ---------- G : NetworkX graph An undirected graph. weight : object, optional (default: None) The data key used to determine the weight of each edge. If None, then each edge has unit weight. normalized : bool, optional (default: False) Whether the normalized Laplacian matrix is used. tol : float, optional (default: 1e-8) Tolerance of relative residual in eigenvalue computation. method : string, optional (default: 'tracemin_pcg') Method of eigenvalue computation. It must be one of the tracemin options shown below (TraceMIN), 'lanczos' (Lanczos iteration) or 'lobpcg' (LOBPCG). The TraceMIN algorithm uses a linear system solver. The following values allow specifying the solver to be used. =============== ======================================== Value Solver =============== ======================================== 'tracemin_pcg' Preconditioned conjugate gradient method 'tracemin_chol' Cholesky factorization 'tracemin_lu' LU factorization =============== ======================================== Returns ------- fiedler_vector : NumPy array of floats. Fiedler vector. Raises ------ NetworkXNotImplemented If G is directed. NetworkXError If G has less than two nodes or is not connected. Notes ----- Edge weights are interpreted by their absolute values. For MultiGraph's, weights of parallel edges are summed. Zero-weighted edges are ignored. To use Cholesky factorization in the TraceMIN algorithm, the :samp:`scikits.sparse` package must be installed. See Also -------- laplacian_matrix
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
fiedler_vector
AlexKovrigin/facerecognition
python
@not_implemented_for('directed') def fiedler_vector(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Return the Fiedler vector of a connected undirected graph.\n\n The Fiedler vector of a connected undirected graph is the eigenvector\n corresponding to the second smallest eigenvalue of the Laplacian matrix of\n of the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n fiedler_vector : NumPy array of floats.\n Fiedler vector.\n\n Raises\n ------\n NetworkXNotImplemented\n If G is directed.\n\n NetworkXError\n If G has less than two nodes or is not connected.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) < 2): raise nx.NetworkXError('graph has less than two nodes.') G = _preprocess_graph(G, weight) if (not nx.is_connected(G)): raise nx.NetworkXError('graph is not connected.') if (len(G) == 2): return array([1.0, (- 1.0)]) find_fiedler = _get_fiedler_func(method) L = nx.laplacian_matrix(G) x = (None if (method != 'lobpcg') else _rcm_estimate(G, G)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) return fiedler
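A classic use of the function above is spectral bisection: splitting the nodes by the sign of the Fiedler vector. The barbell graph here is only an illustration:

    import networkx as nx
    G = nx.barbell_graph(5, 0)              # two 5-cliques joined by a single edge
    v = nx.fiedler_vector(G, method='lanczos')
    part = {n for n, x in zip(G, v) if x < 0}
    # `part` and its complement recover the two cliques.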
def spectral_ordering(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Compute the spectral_ordering of a graph.\n\n The spectral ordering of a graph is an ordering of its nodes where nodes\n in the same weakly connected components appear contiguous and ordered by\n their corresponding elements in the Fiedler vector of the component.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n spectral_ordering : NumPy array of floats.\n Spectral ordering of nodes.\n\n Raises\n ------\n NetworkXError\n If G is empty.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) == 0): raise nx.NetworkXError('graph is empty.') G = _preprocess_graph(G, weight) find_fiedler = _get_fiedler_func(method) order = [] for component in nx.connected_components(G): size = len(component) if (size > 2): L = nx.laplacian_matrix(G, component) x = (None if (method != 'lobpcg') else _rcm_estimate(G, component)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) sort_info = zip(fiedler, range(size), component) order.extend((u for (x, c, u) in sorted(sort_info))) else: order.extend(component) return order
8,107,492,088,688,550,000
Compute the spectral_ordering of a graph. The spectral ordering of a graph is an ordering of its nodes where nodes in the same weakly connected components appear contiguous and ordered by their corresponding elements in the Fiedler vector of the component. Parameters ---------- G : NetworkX graph A graph. weight : object, optional (default: None) The data key used to determine the weight of each edge. If None, then each edge has unit weight. normalized : bool, optional (default: False) Whether the normalized Laplacian matrix is used. tol : float, optional (default: 1e-8) Tolerance of relative residual in eigenvalue computation. method : string, optional (default: 'tracemin_pcg') Method of eigenvalue computation. It must be one of the tracemin options shown below (TraceMIN), 'lanczos' (Lanczos iteration) or 'lobpcg' (LOBPCG). The TraceMIN algorithm uses a linear system solver. The following values allow specifying the solver to be used. =============== ======================================== Value Solver =============== ======================================== 'tracemin_pcg' Preconditioned conjugate gradient method 'tracemin_chol' Cholesky factorization 'tracemin_lu' LU factorization =============== ======================================== Returns ------- spectral_ordering : NumPy array of floats. Spectral ordering of nodes. Raises ------ NetworkXError If G is empty. Notes ----- Edge weights are interpreted by their absolute values. For MultiGraph's, weights of parallel edges are summed. Zero-weighted edges are ignored. To use Cholesky factorization in the TraceMIN algorithm, the :samp:`scikits.sparse` package must be installed. See Also -------- laplacian_matrix
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
spectral_ordering
AlexKovrigin/facerecognition
python
def spectral_ordering(G, weight='weight', normalized=False, tol=1e-08, method='tracemin_pcg'): "Compute the spectral_ordering of a graph.\n\n The spectral ordering of a graph is an ordering of its nodes where nodes\n in the same weakly connected components appear contiguous and ordered by\n their corresponding elements in the Fiedler vector of the component.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph.\n\n weight : object, optional (default: None)\n The data key used to determine the weight of each edge. If None, then\n each edge has unit weight.\n\n normalized : bool, optional (default: False)\n Whether the normalized Laplacian matrix is used.\n\n tol : float, optional (default: 1e-8)\n Tolerance of relative residual in eigenvalue computation.\n\n method : string, optional (default: 'tracemin_pcg')\n Method of eigenvalue computation. It must be one of the tracemin\n options shown below (TraceMIN), 'lanczos' (Lanczos iteration)\n or 'lobpcg' (LOBPCG).\n\n The TraceMIN algorithm uses a linear system solver. The following\n values allow specifying the solver to be used.\n\n =============== ========================================\n Value Solver\n =============== ========================================\n 'tracemin_pcg' Preconditioned conjugate gradient method\n 'tracemin_chol' Cholesky factorization\n 'tracemin_lu' LU factorization\n =============== ========================================\n\n Returns\n -------\n spectral_ordering : NumPy array of floats.\n Spectral ordering of nodes.\n\n Raises\n ------\n NetworkXError\n If G is empty.\n\n Notes\n -----\n Edge weights are interpreted by their absolute values. For MultiGraph's,\n weights of parallel edges are summed. Zero-weighted edges are ignored.\n\n To use Cholesky factorization in the TraceMIN algorithm, the\n :samp:`scikits.sparse` package must be installed.\n\n See Also\n --------\n laplacian_matrix\n " if (len(G) == 0): raise nx.NetworkXError('graph is empty.') G = _preprocess_graph(G, weight) find_fiedler = _get_fiedler_func(method) order = [] for component in nx.connected_components(G): size = len(component) if (size > 2): L = nx.laplacian_matrix(G, component) x = (None if (method != 'lobpcg') else _rcm_estimate(G, component)) (sigma, fiedler) = find_fiedler(L, x, normalized, tol) sort_info = zip(fiedler, range(size), component) order.extend((u for (x, c, u) in sorted(sort_info))) else: order.extend(component) return order
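For intuition about the ordering described above, a path graph is a convenient check (illustrative only; requires SciPy):

    import networkx as nx
    order = nx.spectral_ordering(nx.path_graph(5), method='lanczos')
    # The Fiedler vector of a path is monotone along the path, so this
    # returns [0, 1, 2, 3, 4] or its reverse.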
def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (dot(X[:, j], e) * e)
7,863,523,705,977,705,000
Make X orthogonal to the nullspace of L.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
project
AlexKovrigin/facerecognition
python
def project(X): '\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (dot(X[:, j], e) * e)
def project(X): 'Make X orthogonal to the nullspace of L.\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (X[:, j].sum() / n)
-2,174,882,100,608,564,000
Make X orthogonal to the nullspace of L.
venv/Lib/site-packages/networkx/linalg/algebraicconnectivity.py
project
AlexKovrigin/facerecognition
python
def project(X): '\n ' X = asarray(X) for j in range(X.shape[1]): X[:, j] -= (X[:, j].sum() / n)
def setup(self): '\n Setting up test parameters\n ' log.info('Starting the test setup') super(TestBulkPodAttachPerformance, self).setup() self.benchmark_name = 'bulk_pod_attach_time' helpers.pull_images(constants.PERF_IMAGE)
532,208,516,122,918,100
Setting up test parameters
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
setup
Sravikaz/ocs-ci
python
def setup(self): '\n \n ' log.info('Starting the test setup') super(TestBulkPodAttachPerformance, self).setup() self.benchmark_name = 'bulk_pod_attach_time' helpers.pull_images(constants.PERF_IMAGE)
@pytest.fixture() def base_setup(self, project_factory, interface_type, storageclass_factory): '\n A setup phase for the test\n\n Args:\n interface_type: Interface type\n storageclass_factory: A fixture to create everything needed for a storage class\n ' self.interface = interface_type self.sc_obj = storageclass_factory(self.interface) proj_obj = project_factory() self.namespace = proj_obj.namespace if (self.interface == constants.CEPHFILESYSTEM): self.sc = 'CephFS' if (self.interface == constants.CEPHBLOCKPOOL): self.sc = 'RBD'
5,094,464,455,281,032,000
A setup phase for the test Args: interface_type: Interface type storageclass_factory: A fixture to create everything needed for a storage class
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
base_setup
Sravikaz/ocs-ci
python
@pytest.fixture() def base_setup(self, project_factory, interface_type, storageclass_factory): '\n A setup phase for the test\n\n Args:\n interface_type: Interface type\n storageclass_factory: A fixture to create everything needed for a storage class\n ' self.interface = interface_type self.sc_obj = storageclass_factory(self.interface) proj_obj = project_factory() self.namespace = proj_obj.namespace if (self.interface == constants.CEPHFILESYSTEM): self.sc = 'CephFS' if (self.interface == constants.CEPHBLOCKPOOL): self.sc = 'RBD'
@pytest.mark.parametrize(argnames=['interface_type', 'bulk_size'], argvalues=[pytest.param(*[constants.CEPHBLOCKPOOL, 120]), pytest.param(*[constants.CEPHBLOCKPOOL, 240]), pytest.param(*[constants.CEPHFILESYSTEM, 120]), pytest.param(*[constants.CEPHFILESYSTEM, 240])]) @pytest.mark.usefixtures(base_setup.__name__) @polarion_id('OCS-1620') def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size): '\n Measures pods attachment time in bulk_size bulk\n\n Args:\n teardown_factory: A fixture used when we want a new resource that was created during the tests\n to be removed in the teardown phase.\n bulk_size: Size of the bulk to be tested\n Returns:\n\n ' test_start_time = PASTest.get_time() log.info(f'Start creating bulk of new {bulk_size} PVCs') (pvc_objs, _) = helpers.create_multiple_pvcs(sc_name=self.sc_obj.name, namespace=self.namespace, number_of_pvc=bulk_size, size=self.pvc_size, burst=True) for pvc_obj in pvc_objs: pvc_obj.reload() teardown_factory(pvc_obj) with ThreadPoolExecutor(max_workers=5) as executor: for pvc_obj in pvc_objs: executor.submit(helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND) executor.submit(pvc_obj.reload) start_time = helpers.get_provision_time(self.interface, pvc_objs, status='start') end_time = helpers.get_provision_time(self.interface, pvc_objs, status='end') total_time = (end_time - start_time).total_seconds() log.info(f'{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds.') pvc_names_list = [] for pvc_obj in pvc_objs: pvc_names_list.append(pvc_obj.name) log.info(f'{self.interface} : Before pod attach') bulk_start_time = time.time() pod_data_list = list() pod_data_list.extend(scale_lib.attach_multiple_pvc_to_pod_dict(pvc_list=pvc_names_list, namespace=self.namespace, pvcs_per_pod=1)) lcl = locals() tmp_path = pathlib.Path(ocsci_log_path()) obj_name = 'obj1' lcl[f'pod_kube_{obj_name}'] = ObjectConfFile(name=f'pod_kube_{obj_name}', obj_dict_list=pod_data_list, project=defaults.ROOK_CLUSTER_NAMESPACE, tmp_path=tmp_path) lcl[f'pod_kube_{obj_name}'].create(namespace=self.namespace) log.info('Checking that pods are running') pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(kube_job_obj=lcl[f'pod_kube_{obj_name}'], namespace=self.namespace, no_of_pod=len(pod_data_list), timeout=180) for pod_name in pod_running_list: pod_obj = get_pod_obj(pod_name, self.namespace) teardown_factory(pod_obj) bulk_end_time = time.time() bulk_total_time = (bulk_end_time - bulk_start_time) log.info(f'Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds') self.get_env_info() full_log_path = get_full_test_logs_path(cname=self) self.results_path = get_full_test_logs_path(cname=self) full_log_path += f'-{self.sc}' full_results = self.init_full_results(ResultsAnalyse(self.uuid, self.crd_data, full_log_path, 'pod_bulk_attachtime')) full_results.add_key('storageclass', self.sc) full_results.add_key('pod_bulk_attach_time', bulk_total_time) full_results.add_key('pvc_size', self.pvc_size) full_results.add_key('bulk_size', bulk_size) test_end_time = PASTest.get_time() full_results.add_key('test_time', {'start': test_start_time, 'end': test_end_time}) if full_results.es_write(): res_link = full_results.results_link() log.info(f'The result can be found at : {res_link}') self.write_result_to_file(res_link)
-3,165,609,193,103,609,300
Measures pod attachment time for a bulk of bulk_size pods. Args: teardown_factory: A fixture used when we want a new resource that was created during the tests to be removed in the teardown phase. bulk_size: Size of the bulk to be tested Returns:
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
test_bulk_pod_attach_performance
Sravikaz/ocs-ci
python
@pytest.mark.parametrize(argnames=['interface_type', 'bulk_size'], argvalues=[pytest.param(*[constants.CEPHBLOCKPOOL, 120]), pytest.param(*[constants.CEPHBLOCKPOOL, 240]), pytest.param(*[constants.CEPHFILESYSTEM, 120]), pytest.param(*[constants.CEPHFILESYSTEM, 240])]) @pytest.mark.usefixtures(base_setup.__name__) @polarion_id('OCS-1620') def test_bulk_pod_attach_performance(self, teardown_factory, bulk_size): '\n Measures pods attachment time in bulk_size bulk\n\n Args:\n teardown_factory: A fixture used when we want a new resource that was created during the tests\n to be removed in the teardown phase.\n bulk_size: Size of the bulk to be tested\n Returns:\n\n ' test_start_time = PASTest.get_time() log.info(f'Start creating bulk of new {bulk_size} PVCs') (pvc_objs, _) = helpers.create_multiple_pvcs(sc_name=self.sc_obj.name, namespace=self.namespace, number_of_pvc=bulk_size, size=self.pvc_size, burst=True) for pvc_obj in pvc_objs: pvc_obj.reload() teardown_factory(pvc_obj) with ThreadPoolExecutor(max_workers=5) as executor: for pvc_obj in pvc_objs: executor.submit(helpers.wait_for_resource_state, pvc_obj, constants.STATUS_BOUND) executor.submit(pvc_obj.reload) start_time = helpers.get_provision_time(self.interface, pvc_objs, status='start') end_time = helpers.get_provision_time(self.interface, pvc_objs, status='end') total_time = (end_time - start_time).total_seconds() log.info(f'{self.interface}: Bulk of {bulk_size} PVCs creation time is {total_time} seconds.') pvc_names_list = [] for pvc_obj in pvc_objs: pvc_names_list.append(pvc_obj.name) log.info(f'{self.interface} : Before pod attach') bulk_start_time = time.time() pod_data_list = list() pod_data_list.extend(scale_lib.attach_multiple_pvc_to_pod_dict(pvc_list=pvc_names_list, namespace=self.namespace, pvcs_per_pod=1)) lcl = locals() tmp_path = pathlib.Path(ocsci_log_path()) obj_name = 'obj1' lcl[f'pod_kube_{obj_name}'] = ObjectConfFile(name=f'pod_kube_{obj_name}', obj_dict_list=pod_data_list, project=defaults.ROOK_CLUSTER_NAMESPACE, tmp_path=tmp_path) lcl[f'pod_kube_{obj_name}'].create(namespace=self.namespace) log.info('Checking that pods are running') pod_running_list = scale_lib.check_all_pod_reached_running_state_in_kube_job(kube_job_obj=lcl[f'pod_kube_{obj_name}'], namespace=self.namespace, no_of_pod=len(pod_data_list), timeout=180) for pod_name in pod_running_list: pod_obj = get_pod_obj(pod_name, self.namespace) teardown_factory(pod_obj) bulk_end_time = time.time() bulk_total_time = (bulk_end_time - bulk_start_time) log.info(f'Bulk attach time of {len(pod_running_list)} pods is {bulk_total_time} seconds') self.get_env_info() full_log_path = get_full_test_logs_path(cname=self) self.results_path = get_full_test_logs_path(cname=self) full_log_path += f'-{self.sc}' full_results = self.init_full_results(ResultsAnalyse(self.uuid, self.crd_data, full_log_path, 'pod_bulk_attachtime')) full_results.add_key('storageclass', self.sc) full_results.add_key('pod_bulk_attach_time', bulk_total_time) full_results.add_key('pvc_size', self.pvc_size) full_results.add_key('bulk_size', bulk_size) test_end_time = PASTest.get_time() full_results.add_key('test_time', {'start': test_start_time, 'end': test_end_time}) if full_results.es_write(): res_link = full_results.results_link() log.info(f'The result can be found at : {res_link}') self.write_result_to_file(res_link)
def test_bulk_pod_attach_results(self): '\n This is not a test - it is only check that previous test ran and finish as expected\n and reporting the full results (links in the ES) of previous tests (4)\n ' self.number_of_tests = 4 self.results_path = get_full_test_logs_path(cname=self, fname='test_bulk_pod_attach_performance') self.results_file = os.path.join(self.results_path, 'all_results.txt') log.info(f'Check results in {self.results_file}') self.check_tests_results() self.push_to_dashboard(test_name='Bulk Pod Attach Time')
7,541,969,266,199,892,000
This is not a test - it only checks that the previous tests ran and finished as expected, and reports the full results (links in the ES) of the previous tests (4)
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
test_bulk_pod_attach_results
Sravikaz/ocs-ci
python
def test_bulk_pod_attach_results(self): '\n This is not a test - it is only check that previous test ran and finish as expected\n and reporting the full results (links in the ES) of previous tests (4)\n ' self.number_of_tests = 4 self.results_path = get_full_test_logs_path(cname=self, fname='test_bulk_pod_attach_performance') self.results_file = os.path.join(self.results_path, 'all_results.txt') log.info(f'Check results in {self.results_file}') self.check_tests_results() self.push_to_dashboard(test_name='Bulk Pod Attach Time')
def init_full_results(self, full_results): '\n Initialize the full results object which will send to the ES server\n\n Args:\n full_results (obj): an empty ResultsAnalyse object\n\n Returns:\n ResultsAnalyse (obj): the input object filled with data\n\n ' for key in self.environment: full_results.add_key(key, self.environment[key]) full_results.add_key('index', full_results.new_index) return full_results
7,311,891,208,824,734,000
Initialize the full results object which will be sent to the ES server Args: full_results (obj): an empty ResultsAnalyse object Returns: ResultsAnalyse (obj): the input object filled with data
tests/e2e/performance/csi_tests/test_bulk_pod_attachtime_performance.py
init_full_results
Sravikaz/ocs-ci
python
def init_full_results(self, full_results): '\n Initialize the full results object which will send to the ES server\n\n Args:\n full_results (obj): an empty ResultsAnalyse object\n\n Returns:\n ResultsAnalyse (obj): the input object filled with data\n\n ' for key in self.environment: full_results.add_key(key, self.environment[key]) full_results.add_key('index', full_results.new_index) return full_results
@pytest.fixture(scope='function', autouse=True) def reset_loggers(): 'Prevent logging handlers from capturing temporary file handles.\n\n For example, a test that uses the `capsys` fixture and calls\n `logging.exception()` will initialize logging with a default handler that\n captures `sys.stderr`. When the test ends, the file handles will be closed\n and `sys.stderr` will be returned to its original handle, but the logging\n will have a dangling reference to the temporary handle used in the `capsys`\n fixture.\n\n ' logger = logging.getLogger() for handler in logger.handlers: logger.removeHandler(handler)
4,657,129,917,017,633,000
Prevent logging handlers from capturing temporary file handles. For example, a test that uses the `capsys` fixture and calls `logging.exception()` will initialize logging with a default handler that captures `sys.stderr`. When the test ends, the file handles will be closed and `sys.stderr` will be returned to its original handle, but the logging will have a dangling reference to the temporary handle used in the `capsys` fixture.
tests/conftest.py
reset_loggers
JasperJuergensen/elastalert
python
@pytest.fixture(scope='function', autouse=True) def reset_loggers(): 'Prevent logging handlers from capturing temporary file handles.\n\n For example, a test that uses the `capsys` fixture and calls\n `logging.exception()` will initialize logging with a default handler that\n captures `sys.stderr`. When the test ends, the file handles will be closed\n and `sys.stderr` will be returned to its original handle, but the logging\n will have a dangling reference to the temporary handle used in the `capsys`\n fixture.\n\n ' logger = logging.getLogger() for handler in logger.handlers: logger.removeHandler(handler)
@pytest.fixture(scope='function') def environ(): 'py.test fixture to get a fresh mutable environment.' old_env = os.environ new_env = dict(list(old_env.items())) os.environ = new_env (yield os.environ) os.environ = old_env
-5,649,586,395,634,223,000
py.test fixture to get a fresh mutable environment.
tests/conftest.py
environ
JasperJuergensen/elastalert
python
@pytest.fixture(scope='function') def environ(): old_env = os.environ new_env = dict(list(old_env.items())) os.environ = new_env (yield os.environ) os.environ = old_env
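A hypothetical test using the fixture above; the variable name is made up, and the original environment is restored by the fixture after the test finishes:

    import os

    def test_reads_config_path(environ):
        environ['ELASTALERT_CONFIG'] = '/tmp/config.yaml'
        assert os.environ['ELASTALERT_CONFIG'] == '/tmp/config.yaml'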
def remove_prefix(val: str, prefix: str) -> str: 'This function removes a prefix from a string.\n\n Note this is a built-in function in Python 3.9 once we upgrade to it we should use it instead.\n ' return (val[len(prefix):] if val.startswith(prefix) else val)
-4,224,620,920,993,406,500
This function removes a prefix from a string. Note this is a built-in function in Python 3.9; once we upgrade to it we should use it instead.
cbmgr.py
remove_prefix
b33f/couchbase-cli
python
def remove_prefix(val: str, prefix: str) -> str: 'This function removes a prefix from a string.\n\n Note this is a built-in function in Python 3.9 once we upgrade to it we should use it instead.\n ' return (val[len(prefix):] if val.startswith(prefix) else val)
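Two illustrative calls for the helper above (the host string is made up):

    remove_prefix('couchbase://10.0.0.1:8091', 'couchbase://')   # -> '10.0.0.1:8091'
    remove_prefix('10.0.0.1:8091', 'couchbase://')               # no prefix, returned unchanged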
def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None): 'rest_initialiser is a decorator that does common subcommand tasks.\n\n The decorator will always creates a cluster manager and assign it to the subcommand variable rest\n :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand\n :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning\n :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does\n the check but it does not fail if not enterprise. If none it does not perform the check. The result of the check\n is stored on the instance parameter enterprise\n ' def inner(fn): def decorator(self, opts): self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug) if cluster_init_check: check_cluster_initialized(self.rest) if version_check: check_versions(self.rest) if (enterprise_check is not None): (enterprise, errors) = self.rest.is_enterprise() _exit_if_errors(errors) if (enterprise_check and (not enterprise)): _exit_if_errors(['Command only available in enterprise edition']) self.enterprise = enterprise return fn(self, opts) return decorator return inner
6,277,477,557,780,287,000
rest_initialiser is a decorator that does common subcommand tasks. The decorator always creates a cluster manager and assigns it to the subcommand attribute rest :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand :param version_check: if true it will check if the cluster and CLI versions match; if they do not, it prints a warning :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does the check but does not fail if the cluster is not enterprise. If None it does not perform the check. The result of the check is stored on the instance attribute enterprise
cbmgr.py
rest_initialiser
b33f/couchbase-cli
python
def rest_initialiser(cluster_init_check=False, version_check=False, enterprise_check=None): 'rest_initialiser is a decorator that does common subcommand tasks.\n\n The decorator will always creates a cluster manager and assign it to the subcommand variable rest\n :param cluster_init_check: if true it will check if the cluster is initialized before executing the subcommand\n :param version_check: if true it will check if the cluster and CLI version match if they do not it prints a warning\n :param enterprise_check: if true it will check if the cluster is enterprise and fail if not. If it is false it does\n the check but it does not fail if not enterprise. If none it does not perform the check. The result of the check\n is stored on the instance parameter enterprise\n ' def inner(fn): def decorator(self, opts): self.rest = ClusterManager(opts.cluster, opts.username, opts.password, opts.ssl, opts.ssl_verify, opts.cacert, opts.debug) if cluster_init_check: check_cluster_initialized(self.rest) if version_check: check_versions(self.rest) if (enterprise_check is not None): (enterprise, errors) = self.rest.is_enterprise() _exit_if_errors(errors) if (enterprise_check and (not enterprise)): _exit_if_errors(['Command only available in enterprise edition']) self.enterprise = enterprise return fn(self, opts) return decorator return inner
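A sketch of how the decorator above is meant to be applied to a subcommand's execute method; the subcommand class and its body are hypothetical:

    class MySubcommand(Subcommand):
        @rest_initialiser(cluster_init_check=True, version_check=True, enterprise_check=False)
        def execute(self, opts):
            # The decorator has populated self.rest (a ClusterManager) and,
            # because enterprise_check is not None, self.enterprise.
            ...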
def index_storage_mode_to_param(value, default='plasma'): 'Converts the index storage mode to what Couchbase understands' if (value == 'default'): return default if (value == 'memopt'): return 'memory_optimized' return value
5,129,255,220,943,172,000
Converts the index storage mode to what Couchbase understands
cbmgr.py
index_storage_mode_to_param
b33f/couchbase-cli
python
def index_storage_mode_to_param(value, default='plasma'): if (value == 'default'): return default if (value == 'memopt'): return 'memory_optimized' return value
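Illustrative checks for the storage-mode mapping above; the function is repeated here so the snippet runs on its own.

def index_storage_mode_to_param(value, default='plasma'):
    # Same mapping as the record above.
    if value == 'default':
        return default
    if value == 'memopt':
        return 'memory_optimized'
    return value

assert index_storage_mode_to_param('default') == 'plasma'
assert index_storage_mode_to_param('memopt') == 'memory_optimized'
assert index_storage_mode_to_param('plasma') == 'plasma'  # any other mode passes through unchanged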
def process_services(services, enterprise): 'Converts services to a format Couchbase understands' sep = ',' if (services.find(sep) < 0): sep = ';' svc_set = set([w.strip() for w in services.split(sep)]) svc_candidate = ['data', 'index', 'query', 'fts', 'eventing', 'analytics', 'backup'] for svc in svc_set: if (svc not in svc_candidate): return (None, [f'`{svc}` is not a valid service']) if ((not enterprise) and (svc in ['eventing', 'analytics', 'backup'])): return (None, [f'{svc} service is only available on Enterprise Edition']) if (not enterprise): ce_svc_30 = set(['data']) ce_svc_40 = set(['data', 'index', 'query']) ce_svc_45 = set(['data', 'index', 'query', 'fts']) if (svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]): return (None, [f"Invalid service configuration. Community Edition only supports nodes with the following combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or '{','.join(ce_svc_45)}'"]) services = ','.join(svc_set) for (old, new) in [[';', ','], ['data', 'kv'], ['query', 'n1ql'], ['analytics', 'cbas']]: services = services.replace(old, new) return (services, None)
4,243,807,396,608,450,600
Converts services to a format Couchbase understands
cbmgr.py
process_services
b33f/couchbase-cli
python
def process_services(services, enterprise): sep = ',' if (services.find(sep) < 0): sep = ';' svc_set = set([w.strip() for w in services.split(sep)]) svc_candidate = ['data', 'index', 'query', 'fts', 'eventing', 'analytics', 'backup'] for svc in svc_set: if (svc not in svc_candidate): return (None, [f'`{svc}` is not a valid service']) if ((not enterprise) and (svc in ['eventing', 'analytics', 'backup'])): return (None, [f'{svc} service is only available on Enterprise Edition']) if (not enterprise): ce_svc_30 = set(['data']) ce_svc_40 = set(['data', 'index', 'query']) ce_svc_45 = set(['data', 'index', 'query', 'fts']) if (svc_set not in [ce_svc_30, ce_svc_40, ce_svc_45]): return (None, [f"Invalid service configuration. Community Edition only supports nodes with the following combinations of services: '{''.join(ce_svc_30)}', '{','.join(ce_svc_40)}' or '{','.join(ce_svc_45)}'"]) services = ','.join(svc_set) for (old, new) in [[';', ','], ['data', 'kv'], ['query', 'n1ql'], ['analytics', 'cbas']]: services = services.replace(old, new) return (services, None)
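A small worked example of the service-name translation performed by process_services above; only the final rename step is reproduced, because the full function returns the services in set order and the exact ordering is not guaranteed.

services = 'data,query,analytics'
# Same substitution table as the record above: user-facing names to ns_server names.
for old, new in [[';', ','], ['data', 'kv'], ['query', 'n1ql'], ['analytics', 'cbas']]:
    services = services.replace(old, new)
assert services == 'kv,n1ql,cbas'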
def find_subcommands(): 'Finds all subcommand classes' clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass) subclasses = [cls for cls in clsmembers if (issubclass(cls[1], (Subcommand, LocalSubcommand)) and (cls[1] not in [Subcommand, LocalSubcommand]))] subcommands = [] for subclass in subclasses: name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])]) subcommands.append((name, subclass[1])) return subcommands
2,693,386,715,100,359,700
Finds all subcommand classes
cbmgr.py
find_subcommands
b33f/couchbase-cli
python
def find_subcommands(): clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass) subclasses = [cls for cls in clsmembers if (issubclass(cls[1], (Subcommand, LocalSubcommand)) and (cls[1] not in [Subcommand, LocalSubcommand]))] subcommands = [] for subclass in subclasses: name = '-'.join([part.lower() for part in re.findall('[A-Z][a-z]*', subclass[0])]) subcommands.append((name, subclass[1])) return subcommands
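A sketch of the CamelCase-to-kebab-case naming rule used by find_subcommands above; class_to_command_name is a hypothetical helper extracted for illustration.

import re

def class_to_command_name(class_name: str) -> str:
    # Same split as the record above: 'BackupServiceRepository' -> 'backup-service-repository'.
    return '-'.join(part.lower() for part in re.findall('[A-Z][a-z]*', class_name))

assert class_to_command_name('BackupServiceRepository') == 'backup-service-repository'
assert class_to_command_name('ClusterInit') == 'cluster-init'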
def apply_default_port(nodes): '\n Adds the default port if the port is missing.\n\n @type nodes: string\n @param nodes: A comma seprated list of nodes\n @rtype: array of strings\n @return: The nodes with the port postfixed on each one\n ' nodes = nodes.split(',') def append_port(node): if re.match('.*:\\d+$', node): return node return f'{node}:8091' return [append_port(x) for x in nodes]
-1,273,594,236,596,573,200
Adds the default port if the port is missing. @type nodes: string @param nodes: A comma-separated list of nodes @rtype: array of strings @return: The nodes with the port appended to each one
cbmgr.py
apply_default_port
b33f/couchbase-cli
python
def apply_default_port(nodes): '\n Adds the default port if the port is missing.\n\n @type nodes: string\n @param nodes: A comma seprated list of nodes\n @rtype: array of strings\n @return: The nodes with the port postfixed on each one\n ' nodes = nodes.split(',') def append_port(node): if re.match('.*:\\d+$', node): return node return f'{node}:8091' return [append_port(x) for x in nodes]
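A runnable restatement of apply_default_port above; with_default_port is a renamed copy so the snippet stands alone, and the host names are made up.

import re

def with_default_port(nodes: str) -> list:
    # Same behaviour as the record above: append :8091 only when no port is present.
    return [n if re.match(r'.*:\d+$', n) else f'{n}:8091' for n in nodes.split(',')]

assert with_default_port('10.0.0.1,10.0.0.2:9000') == ['10.0.0.1:8091', '10.0.0.2:9000']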
def parse(self, args): 'Parses the subcommand' if (len(args) == 0): self.short_help() return self.parser.parse_args(args)
8,049,019,916,786,133,000
Parses the subcommand
cbmgr.py
parse
b33f/couchbase-cli
python
def parse(self, args): if (len(args) == 0): self.short_help() return self.parser.parse_args(args)
def short_help(self, code=0): 'Prints the short help message and exits' self.parser.print_help() self.parser.exit(code)
-3,749,013,151,046,599,700
Prints the short help message and exits
cbmgr.py
short_help
b33f/couchbase-cli
python
def short_help(self, code=0): self.parser.print_help() self.parser.exit(code)
def execute(self, opts): 'Executes the subcommand' raise NotImplementedError
3,123,175,722,342,094,300
Executes the subcommand
cbmgr.py
execute
b33f/couchbase-cli
python
def execute(self, opts): raise NotImplementedError
@staticmethod def get_man_page_name(): 'Returns the man page name' raise NotImplementedError
1,340,514,292,959,518,200
Returns the man page name
cbmgr.py
get_man_page_name
b33f/couchbase-cli
python
@staticmethod def get_man_page_name(): raise NotImplementedError
@staticmethod def get_description(): 'Returns the command description' raise NotImplementedError
-9,053,620,187,950,240,000
Returns the command description
cbmgr.py
get_description
b33f/couchbase-cli
python
@staticmethod def get_description(): raise NotImplementedError
@staticmethod def get_man_page_name(): 'Returns the man page name' return (('couchbase-cli' + '.1') if (os.name != 'nt') else '.html')
7,705,955,945,054,665,000
Returns the man page name
cbmgr.py
get_man_page_name
b33f/couchbase-cli
python
@staticmethod def get_man_page_name(): return (('couchbase-cli' + '.1') if (os.name != 'nt') else '.html')
@staticmethod def is_hidden(): 'Whether or not the subcommand should be hidden from the help message' return False
7,354,332,169,593,689,000
Whether or not the subcommand should be hidden from the help message
cbmgr.py
is_hidden
b33f/couchbase-cli
python
@staticmethod def is_hidden(): return False
@staticmethod def is_hidden(): 'Whether or not the subcommand should be hidden from the help message' return False
7,354,332,169,593,689,000
Whether or not the subcommand should be hidden from the help message
cbmgr.py
is_hidden
b33f/couchbase-cli
python
@staticmethod def is_hidden(): return False
def __init__(self, subparser): 'setup the parser' self.rest = None repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False, allow_abbrev=False) action_group = repository_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='Get all repositories') action_group.add_argument('--get', action='store_true', help='Get repository by id') action_group.add_argument('--archive', action='store_true', help='Archive a repository') action_group.add_argument('--add', action='store_true', help='Add a new active repository') action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help='Prints the short or long help message') group = repository_parser.add_argument_group('Backup service repository configuration') group.add_argument('--id', metavar='<id>', help='The repository id') group.add_argument('--new-id', metavar='<id>', help='The new repository id') group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'], help='The repository state.') group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository') group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in') group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup') group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data') cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments') cloud_group.add_argument('--cloud-credentials-name', metavar='<name>', help='The stored clouds credential name to use for the new repository') cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory') cloud_group.add_argument('--cloud-credentials-id', metavar='<id>', help='The ID to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-key', metavar='<key>', help='The key to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-region', metavar='<region>', help='The region for the object store') cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>', help='Overrides the default endpoint used to communicate with the cloud provider. Use for object store compatible third party solutions') cloud_group.add_argument('--s3-force-path-style', action='store_true', help='When using S3 or S3 compatible storage it will use the old path style.')
3,225,793,807,027,137,000
setup the parser
cbmgr.py
__init__
b33f/couchbase-cli
python
def __init__(self, subparser): self.rest = None repository_parser = subparser.add_parser('repository', help='Manage backup repositories', add_help=False, allow_abbrev=False) action_group = repository_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='Get all repositories') action_group.add_argument('--get', action='store_true', help='Get repository by id') action_group.add_argument('--archive', action='store_true', help='Archive a repository') action_group.add_argument('--add', action='store_true', help='Add a new active repository') action_group.add_argument('--remove', action='store_true', help='Remove an archived/imported repository') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help='Prints the short or long help message') group = repository_parser.add_argument_group('Backup service repository configuration') group.add_argument('--id', metavar='<id>', help='The repository id') group.add_argument('--new-id', metavar='<id>', help='The new repository id') group.add_argument('--state', metavar='<state>', choices=['active', 'archived', 'imported'], help='The repository state.') group.add_argument('--plan', metavar='<plan_name>', help='The plan to use as base for the repository') group.add_argument('--backup-archive', metavar='<archive>', help='The location to store the backups in') group.add_argument('--bucket-name', metavar='<name>', help='The bucket to backup') group.add_argument('--remove-data', action='store_true', help='Used to delete the repository data') cloud_group = repository_parser.add_argument_group('Backup repository cloud arguments') cloud_group.add_argument('--cloud-credentials-name', metavar='<name>', help='The stored clouds credential name to use for the new repository') cloud_group.add_argument('--cloud-staging-dir', metavar='<path>', help='The path to the staging directory') cloud_group.add_argument('--cloud-credentials-id', metavar='<id>', help='The ID to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-key', metavar='<key>', help='The key to use to communicate with the object store') cloud_group.add_argument('--cloud-credentials-region', metavar='<region>', help='The region for the object store') cloud_group.add_argument('--cloud-endpoint', metavar='<endpoint>', help='Overrides the default endpoint used to communicate with the cloud provider. Use for object store compatible third party solutions') cloud_group.add_argument('--s3-force-path-style', action='store_true', help='When using S3 or S3 compatible storage it will use the old path style.')
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): 'Run the backup-service repository subcommand' if opts.list: self.list_repositories(opts.state, (opts.output == 'json')) elif opts.get: self.get_repository(opts.id, opts.state, (opts.output == 'json')) elif opts.archive: self.archive_repository(opts.id, opts.new_id) elif opts.remove: self.remove_repository(opts.id, opts.state, opts.remove_data) elif opts.add: self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name, credentials_name=opts.cloud_credentials_name, credentials_id=opts.cloud_credentials_id, credentials_key=opts.cloud_credentials_key, cloud_region=opts.cloud_credentials_region, staging_dir=opts.cloud_staging_dir, cloud_endpoint=opts.cloud_endpoint, s3_path_style=opts.s3_force_path_style)
-1,515,356,174,697,526,300
Run the backup-service repository subcommand
cbmgr.py
execute
b33f/couchbase-cli
python
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): if opts.list: self.list_repositories(opts.state, (opts.output == 'json')) elif opts.get: self.get_repository(opts.id, opts.state, (opts.output == 'json')) elif opts.archive: self.archive_repository(opts.id, opts.new_id) elif opts.remove: self.remove_repository(opts.id, opts.state, opts.remove_data) elif opts.add: self.add_active_repository(opts.id, opts.plan, opts.backup_archive, bucket_name=opts.bucket_name, credentials_name=opts.cloud_credentials_name, credentials_id=opts.cloud_credentials_id, credentials_key=opts.cloud_credentials_key, cloud_region=opts.cloud_credentials_region, staging_dir=opts.cloud_staging_dir, cloud_endpoint=opts.cloud_endpoint, s3_path_style=opts.s3_force_path_style)
def remove_repository(self, repository_id: str, state: str, delete_repo: bool=False): "Removes the repository in state 'state' and with id 'repository_id'\n Args:\n repository_id (str): The repository id\n state (str): It must be either archived or imported otherwise it will return an error\n delete_repo (bool): Whether or not the backup repository should be deleted\n " if (not repository_id): _exit_if_errors(['--id is required']) if (not state): _exit_if_errors(['--state is required']) if (state not in ['archived', 'imported']): _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to be archived first']) if (delete_repo and (state == 'imported')): _exit_if_errors(['cannot delete the repository for an imported repository']) (_, errors) = self.rest.delete_backup_repository(repository_id, state, delete_repo) _exit_if_errors(errors) _success('Repository was deleted')
5,525,413,171,259,426,000
Removes the repository in state 'state' and with id 'repository_id' Args: repository_id (str): The repository id state (str): It must be either archived or imported otherwise it will return an error delete_repo (bool): Whether or not the backup repository should be deleted
cbmgr.py
remove_repository
b33f/couchbase-cli
python
def remove_repository(self, repository_id: str, state: str, delete_repo: bool=False): "Removes the repository in state 'state' and with id 'repository_id'\n Args:\n repository_id (str): The repository id\n state (str): It must be either archived or imported otherwise it will return an error\n delete_repo (bool): Whether or not the backup repository should be deleted\n " if (not repository_id): _exit_if_errors(['--id is required']) if (not state): _exit_if_errors(['--state is required']) if (state not in ['archived', 'imported']): _exit_if_errors(['can only delete archived or imported repositories to delete an active repository it needs to be archived first']) if (delete_repo and (state == 'imported')): _exit_if_errors(['cannot delete the repository for an imported repository']) (_, errors) = self.rest.delete_backup_repository(repository_id, state, delete_repo) _exit_if_errors(errors) _success('Repository was deleted')
def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs): "Adds a new active repository identified by 'repository_id' and that uses 'plan' as base.\n\n Args:\n repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will be\n returned.\n plan (str): The name of the plan to use as base for the repository. If it does not exist the service\n will return an error.\n archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead of\n providing a path to a filesystem directory use the syntax.\n s3://<bucket-name>/<optional_prefix>/<archive>\n **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key, cloud_region,\n staging_dir, cloud_endpoint, s3_path_style]\n " if (not repository_id): _exit_if_errors(['--id is required']) if (not plan): _exit_if_errors(['--plan is required']) if (not archive): _exit_if_errors(['--backup-archive is required']) _exit_if_errors(self.check_cloud_params(archive, **kwargs)) add_request_body = {'plan': plan, 'archive': archive} if kwargs.get('bucket_name', False): add_request_body['bucket_name'] = kwargs.get('bucket_name') if kwargs.get('credentials_name', False): add_request_body['cloud_credential_name'] = kwargs.get('credentials_name') if kwargs.get('credentials_id', False): add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id') if kwargs.get('credentials_key', False): add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key') if kwargs.get('cloud_region', False): add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region') if kwargs.get('cloud_endpoint', False): add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint') if kwargs.get('s3_path_style', False): add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style') (_, errors) = self.rest.add_backup_active_repository(repository_id, add_request_body) _exit_if_errors(errors) _success('Added repository')
-722,556,864,150,031,100
Adds a new active repository identified by 'repository_id' that uses 'plan' as its base. Args: repository_id (str): The ID to give to the repository. This must be unique; if it is not, an error will be returned. plan (str): The name of the plan to use as base for the repository. If it does not exist the service will return an error. archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead of a filesystem directory, use the syntax s3://<bucket-name>/<optional_prefix>/<archive>. **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key, cloud_region, staging_dir, cloud_endpoint, s3_path_style]
cbmgr.py
add_active_repository
b33f/couchbase-cli
python
def add_active_repository(self, repository_id: str, plan: str, archive: str, **kwargs): "Adds a new active repository identified by 'repository_id' and that uses 'plan' as base.\n\n Args:\n repository_id (str): The ID to give to the repository. This must be unique, if it is not an error will be\n returned.\n plan (str): The name of the plan to use as base for the repository. If it does not exist the service\n will return an error.\n archive (str): The location to store the data in. It must be accessible by all nodes. To use S3 instead of\n providing a path to a filesystem directory use the syntax.\n s3://<bucket-name>/<optional_prefix>/<archive>\n **kwargs: Optional parameters [bucket_name, credentials_name, credentials_id, credentials_key, cloud_region,\n staging_dir, cloud_endpoint, s3_path_style]\n " if (not repository_id): _exit_if_errors(['--id is required']) if (not plan): _exit_if_errors(['--plan is required']) if (not archive): _exit_if_errors(['--backup-archive is required']) _exit_if_errors(self.check_cloud_params(archive, **kwargs)) add_request_body = {'plan': plan, 'archive': archive} if kwargs.get('bucket_name', False): add_request_body['bucket_name'] = kwargs.get('bucket_name') if kwargs.get('credentials_name', False): add_request_body['cloud_credential_name'] = kwargs.get('credentials_name') if kwargs.get('credentials_id', False): add_request_body['cloud_credentials_id'] = kwargs.get('credentials_id') if kwargs.get('credentials_key', False): add_request_body['cloud_credentials_key'] = kwargs.get('credentials_key') if kwargs.get('cloud_region', False): add_request_body['cloud_credentials_region'] = kwargs.get('cloud_region') if kwargs.get('cloud_endpoint', False): add_request_body['cloud_endpoint'] = kwargs.get('cloud_endpoint') if kwargs.get('s3_path_style', False): add_request_body['cloud_force_path_style'] = kwargs.get('s3_path_style') (_, errors) = self.rest.add_backup_active_repository(repository_id, add_request_body) _exit_if_errors(errors) _success('Added repository')
@staticmethod def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]: 'Checks that inside kwargs there is a valid set of parameters to add a cloud repository\n Args:\n archive (str): The archive to use for the repository.\n ' if (not archive.startswith('s3://')): return None creds_name = kwargs.get('credentials_name') region = kwargs.get('cloud_region') creds_id = kwargs.get('credentials_id') creds_key = kwargs.get('credentials_key') staging_dir = kwargs.get('staging_dir') if ((creds_name and (creds_id or creds_key)) or ((not creds_name) and (not (creds_id or creds_key)))): return ['must provide either --cloud-credentials-name or --cloud-credentials-key and --cloud-credentials-id'] if (not staging_dir): return ['--cloud-staging-dir is required'] if ((not creds_name) and (not region)): return ['--cloud-credentials-region is required'] return None
2,361,866,333,037,891,000
Checks that inside kwargs there is a valid set of parameters to add a cloud repository Args: archive (str): The archive to use for the repository.
cbmgr.py
check_cloud_params
b33f/couchbase-cli
python
@staticmethod def check_cloud_params(archive: str, **kwargs) -> Optional[List[str]]: 'Checks that inside kwargs there is a valid set of parameters to add a cloud repository\n Args:\n archive (str): The archive to use for the repository.\n ' if (not archive.startswith('s3://')): return None creds_name = kwargs.get('credentials_name') region = kwargs.get('cloud_region') creds_id = kwargs.get('credentials_id') creds_key = kwargs.get('credentials_key') staging_dir = kwargs.get('staging_dir') if ((creds_name and (creds_id or creds_key)) or ((not creds_name) and (not (creds_id or creds_key)))): return ['must provide either --cloud-credentials-name or --cloud-credentials-key and --cloud-credentials-id'] if (not staging_dir): return ['--cloud-staging-dir is required'] if ((not creds_name) and (not region)): return ['--cloud-credentials-region is required'] return None
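Illustrative outcomes of the cloud-parameter validation above, assuming cbmgr is importable so BackupServiceRepository is in scope; the bucket, credential and directory names are made up.

# A plain filesystem archive skips the cloud checks entirely.
assert BackupServiceRepository.check_cloud_params('/backups/archive1') is None
# An S3 archive with a stored credential name and a staging directory passes.
assert BackupServiceRepository.check_cloud_params(
    's3://backup-bucket/archive1', credentials_name='my-creds', staging_dir='/tmp/staging') is None
# An S3 archive with neither a credential name nor an id/key pair is rejected.
assert BackupServiceRepository.check_cloud_params('s3://backup-bucket/archive1') is not None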
def archive_repository(self, repository_id, new_id): 'Archive an repository. The archived repository will have the id `new_id`\n\n Args:\n repository_id (str): The active repository ID to be archived\n new_id (str): The id that will be given to the archived repository\n ' if (not repository_id): _exit_if_errors(['--id is required']) if (not new_id): _exit_if_errors(['--new-id is required']) (_, errors) = self.rest.archive_backup_repository(repository_id, new_id) _exit_if_errors(errors) _success('Archived repository')
-480,300,558,992,815,900
Archive a repository. The archived repository will have the id `new_id` Args: repository_id (str): The active repository ID to be archived new_id (str): The id that will be given to the archived repository
cbmgr.py
archive_repository
b33f/couchbase-cli
python
def archive_repository(self, repository_id, new_id): 'Archive an repository. The archived repository will have the id `new_id`\n\n Args:\n repository_id (str): The active repository ID to be archived\n new_id (str): The id that will be given to the archived repository\n ' if (not repository_id): _exit_if_errors(['--id is required']) if (not new_id): _exit_if_errors(['--new-id is required']) (_, errors) = self.rest.archive_backup_repository(repository_id, new_id) _exit_if_errors(errors) _success('Archived repository')
def list_repositories(self, state=None, json_out=False): "List the backup repositories.\n\n If a repository state is given only repositories in that state will be listed. This command supports listing both in\n json and human friendly format.\n\n Args:\n state (str, optional): One of ['active', 'imported', 'archived']. The repository on this state will be\n retrieved.\n json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.\n " states = (['active', 'archived', 'imported'] if (state is None) else [state]) results = {} for get_state in states: (repositories, errors) = self.rest.get_backup_service_repositories(state=get_state) _exit_if_errors(errors) results[get_state] = repositories if json_out: print(json.dumps(results, indent=2)) else: self.human_friendly_print_repositories(results)
4,899,242,005,880,844,000
List the backup repositories. If a repository state is given only repositories in that state will be listed. This command supports listing both in json and human friendly format. Args: state (str, optional): One of ['active', 'imported', 'archived']. The repository on this state will be retrieved. json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
cbmgr.py
list_repositories
b33f/couchbase-cli
python
def list_repositories(self, state=None, json_out=False): "List the backup repositories.\n\n If a repository state is given only repositories in that state will be listed. This command supports listing both in\n json and human friendly format.\n\n Args:\n state (str, optional): One of ['active', 'imported', 'archived']. The repository on this state will be\n retrieved.\n json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.\n " states = (['active', 'archived', 'imported'] if (state is None) else [state]) results = {} for get_state in states: (repositories, errors) = self.rest.get_backup_service_repositories(state=get_state) _exit_if_errors(errors) results[get_state] = repositories if json_out: print(json.dumps(results, indent=2)) else: self.human_friendly_print_repositories(results)
def get_repository(self, repository_id, state, json_out=False): 'Retrieves one repository from the backup service\n\n If the repository does not exist an error will be returned\n\n Args:\n repository_id (str): The repository id to be retrieved\n state (str): The state of the repository to retrieve\n json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.\n ' if (not repository_id): _exit_if_errors(['--id is required']) if (not state): _exit_if_errors(['--state is required']) (repository, errors) = self.rest.get_backup_service_repository(repository_id, state) _exit_if_errors(errors) if json_out: print(json.dumps(repository, indent=2)) else: self.human_firendly_print_repository(repository)
-1,288,550,630,235,059,200
Retrieves one repository from the backup service If the repository does not exist an error will be returned Args: repository_id (str): The repository id to be retrieved state (str): The state of the repository to retrieve json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.
cbmgr.py
get_repository
b33f/couchbase-cli
python
def get_repository(self, repository_id, state, json_out=False): 'Retrieves one repository from the backup service\n\n If the repository does not exist an error will be returned\n\n Args:\n repository_id (str): The repository id to be retrieved\n state (str): The state of the repository to retrieve\n json_out (bool): If True the output will be JSON otherwise it will be a human friendly format.\n ' if (not repository_id): _exit_if_errors(['--id is required']) if (not state): _exit_if_errors(['--state is required']) (repository, errors) = self.rest.get_backup_service_repository(repository_id, state) _exit_if_errors(errors) if json_out: print(json.dumps(repository, indent=2)) else: self.human_firendly_print_repository(repository)
@staticmethod def human_firendly_print_repository(repository): 'Print the repository in a human friendly format\n\n Args:\n repository (obj): The backup repository information\n ' print(f"ID: {repository['id']}") print(f"State: {repository['state']}") print(f"Healthy: {(not (('health' in repository) and (not repository['health']['healthy'])))!s}") print(f"Archive: {repository['archive']}") print(f"Repository: {repository['repo']}") if ('bucket' in repository): print(f"Bucket: {repository['bucket']['name']}") if (('plan_name' in repository) and (repository['plan_name'] != '')): print(f"plan: {repository['plan_name']}") print(f"Creation time: {repository['creation_time']}") if (('scheduled' in repository) and repository['scheduled']): print() BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled']) one_off = (repository['running_one_off'] if ('running_one_off' in repository) else None) running_scheduled = (repository['running_tasks'] if ('running_tasks' in repository) else None) if (one_off or running_scheduled): print() BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled)
-6,050,599,214,953,128,000
Print the repository in a human friendly format Args: repository (obj): The backup repository information
cbmgr.py
human_firendly_print_repository
b33f/couchbase-cli
python
@staticmethod def human_firendly_print_repository(repository): 'Print the repository in a human friendly format\n\n Args:\n repository (obj): The backup repository information\n ' print(f"ID: {repository['id']}") print(f"State: {repository['state']}") print(f"Healthy: {(not (('health' in repository) and (not repository['health']['healthy'])))!s}") print(f"Archive: {repository['archive']}") print(f"Repository: {repository['repo']}") if ('bucket' in repository): print(f"Bucket: {repository['bucket']['name']}") if (('plan_name' in repository) and (repository['plan_name'] != '')): print(f"plan: {repository['plan_name']}") print(f"Creation time: {repository['creation_time']}") if (('scheduled' in repository) and repository['scheduled']): print() BackupServiceRepository.human_firendly_print_repository_scheduled_tasks(repository['scheduled']) one_off = (repository['running_one_off'] if ('running_one_off' in repository) else None) running_scheduled = (repository['running_tasks'] if ('running_tasks' in repository) else None) if (one_off or running_scheduled): print() BackupServiceRepository.human_friendly_print_running_tasks(one_off, running_scheduled)
@staticmethod def human_friendly_print_running_tasks(one_off, scheduled): 'Prints the running task summary in a human friendly way\n\n Args:\n one_off (map<str, task object>): Running one off tasks\n scheduled (map<str, task object>): Running scheduled tasks\n ' all_vals = [] name_pad = 5 if one_off: for name in one_off: if (len(name) > name_pad): name_pad = len(name) all_vals += one_off.values() if scheduled: for name in scheduled: if (len(name) > name_pad): name_pad = len(name) all_vals += scheduled.values() name_pad += 1 header = f"{'Name':<{name_pad}}| Task type | Status | Start" print(header) print(('-' * (len(header) + 5))) for task in all_vals: print(f"{task['name']:<{name_pad}}| {task['type'].title():<10}| {task['status']:<8} | {task['start']}")
-7,640,512,187,726,037,000
Prints the running task summary in a human friendly way Args: one_off (map<str, task object>): Running one off tasks scheduled (map<str, task object>): Running scheduled tasks
cbmgr.py
human_friendly_print_running_tasks
b33f/couchbase-cli
python
@staticmethod def human_friendly_print_running_tasks(one_off, scheduled): 'Prints the running task summary in a human friendly way\n\n Args:\n one_off (map<str, task object>): Running one off tasks\n scheduled (map<str, task object>): Running scheduled tasks\n ' all_vals = [] name_pad = 5 if one_off: for name in one_off: if (len(name) > name_pad): name_pad = len(name) all_vals += one_off.values() if scheduled: for name in scheduled: if (len(name) > name_pad): name_pad = len(name) all_vals += scheduled.values() name_pad += 1 header = f"{'Name':<{name_pad}}| Task type | Status | Start" print(header) print(('-' * (len(header) + 5))) for task in all_vals: print(f"{task['name']:<{name_pad}}| {task['type'].title():<10}| {task['status']:<8} | {task['start']}")
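The table printers above rely on nested f-string format specs for dynamic column widths; a minimal standalone illustration with a made-up task.

tasks = [{'name': 'weekly-backup', 'type': 'BACKUP', 'status': 'running', 'start': '2021-01-01T00:00:00Z'}]
name_pad = max(5, max(len(t['name']) for t in tasks)) + 1  # widen the Name column to fit the longest name
header = f"{'Name':<{name_pad}}| Task type | Status | Start"
print(header)
print('-' * (len(header) + 5))
for t in tasks:
    print(f"{t['name']:<{name_pad}}| {t['type'].title():<10}| {t['status']:<8} | {t['start']}")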
@staticmethod def human_firendly_print_repository_scheduled_tasks(scheduled): 'Print the scheduled task in a tabular format' name_pad = 5 for name in scheduled: if (len(name) > name_pad): name_pad = len(name) name_pad += 1 header = f"{'Name':<{name_pad}}| Task type | Next run" print('Scheduled tasks:') print(header) print(('-' * (len(header) + 5))) for task in scheduled.values(): print(f"{task['name']:<{name_pad}}| {task['task_type'].title():<10}| {task['next_run']}")
6,615,798,119,820,247,000
Print the scheduled task in a tabular format
cbmgr.py
human_firendly_print_repository_scheduled_tasks
b33f/couchbase-cli
python
@staticmethod def human_firendly_print_repository_scheduled_tasks(scheduled): name_pad = 5 for name in scheduled: if (len(name) > name_pad): name_pad = len(name) name_pad += 1 header = f"{'Name':<{name_pad}}| Task type | Next run" print('Scheduled tasks:') print(header) print(('-' * (len(header) + 5))) for task in scheduled.values(): print(f"{task['name']:<{name_pad}}| {task['task_type'].title():<10}| {task['next_run']}")
@staticmethod def human_friendly_print_repositories(repositories_map): 'This will print the repositories in a tabular format\n\n Args:\n repository_map (map<state (str), repository (list of objects)>)\n ' repository_count = 0 id_pad = 5 plan_pad = 7 for repositories in repositories_map.values(): for repository in repositories: repository_count += 1 if (id_pad < len(repository['id'])): id_pad = len(repository['id']) if (('plan_name' in repository) and (plan_pad < len(repository['plan_name']))): plan_pad = len(repository['plan_name']) if (repository_count == 0): print('No repositories found') return plan_pad += 1 id_pad += 1 header = f"{'ID':<{id_pad}}| {'State':<9}| {'plan':<{plan_pad}}| Healthy | Repository" print(header) print(('-' * len(header))) for (_, repositories) in sorted(repositories_map.items()): for repository in repositories: healthy = (not (('health' in repository) and (not repository['health']['healthy']))) plan_name = 'N/A' if (('plan_name' in repository) and (len(repository['plan_name']) != 0)): plan_name = repository['plan_name'] print(f"{repository['id']:<{id_pad}}| {repository['state']:<9}| {plan_name:<{plan_pad}}| {healthy!s:<7}| {repository['repo']}")
-8,155,784,159,145,587,000
This will print the repositories in a tabular format Args: repositories_map (map<state (str), repositories (list of objects)>)
cbmgr.py
human_friendly_print_repositories
b33f/couchbase-cli
python
@staticmethod def human_friendly_print_repositories(repositories_map): 'This will print the repositories in a tabular format\n\n Args:\n repository_map (map<state (str), repository (list of objects)>)\n ' repository_count = 0 id_pad = 5 plan_pad = 7 for repositories in repositories_map.values(): for repository in repositories: repository_count += 1 if (id_pad < len(repository['id'])): id_pad = len(repository['id']) if (('plan_name' in repository) and (plan_pad < len(repository['plan_name']))): plan_pad = len(repository['plan_name']) if (repository_count == 0): print('No repositories found') return plan_pad += 1 id_pad += 1 header = f"{'ID':<{id_pad}}| {'State':<9}| {'plan':<{plan_pad}}| Healthy | Repository" print(header) print(('-' * len(header))) for (_, repositories) in sorted(repositories_map.items()): for repository in repositories: healthy = (not (('health' in repository) and (not repository['health']['healthy']))) plan_name = 'N/A' if (('plan_name' in repository) and (len(repository['plan_name']) != 0)): plan_name = repository['plan_name'] print(f"{repository['id']:<{id_pad}}| {repository['state']:<9}| {plan_name:<{plan_pad}}| {healthy!s:<7}| {repository['repo']}")
def __init__(self, subparser): 'setup the parser' self.rest = None plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False, allow_abbrev=False) action_group = plan_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='List all available backup plans') action_group.add_argument('--get', action='store_true', help='Get a plan by name') action_group.add_argument('--remove', action='store_true', help='Remove a plan by name') action_group.add_argument('--add', action='store_true', help='Add a new plan') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help='Prints the short or long help message') options = plan_parser.add_argument_group('Plan options') options.add_argument('--name', metavar='<name>', help='Plan name') options.add_argument('--description', metavar='<description>', help='Optional description') options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup') options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition')
7,797,398,368,704,295,000
setup the parser
cbmgr.py
__init__
b33f/couchbase-cli
python
def __init__(self, subparser): self.rest = None plan_parser = subparser.add_parser('plan', help='Manage backup plans', add_help=False, allow_abbrev=False) action_group = plan_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('--list', action='store_true', help='List all available backup plans') action_group.add_argument('--get', action='store_true', help='Get a plan by name') action_group.add_argument('--remove', action='store_true', help='Remove a plan by name') action_group.add_argument('--add', action='store_true', help='Add a new plan') action_group.add_argument('-h', '--help', action=CBHelpAction, klass=self, help='Prints the short or long help message') options = plan_parser.add_argument_group('Plan options') options.add_argument('--name', metavar='<name>', help='Plan name') options.add_argument('--description', metavar='<description>', help='Optional description') options.add_argument('--services', metavar='<services>', help='A comma separated list of services to backup') options.add_argument('--task', metavar='<tasks>', nargs='+', help='JSON task definition')
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): 'Run the backup plan managment command' if opts.list: self.list_plans((opts.output == 'json')) elif opts.get: self.get_plan(opts.name, (opts.output == 'json')) elif opts.remove: self.remove_plan(opts.name) elif opts.add: self.add_plan(opts.name, opts.services, opts.task, opts.description)
6,970,312,617,960,120,000
Run the backup plan management command
cbmgr.py
execute
b33f/couchbase-cli
python
@rest_initialiser(version_check=True, enterprise_check=True, cluster_init_check=True) def execute(self, opts): if opts.list: self.list_plans((opts.output == 'json')) elif opts.get: self.get_plan(opts.name, (opts.output == 'json')) elif opts.remove: self.remove_plan(opts.name) elif opts.add: self.add_plan(opts.name, opts.services, opts.task, opts.description)
def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]): 'Add a new backup plan\n\n The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service.\n Args:\n name (str): The name to give the new plan. It must be unique.\n services (optional list): A list of services to backup if empty all services are backed up.\n tasks (optional list): A list of JSON strings representing the tasks to be run.\n description (optional str): A optional description string.\n ' if (not name): _exit_if_errors(['--name is required']) service_list = [] if services: service_list = [service.strip() for service in services.split(',')] tasks_objects = [] if tasks: for task_str in tasks: try: task = json.loads(task_str) tasks_objects.append(task) except json.decoder.JSONDecodeError as json_error: _exit_if_errors([f'invalid task {json_error!s}']) plan = {} if service_list: plan['services'] = service_list if tasks_objects: plan['tasks'] = tasks_objects if description: plan['description'] = description (_, errors) = self.rest.add_backup_plan(name, plan) _exit_if_errors(errors) _success('Added plan')
-9,096,600,411,109,224,000
Add a new backup plan The validation of the inputs in the CLI is intentionally minimal, as it is offloaded to the backup service. Args: name (str): The name to give the new plan. It must be unique. services (optional list): A list of services to back up; if empty, all services are backed up. tasks (optional list): A list of JSON strings representing the tasks to be run. description (optional str): An optional description string.
cbmgr.py
add_plan
b33f/couchbase-cli
python
def add_plan(self, name: str, services: Optional[str], tasks: Optional[List[str]], description: Optional[str]): 'Add a new backup plan\n\n The validation of the inputs in the CLI is intentionally lacking as this is offloaded to the backup service.\n Args:\n name (str): The name to give the new plan. It must be unique.\n services (optional list): A list of services to backup if empty all services are backed up.\n tasks (optional list): A list of JSON strings representing the tasks to be run.\n description (optional str): A optional description string.\n ' if (not name): _exit_if_errors(['--name is required']) service_list = [] if services: service_list = [service.strip() for service in services.split(',')] tasks_objects = [] if tasks: for task_str in tasks: try: task = json.loads(task_str) tasks_objects.append(task) except json.decoder.JSONDecodeError as json_error: _exit_if_errors([f'invalid task {json_error!s}']) plan = {} if service_list: plan['services'] = service_list if tasks_objects: plan['tasks'] = tasks_objects if description: plan['description'] = description (_, errors) = self.rest.add_backup_plan(name, plan) _exit_if_errors(errors) _success('Added plan')
def remove_plan(self, name: str): 'Removes a plan by name' if (not name): _exit_if_errors(['--name is required']) (_, errors) = self.rest.delete_backup_plan(name) _exit_if_errors(errors) _success('Plan removed')
3,467,677,332,179,611,600
Removes a plan by name
cbmgr.py
remove_plan
b33f/couchbase-cli
python
def remove_plan(self, name: str): if (not name): _exit_if_errors(['--name is required']) (_, errors) = self.rest.delete_backup_plan(name) _exit_if_errors(errors) _success('Plan removed')
def get_plan(self, name: str, json_output: bool=False): 'Gets a backup plan by name\n\n Args:\n name (str): The name of the plan to retrieve\n json_output (bool): Whether to print in JSON or a more human friendly way\n ' if (not name): _exit_if_errors(['--name is required']) (plan, errors) = self.rest.get_backup_plan(name) _exit_if_errors(errors) if json_output: print(json.dumps(plan, indent=2)) else: self.human_print_plan(plan)
411,347,194,790,967,360
Gets a backup plan by name Args: name (str): The name of the plan to retrieve json_output (bool): Whether to print in JSON or a more human friendly way
cbmgr.py
get_plan
b33f/couchbase-cli
python
def get_plan(self, name: str, json_output: bool=False): 'Gets a backup plan by name\n\n Args:\n name (str): The name of the plan to retrieve\n json_output (bool): Whether to print in JSON or a more human friendly way\n ' if (not name): _exit_if_errors(['--name is required']) (plan, errors) = self.rest.get_backup_plan(name) _exit_if_errors(errors) if json_output: print(json.dumps(plan, indent=2)) else: self.human_print_plan(plan)
def list_plans(self, json_output: bool=False): 'Prints all the plans stored in the backup service\n\n Args:\n json_output (bool): Whether to print in JSON or a more human friendly way\n ' (plans, errors) = self.rest.list_backup_plans() _exit_if_errors(errors) if json_output: print(json.dumps(plans, indent=2)) else: self.human_print_plans(plans)
-8,035,865,599,006,419,000
Prints all the plans stored in the backup service Args: json_output (bool): Whether to print in JSON or a more human friendly way
cbmgr.py
list_plans
b33f/couchbase-cli
python
def list_plans(self, json_output: bool=False): 'Prints all the plans stored in the backup service\n\n Args:\n json_output (bool): Whether to print in JSON or a more human friendly way\n ' (plans, errors) = self.rest.list_backup_plans() _exit_if_errors(errors) if json_output: print(json.dumps(plans, indent=2)) else: self.human_print_plans(plans)
@staticmethod def human_print_plan(plan: object): 'Prints the plan in a human friendly way' print(f"Name: {plan['name']}") print(f"Description: {(plan['description'] if ('description' in plan) else 'N/A')}") print(f"Services: {BackupServicePlan.service_list_to_str(plan['services'])}") print(f"Default: {(plan['default'] if ('deafult' in plan) else False)!s}") if (not plan['tasks']): return print() print('Tasks:') task_name_pad = 5 schedule_pad = 10 for task in plan['tasks']: if (len(task['name']) > task_name_pad): task_name_pad = len(task['name']) task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule']) if (len(task['schedule_str']) > schedule_pad): schedule_pad = len(task['schedule_str']) task_name_pad += 1 schedule_pad += 1 header = f"{'Name':<{task_name_pad}} | {'Schedule':<{schedule_pad}} | Options" print(header) print(('-' * (len(header) + 5))) for task in plan['tasks']: options = BackupServicePlan.format_options(task) print(f"{task['name']:<{task_name_pad}} | {task['schedule_str']:<{schedule_pad}} | {options}")
4,812,293,286,272,715,000
Prints the plan in a human friendly way
cbmgr.py
human_print_plan
b33f/couchbase-cli
python
@staticmethod def human_print_plan(plan: object): print(f"Name: {plan['name']}") print(f"Description: {(plan['description'] if ('description' in plan) else 'N/A')}") print(f"Services: {BackupServicePlan.service_list_to_str(plan['services'])}") print(f"Default: {(plan['default'] if ('deafult' in plan) else False)!s}") if (not plan['tasks']): return print() print('Tasks:') task_name_pad = 5 schedule_pad = 10 for task in plan['tasks']: if (len(task['name']) > task_name_pad): task_name_pad = len(task['name']) task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule']) if (len(task['schedule_str']) > schedule_pad): schedule_pad = len(task['schedule_str']) task_name_pad += 1 schedule_pad += 1 header = f"{'Name':<{task_name_pad}} | {'Schedule':<{schedule_pad}} | Options" print(header) print(('-' * (len(header) + 5))) for task in plan['tasks']: options = BackupServicePlan.format_options(task) print(f"{task['name']:<{task_name_pad}} | {task['schedule_str']:<{schedule_pad}} | {options}")
@staticmethod def format_options(task: object) -> str: 'Format the full backup or merge options' options = 'N/A' if ((task['task_type'] == 'BACKUP') and task['full_backup']): options = 'Full backup' elif (task['task_type'] == 'MERGE'): if ('merge_options' in task): options = f"Merge from {task['merge_options']['offset_start']} to {task['merge_options']['offset_end']}" else: options = 'Merge everything' return options
-8,492,650,752,532,442,000
Format the full backup or merge options
cbmgr.py
format_options
b33f/couchbase-cli
python
@staticmethod def format_options(task: object) -> str: options = 'N/A' if ((task['task_type'] == 'BACKUP') and task['full_backup']): options = 'Full backup' elif (task['task_type'] == 'MERGE'): if ('merge_options' in task): options = f"Merge from {task['merge_options']['offset_start']} to {task['merge_options']['offset_end']}" else: options = 'Merge everything' return options
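Example values for format_options above, assuming cbmgr is importable so BackupServicePlan is in scope; the task dicts are illustrative.

assert BackupServicePlan.format_options({'task_type': 'BACKUP', 'full_backup': True}) == 'Full backup'
assert BackupServicePlan.format_options(
    {'task_type': 'MERGE', 'merge_options': {'offset_start': 0, 'offset_end': 2}}) == 'Merge from 0 to 2'
assert BackupServicePlan.format_options({'task_type': 'MERGE'}) == 'Merge everything'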
@staticmethod def format_schedule(schedule: object) -> str: 'Format the schedule object in a string of the format <task> every <frequency>? <period> (at <time>)?' task_start = f"{schedule['job_type'].lower()}" frequency_part = 'every' if (schedule['frequency'] == 1): period = schedule['period'].lower() period = (period if (period[(- 1)] != 's') else period[:(- 1)]) frequency_part += f' {period}' else: frequency_part += f" {schedule['frequency']} {schedule['period'].lower()}" time_part = (f" at {schedule['time']}" if ('time' in schedule) else '') return f'{task_start} {frequency_part}{time_part}'
7,484,876,531,967,456,000
Format the schedule object in a string of the format <task> every <frequency>? <period> (at <time>)?
cbmgr.py
format_schedule
b33f/couchbase-cli
python
@staticmethod def format_schedule(schedule: object) -> str: task_start = f"{schedule['job_type'].lower()}" frequency_part = 'every' if (schedule['frequency'] == 1): period = schedule['period'].lower() period = (period if (period[(- 1)] != 's') else period[:(- 1)]) frequency_part += f' {period}' else: frequency_part += f" {schedule['frequency']} {schedule['period'].lower()}" time_part = (f" at {schedule['time']}" if ('time' in schedule) else '') return f'{task_start} {frequency_part}{time_part}'
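Worked examples for format_schedule above, again assuming BackupServicePlan is in scope; the schedule dicts (period names, times) are illustrative.

assert BackupServicePlan.format_schedule(
    {'job_type': 'BACKUP', 'frequency': 1, 'period': 'DAYS', 'time': '22:00'}) == 'backup every day at 22:00'
assert BackupServicePlan.format_schedule(
    {'job_type': 'MERGE', 'frequency': 2, 'period': 'WEEKS'}) == 'merge every 2 weeks'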
@staticmethod def human_print_plans(plans: List[Any]): 'Prints a table with an overview of each plan' if (not plans): print('No plans') return name_pad = 5 service_pad = 8 for plan in plans: if (len(plan['name']) > name_pad): name_pad = len(plan['name']) services_str = BackupServicePlan.service_list_to_str(plan['services']) if (len(services_str) > service_pad): service_pad = len(services_str) name_pad += 1 service_pad += 1 header = f"{'Name':<{name_pad}} | # Tasks | {'Services':<{service_pad}} | Default" print(header) print(('-' * (len(header) + 5))) for plan in plans: task_len = (len(plan['tasks']) if (('tasks' in plan) and plan['tasks']) else 0) print(f"{plan['name']:<{name_pad}} | {task_len:<7} | {BackupServicePlan.service_list_to_str(plan['services']):<{service_pad}} | {(plan['default'] if ('default' in plan) else False)!s}")
5,685,385,219,044,567,000
Prints a table with an overview of each plan
cbmgr.py
human_print_plans
b33f/couchbase-cli
python
@staticmethod def human_print_plans(plans: List[Any]): if (not plans): print('No plans') return name_pad = 5 service_pad = 8 for plan in plans: if (len(plan['name']) > name_pad): name_pad = len(plan['name']) services_str = BackupServicePlan.service_list_to_str(plan['services']) if (len(services_str) > service_pad): service_pad = len(services_str) name_pad += 1 service_pad += 1 header = f"{'Name':<{name_pad}} | # Tasks | {'Services':<{service_pad}} | Default" print(header) print(('-' * (len(header) + 5))) for plan in plans: task_len = (len(plan['tasks']) if (('tasks' in plan) and plan['tasks']) else 0) print(f"{plan['name']:<{name_pad}} | {task_len:<7} | {BackupServicePlan.service_list_to_str(plan['services']):<{service_pad}} | {(plan['default'] if ('default' in plan) else False)!s}")
@staticmethod def service_list_to_str(services: Optional[List[Any]]) -> str: 'convert the list of services to a concise list of services' if (not services): return 'all' convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'} return ', '.join([(convert[service] if (service in convert) else service.title()) for service in services])
-5,899,828,531,706,344,000
Convert the list of services to a concise, human-readable string
cbmgr.py
service_list_to_str
b33f/couchbase-cli
python
@staticmethod def service_list_to_str(services: Optional[List[Any]]) -> str: if (not services): return 'all' convert = {'gsi': 'Indexing', 'cbas': 'Analytics', 'ft': 'Full Text Search'} return ', '.join([(convert[service] if (service in convert) else service.title()) for service in services])
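Example conversions for service_list_to_str above; None means the plan covers all services.

assert BackupServicePlan.service_list_to_str(None) == 'all'
assert BackupServicePlan.service_list_to_str(['data', 'gsi', 'cbas']) == 'Data, Indexing, Analytics'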
def build_evaluator_list(base_ds, dataset_name): 'Helper function to build the list of evaluators for a given dataset' evaluator_list = [] if args.no_detection: return evaluator_list iou_types = ['bbox'] if args.masks: iou_types.append('segm') evaluator_list.append(CocoEvaluator(base_ds, tuple(iou_types), useCats=False)) if ('refexp' in dataset_name): evaluator_list.append(RefExpEvaluator(base_ds, 'bbox')) if ('clevrref' in dataset_name): evaluator_list.append(ClevrRefEvaluator(base_ds, 'bbox')) if ('flickr' in dataset_name): evaluator_list.append(FlickrEvaluator(args.flickr_dataset_path, subset=('test' if args.test else 'val'), merge_boxes=(args.GT_type == 'merged'))) if ('phrasecut' in dataset_name): evaluator_list.append(PhrasecutEvaluator(('test' if args.test else 'miniv'), ann_folder=args.phrasecut_orig_ann_path, output_dir=os.path.join(output_dir, 'phrasecut_eval'), eval_mask=args.masks)) return evaluator_list
-2,649,098,745,732,784,000
Helper function to build the list of evaluators for a given dataset
main.py
build_evaluator_list
TopCoder2K/mdetr
python
def build_evaluator_list(base_ds, dataset_name): evaluator_list = [] if args.no_detection: return evaluator_list iou_types = ['bbox'] if args.masks: iou_types.append('segm') evaluator_list.append(CocoEvaluator(base_ds, tuple(iou_types), useCats=False)) if ('refexp' in dataset_name): evaluator_list.append(RefExpEvaluator(base_ds, 'bbox')) if ('clevrref' in dataset_name): evaluator_list.append(ClevrRefEvaluator(base_ds, 'bbox')) if ('flickr' in dataset_name): evaluator_list.append(FlickrEvaluator(args.flickr_dataset_path, subset=('test' if args.test else 'val'), merge_boxes=(args.GT_type == 'merged'))) if ('phrasecut' in dataset_name): evaluator_list.append(PhrasecutEvaluator(('test' if args.test else 'miniv'), ann_folder=args.phrasecut_orig_ann_path, output_dir=os.path.join(output_dir, 'phrasecut_eval'), eval_mask=args.masks)) return evaluator_list
def time_nifti_to_numpy(N_TRIALS): '\n Times how fast a framework can read a nifti file and convert it to numpy\n ' datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') img_paths = [] for dtype in ['CHAR', 'DOUBLE', 'FLOAT', 'SHORT', 'UNSIGNEDCHAR', 'UNSIGNEDSHORT']: for dim in [2, 3]: img_paths.append(os.path.join(datadir, ('image_%s_%iD.nii.gz' % (dtype, dim)))) def test_nibabel(): for img_path in img_paths: array = nib.load(img_path).get_data() def test_itk(): for img_path in img_paths: array = itk.GetArrayFromImage(itk.imread(img_path)) def test_ants(): for img_path in img_paths: array = ants.image_read(img_path).numpy() nib_start = time.time() for i in range(N_TRIALS): test_nibabel() nib_end = time.time() print(('NIBABEL TIME: %.3f seconds' % (nib_end - nib_start))) itk_start = time.time() for i in range(N_TRIALS): test_itk() itk_end = time.time() print(('ITK TIME: %.3f seconds' % (itk_end - itk_start))) ants_start = time.time() for i in range(N_TRIALS): test_ants() ants_end = time.time() print(('ANTS TIME: %.3f seconds' % (ants_end - ants_start)))
-2,848,315,200,530,991,600
Times how fast a framework can read a nifti file and convert it to numpy
tests/timings.py
time_nifti_to_numpy
ncullen93/ANTsPy
python
def time_nifti_to_numpy(N_TRIALS): '\n \n ' datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data') img_paths = [] for dtype in ['CHAR', 'DOUBLE', 'FLOAT', 'SHORT', 'UNSIGNEDCHAR', 'UNSIGNEDSHORT']: for dim in [2, 3]: img_paths.append(os.path.join(datadir, ('image_%s_%iD.nii.gz' % (dtype, dim)))) def test_nibabel(): for img_path in img_paths: array = nib.load(img_path).get_data() def test_itk(): for img_path in img_paths: array = itk.GetArrayFromImage(itk.imread(img_path)) def test_ants(): for img_path in img_paths: array = ants.image_read(img_path).numpy() nib_start = time.time() for i in range(N_TRIALS): test_nibabel() nib_end = time.time() print(('NIBABEL TIME: %.3f seconds' % (nib_end - nib_start))) itk_start = time.time() for i in range(N_TRIALS): test_itk() itk_end = time.time() print(('ITK TIME: %.3f seconds' % (itk_end - itk_start))) ants_start = time.time() for i in range(N_TRIALS): test_ants() ants_end = time.time() print(('ANTS TIME: %.3f seconds' % (ants_end - ants_start)))
def ElusimicrobiaBacteriumRifoxyc2Full3412(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph: 'Return new instance of the Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n ' return AutomaticallyRetrievedGraph(graph_name='ElusimicrobiaBacteriumRifoxyc2Full3412', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
-3,087,674,889,159,628,300
Return new instance of the Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Whether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Whether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instance of Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ```
bindings/python/ensmallen/datasets/string/elusimicrobiabacteriumrifoxyc2full3412.py
ElusimicrobiaBacteriumRifoxyc2Full3412
AnacletoLAB/ensmallen
python
def ElusimicrobiaBacteriumRifoxyc2Full3412(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph: 'Return new instance of the Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Elusimicrobia bacterium RIFOXYC2_FULL_34_12 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n ' return AutomaticallyRetrievedGraph(graph_name='ElusimicrobiaBacteriumRifoxyc2Full3412', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
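A hedged usage sketch for the retrieval wrapper above. The import path is inferred from the record's file path field, and the call assumes network access to STRING or an already-populated `graphs/string` cache.

```python
# Hypothetical usage of the STRING retrieval wrapper above; assumes the ensmallen
# package is installed and the files can be downloaded or are already cached.
from ensmallen.datasets.string import ElusimicrobiaBacteriumRifoxyc2Full3412

graph = ElusimicrobiaBacteriumRifoxyc2Full3412(
    directed=False,
    version="links.v11.5",     # one of the three versions listed in the docstring
    cache_path="graphs/string",
)

# Quick look at what came back (an ensmallen Graph instance).
print(graph)
```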
def __init__(__self__, *, group_id: pulumi.Input[str], users: pulumi.Input[Sequence[pulumi.Input[str]]]): '\n The set of arguments for constructing a GroupMemberships resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' pulumi.set(__self__, 'group_id', group_id) pulumi.set(__self__, 'users', users)
-5,059,611,389,573,100,000
The set of arguments for constructing a GroupMemberships resource. :param pulumi.Input[str] group_id: ID of an Okta group. :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs for which group membership should be managed.
sdk/python/pulumi_okta/group_memberships.py
__init__
pulumi/pulumi-okta
python
def __init__(__self__, *, group_id: pulumi.Input[str], users: pulumi.Input[Sequence[pulumi.Input[str]]]): '\n The set of arguments for constructing a GroupMemberships resource.\n :param pulumi.Input[str] group_id: ID of a Okta group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] users: The list of Okta user IDs which the group should have membership managed for.\n ' pulumi.set(__self__, 'group_id', group_id) pulumi.set(__self__, 'users', users)
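A minimal sketch of how these constructor arguments are typically consumed in a Pulumi program. It assumes `pulumi` and `pulumi_okta` are installed and an Okta provider is configured; the resource name and all IDs below are placeholders.

```python
# Hypothetical Pulumi program snippet; assumes pulumi and pulumi_okta are installed
# and an Okta provider is configured. The group and user IDs are placeholders.
import pulumi
import pulumi_okta as okta

memberships = okta.GroupMemberships(
    "example-memberships",
    group_id="00g0000000example",                    # placeholder Okta group ID
    users=["00u0000000alice", "00u0000000bob"],      # placeholder Okta user IDs
)

pulumi.export("managed_group_id", memberships.group_id)
```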
@property @pulumi.getter(name='groupId') def group_id(self) -> pulumi.Input[str]: '\n ID of a Okta group.\n ' return pulumi.get(self, 'group_id')
-9,015,551,660,011,583,000
ID of an Okta group.
sdk/python/pulumi_okta/group_memberships.py
group_id
pulumi/pulumi-okta
python
@property @pulumi.getter(name='groupId') def group_id(self) -> pulumi.Input[str]: '\n \n ' return pulumi.get(self, 'group_id')
@property @pulumi.getter def users(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: '\n The list of Okta user IDs which the group should have membership managed for.\n ' return pulumi.get(self, 'users')
1,672,975,142,781,222,700
The list of Okta user IDs for which group membership should be managed.
sdk/python/pulumi_okta/group_memberships.py
users
pulumi/pulumi-okta
python
@property @pulumi.getter def users(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]: '\n \n ' return pulumi.get(self, 'users')
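The two getters above simply read back what the `GroupMembershipsArgs` constructor stored. A hedged round-trip illustration, assuming the args class is exported at package level as its location in `pulumi_okta/group_memberships.py` suggests, and reusing the placeholder IDs from the previous sketch:

```python
# Hypothetical round-trip through the args class and the getters above.
import pulumi_okta as okta

args = okta.GroupMembershipsArgs(
    group_id="00g0000000example",       # placeholder Okta group ID
    users=["00u0000000alice"],          # placeholder Okta user ID
)
print(args.group_id)  # -> "00g0000000example"
print(args.users)     # -> ["00u0000000alice"]
```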