Python
def dvr_due_followups(human_resource_id=None):
    """
        Number of activities due for follow-up

        @param human_resource_id: count only activities assigned to this HR
    """

    # Generate a request for case activities and customise it
    r = S3Request("dvr", "case_activity",
                  args = ["count_due_followups"],
                  get_vars = {},
                  )
    r.customise_resource()
    resource = r.resource

    # Filter to exclude closed case activities
    if current.deployment_settings.get_dvr_case_activity_use_status():
        status_filter = (FS("status_id$is_closed") == False)
    else:
        status_filter = (FS("completed") == False)

    # Filter for due follow-ups
    query = (FS("followup") == True) & \
            (FS("followup_date") <= datetime.datetime.utcnow().date()) & \
            status_filter & \
            (FS("person_id$dvr_case.archived") == False)

    if human_resource_id:
        query &= (FS("human_resource_id") == human_resource_id)

    resource.add_filter(query)

    return resource.count()
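Illustrative usage only (a sketch, not part of the module): how the counter might be surfaced, for example in a controller producing a short dashboard string. It assumes the surrounding Eden environment (current, S3Request, FS) is already initialised; the helper name is hypothetical.

def due_followups_summary(human_resource_id=None):
    """ Hypothetical helper producing a short status line """

    count = dvr_due_followups(human_resource_id = human_resource_id)
    return "%s case activities due for follow-up" % count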
Python
def link(self, k, v, row=None):
    """
        Represent a (key, value) as hypertext link

        @param k: the key (dvr_activity.id)
        @param v: the representation of the key
        @param row: the row with this key (unused here)
    """

    url = URL(c="dvr", f="activity", args=[k], extension="")

    return A(v, _href = url)
Python
def link(self, k, v, row=None):
    """
        Represent a (key, value) as hypertext link

        @param k: the key (dvr_case_activity.id)
        @param v: the representation of the key
        @param row: the row with this key
    """

    try:
        beneficiary = row.pr_person
    except AttributeError:
        return v

    url = URL(c = "dvr",
              f = "person",
              args = [beneficiary.id, "case_activity", k],
              extension = "",
              )

    return A(v, _href = url)
Python
def link(self, k, v, row=None):
    """
        Represent a (key, value) as hypertext link

        @param k: the key (doc_entity.doc_id)
        @param v: the representation of the key
        @param row: the row with this key
    """

    link = v

    if row:
        if row.instance_type == "dvr_case_activity":
            try:
                person_id = row.pr_person.id
                case_activity_id = row.dvr_case_activity.id
            except AttributeError:
                pass
            else:
                url = URL(c = "dvr",
                          f = "person",
                          args = [person_id,
                                  "case_activity",
                                  case_activity_id,
                                  ],
                          extension="",
                          )
                link = A(v, _href=url)

    return link
Python
def apply_method(self, r, **attr):
    """
        Main entry point for REST interface.

        @param r: the S3Request instance
        @param attr: controller parameters
    """

    # User must be permitted to update allowance information
    permitted = self._permitted("update")
    if not permitted:
        r.unauthorised()

    if r.representation in ("html", "iframe"):
        if r.http in ("GET", "POST"):
            output = self.bulk_update_status(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)
    else:
        r.error(415, current.ERROR.BAD_FORMAT)

    return output
Python
def bulk_update_status(self, r, **attr):
    """
        Method to bulk-update status of allowance payments

        @param r: the S3Request instance
        @param attr: controller parameters
    """

    T = current.T
    s3db = current.s3db

    settings = current.deployment_settings
    response = current.response

    output = {"title": T("Update Allowance Status"),
              }

    status_opts = dict(s3db.dvr_allowance_status_opts)

    # Can not bulk-update from or to status "paid"
    del status_opts[2]

    # Form fields
    formfields = [s3_date("from_date",
                          label = T("Planned From"),
                          set_min = "#allowance_to_date",
                          ),
                  s3_date("to_date",
                          default = "now",
                          label = T("Planned Until"),
                          set_max = "#allowance_from_date",
                          empty = False,
                          ),
                  Field("current_status", "integer",
                        default = 1, # pending
                        label = T("Current Status"),
                        requires = IS_IN_SET(status_opts),
                        ),
                  Field("new_status", "integer",
                        default = 4, # missed
                        label = T("New Status"),
                        requires = IS_IN_SET(status_opts),
                        ),
                  ]

    # Form buttons
    submit_btn = INPUT(_class = "tiny primary button",
                       _name = "submit",
                       _type = "submit",
                       _value = T("Update"),
                       )
    cancel_btn = A(T("Cancel"),
                   _href = r.url(id=None, method=""),
                   _class = "action-lnk",
                   )
    buttons = [submit_btn, cancel_btn]

    # Generate the form and add it to the output
    resourcename = r.resource.name
    formstyle = settings.get_ui_formstyle()
    form = SQLFORM.factory(record = None,
                           showid = False,
                           formstyle = formstyle,
                           table_name = resourcename,
                           buttons = buttons,
                           *formfields)
    output["form"] = form

    # Process the form
    formname = "%s/manage" % resourcename
    if form.accepts(r.post_vars,
                    current.session,
                    formname = formname,
                    onvalidation = self.validate,
                    keepvalues = False,
                    hideerror = False,
                    ):

        formvars = form.vars

        current_status = formvars.current_status
        new_status = formvars.new_status

        table = s3db.dvr_allowance
        query = current.auth.s3_accessible_query("update", table) & \
                (table.status == current_status) & \
                (table.deleted != True)
        from_date = formvars.from_date
        if from_date:
            query &= table.date >= from_date
        to_date = formvars.to_date
        if to_date:
            query &= table.date <= to_date

        result = current.db(query).update(status=int(new_status))
        if result:
            response.confirmation = T("%(number)s records updated") % \
                                    {"number": result}
        else:
            response.warning = T("No records found")

    response.view = self._view(r, "update.html")

    return output
Python
def dvr_get_household_size(person_id, dob=False, formatted=True):
    """
        Helper function to calculate the household size
        (counting only members with active cases)

        @param person_id: the person record ID
        @param dob: the date of birth of that person (if known)
        @param formatted: return household size info as string

        @return: household size info as string if formatted=True,
                 otherwise tuple (number_of_adults,
                                  number_of_children,
                                  number_of_children_under_1,
                                  )
    """

    db = current.db

    s3db = current.s3db
    ptable = s3db.pr_person
    gtable = s3db.pr_group
    mtable = s3db.pr_group_membership
    ctable = s3db.dvr_case
    stable = s3db.dvr_case_status

    from dateutil.relativedelta import relativedelta
    now = current.request.utcnow.date()

    # Default result
    adults, children, children_u1 = 1, 0, 0

    # Count the person in question
    if dob is False:
        query = (ptable.id == person_id)
        row = db(query).select(ptable.date_of_birth,
                               limitby = (0, 1),
                               ).first()
        if row:
            dob = row.date_of_birth
    if dob:
        age = relativedelta(now, dob).years
        if age < 18:
            adults, children = 0, 1
            if age < 1:
                children_u1 = 1

    # Household members which have already been counted
    members = set([person_id])
    counted = members.add

    # Get all case groups this person belongs to
    query = ((mtable.person_id == person_id) & \
             (mtable.deleted != True) & \
             (gtable.id == mtable.group_id) & \
             (gtable.group_type == 7))
    rows = db(query).select(gtable.id)
    group_ids = set(row.id for row in rows)

    if group_ids:
        join = [ptable.on(ptable.id == mtable.person_id),
                ctable.on((ctable.person_id == ptable.id) & \
                          (ctable.archived != True) & \
                          (ctable.deleted != True)),
                ]
        left = [stable.on(stable.id == ctable.status_id),
                ]
        query = (mtable.group_id.belongs(group_ids)) & \
                (mtable.deleted != True) & \
                (stable.is_closed != True)
        rows = db(query).select(ptable.id,
                                ptable.date_of_birth,
                                join = join,
                                left = left,
                                )

        for row in rows:
            person, dob = row.id, row.date_of_birth
            if person not in members:
                age = relativedelta(now, dob).years if dob else None
                if age is not None and age < 18:
                    children += 1
                    if age < 1:
                        children_u1 += 1
                else:
                    adults += 1
                counted(person)

    if not formatted:
        return adults, children, children_u1

    T = current.T
    template = "%(number)s %(label)s"
    details = []
    if adults:
        label = T("Adults") if adults != 1 else T("Adult")
        details.append(template % {"number": adults,
                                   "label": label,
                                   })
    if children:
        label = T("Children") if children != 1 else T("Child")
        details.append(template % {"number": children,
                                   "label": label,
                                   })
    details = ", ".join(details)

    if children_u1:
        if children_u1 == 1:
            label = T("Child under 1 year")
        else:
            label = T("Children under 1 year")
        details = "%s (%s)" % (details,
                               template % {"number": children_u1,
                                           "label": label,
                                           },
                               )

    return details
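Illustrative usage only (a sketch, not part of the module): the two return modes of dvr_get_household_size. The person_id value is hypothetical.

# Unformatted mode returns the raw counts as a tuple
adults, children, children_u1 = dvr_get_household_size(person_id = 42,
                                                       formatted = False,
                                                       )

# Formatted mode (the default) returns a human-readable string, e.g.
# "2 Adults, 3 Children (1 Child under 1 year)"
summary = dvr_get_household_size(person_id = 42)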
Python
def apply_method(self, r, **attr):
    """
        Main entry point for REST interface.

        @param r: the S3Request instance
        @param attr: controller parameters
    """

    if not self.permitted():
        current.auth.permission.fail()

    output = {}
    representation = r.representation

    if representation == "html":
        if r.http in ("GET", "POST"):
            output = self.registration_form(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)

    elif representation == "json":
        if r.http == "POST":
            output = self.registration_ajax(r, **attr)
        else:
            r.error(405, current.ERROR.BAD_METHOD)

    else:
        r.error(415, current.ERROR.BAD_FORMAT)

    return output
Python
def registration_form(self, r, **attr):
    """
        Render and process the registration form

        @param r: the S3Request instance
        @param attr: controller parameters
    """

    T = current.T

    response = current.response
    settings = current.deployment_settings

    output = {}

    http = r.http
    request_vars = r.get_vars

    check = True
    label = None

    if http == "POST":
        # Form submission
        request_vars = r.post_vars
        if "check" in request_vars:
            # Only check ID label, don't register an event
            label = request_vars.get("label")
        else:
            # Form has been submitted with "Register"
            check = False
    else:
        # Coming from external scan app (e.g. Zxing), or from a link
        label = request_vars.get("label")

    scanner = request_vars.get("scanner")

    person = None
    pe_label = None

    if label is not None:
        # Identify the person
        person = self.get_person(label)
        if person is None:
            if http == "GET":
                response.error = T("No person found with this ID number")
        else:
            pe_label = person.pe_label
            request_vars["label"] = pe_label

    # Get person details, waiting intervals, flag and permission info
    flags = []
    intervals = {}
    if person:
        # Person details
        person_details = self.person_details(person)
        profile_picture = self.profile_picture(person)

        # Blocking periods for events
        event_types = self.get_event_types()
        blocked = self.get_blocked_events(person.id)
        for type_id, info in blocked.items():
            event_type = event_types.get(type_id)
            if not event_type:
                continue
            code = event_type.code
            msg, dt = info
            intervals[code] = (s3_str(msg),
                               "%sZ" % s3_encode_iso_datetime(dt),
                               )

        # Flag info
        flag_info = dvr_get_flag_instructions(person.id,
                                              action = self.ACTION,
                                              )
        permitted = flag_info["permitted"]
        if check:
            info = flag_info["info"]
            for flagname, instructions in info:
                flags.append({"n": s3_str(T(flagname)),
                              "i": s3_str(T(instructions)),
                              })
    else:
        person_details = ""
        profile_picture = None
        permitted = False

    # Identify the event type
    event_code = request_vars.get("event")
    event_type = self.get_event_type(event_code)
    if not event_type:
        # Fall back to default event type
        event_type = self.get_event_type()
    event_code = event_type.code if event_type else None

    # Whether the event registration is actionable
    actionable = event_code is not None

    # Standard form fields and data
    formfields = [Field("label",
                        label = T("ID"),
                        requires = IS_NOT_EMPTY(error_message=T("Enter or scan an ID")),
                        ),
                  Field("person",
                        label = "",
                        writable = False,
                        default = "",
                        ),
                  Field("flaginfo",
                        label = "",
                        writable = False,
                        ),
                  ]
    data = {"id": "",
            "label": pe_label,
            "person": person_details,
            "flaginfo": "",
            }

    # Hidden fields to store event type, scanner, flag info and permission
    hidden = {"event": event_code,
              "scanner": scanner,
              "actionable": json.dumps(actionable),
              "permitted": json.dumps(permitted),
              "flags": json.dumps(flags),
              "intervals": json.dumps(intervals),
              "image": profile_picture,
              }

    # Additional form data
    widget_id, submit = self.get_form_data(person,
                                           formfields,
                                           data,
                                           hidden,
                                           permitted = permitted,
                                           )

    # Form buttons
    check_btn = INPUT(_class = "tiny secondary button check-btn",
                      _name = "check",
                      _type = "submit",
                      _value = T("Check ID"),
                      )
    submit_btn = INPUT(_class = "tiny primary button submit-btn",
                       _name = "submit",
                       _type = "submit",
                       _value = submit,
                       )

    # Toggle buttons (active button first, otherwise pressing Enter
    # hits the disabled button, requiring an extra tab step)
    actionable = hidden.get("actionable") == "true"
    if person and actionable and permitted:
        check_btn["_disabled"] = "disabled"
        check_btn.add_class("hide")
        buttons = [submit_btn, check_btn]
    else:
        submit_btn["_disabled"] = "disabled"
        submit_btn.add_class("hide")
        buttons = [check_btn, submit_btn]

    # Add the cancel-action
    buttons.append(A(T("Cancel"), _class = "cancel-action action-lnk"))

    resourcename = r.resource.name

    # Generate the form and add it to the output
    formstyle = settings.get_ui_formstyle()
    form = SQLFORM.factory(record = data if check else None,
                           showid = False,
                           formstyle = formstyle,
                           table_name = resourcename,
                           buttons = buttons,
                           hidden = hidden,
                           _id = widget_id,
                           *formfields)
    output["form"] = form

    # Process the form
    formname = "%s/registration" % resourcename
    if form.accepts(r.post_vars,
                    current.session,
                    onvalidation = self.validate,
                    formname = formname,
                    keepvalues = False,
                    hideerror = False,
                    ):

        if not check:
            self.accept(r, form, event_type=event_type)

    header = self.get_header(event_type)
    output.update(header)

    # ZXing Barcode Scanner Launch Button
    output["zxing"] = self.get_zxing_launch_button(event_code)

    # Custom view
    response.view = self._view(r, "dvr/register_case_event.html")

    # Show profile picture by default or only on demand?
    show_picture = settings.get_dvr_event_registration_show_picture()

    # Inject JS
    options = {"tablename": resourcename,
               "ajaxURL": r.url(None,
                                method = "register",
                                representation = "json",
                                ),
               "showPicture": show_picture,
               "showPictureText": s3_str(T("Show Picture")),
               "hidePictureText": s3_str(T("Hide Picture")),
               }
    self.inject_js(widget_id, options)

    return output
Python
def accept(self, r, form, event_type=None):
    """
        Helper function to process the form

        @param r: the S3Request
        @param form: the FORM
        @param event_type: the event_type (Row)
    """

    T = current.T
    response = current.response

    formvars = form.vars
    person_id = formvars.person_id

    success = False

    if not formvars.get("permitted"):
        response.error = T("Event registration not permitted")

    elif person_id:
        event_type_id = event_type.id if event_type else None
        success = self.register_event(person_id, event_type_id)
        if success:
            response.confirmation = T("Event registered")
        else:
            response.error = T("Could not register event")

    else:
        response.error = T("Person not found")

    return success
Python
def registration_ajax(self, r, **attr):
    """
        Ajax response method, expects a JSON input like:

            {l: the PE label (from the input field),
             c: boolean to indicate whether to just check
                the PE label or to register payments
             t: the event type code
             }

        @param r: the S3Request instance
        @param attr: controller parameters

        @return: JSON response, structure:

                {l: the actual PE label (to update the input field),
                 p: the person details,
                 d: the family details,
                 f: [{n: the flag name
                      i: the flag instructions
                      },
                     ...],
                 b: profile picture URL,
                 i: {<event_code>: [<msg>, <blocked_until_datetime>]},
                 s: whether the action is permitted or not
                 e: form error (for label field)
                 a: error message
                 w: warning message
                 m: success message
                 }
    """

    T = current.T

    # Load JSON data from request body
    s = r.body
    s.seek(0)
    try:
        data = json.load(s)
    except (ValueError, TypeError):
        r.error(400, current.ERROR.BAD_REQUEST)

    # Initialize processing variables
    output = {}

    error = None
    alert = None
    message = None
    warning = None

    permitted = False
    flags = []

    # Identify the person
    pe_label = data.get("l")
    person = self.get_person(pe_label)

    if person is None:
        error = s3_str(T("No person found with this ID number"))

    else:
        # Get flag info
        flag_info = dvr_get_flag_instructions(person.id,
                                              action = "id-check",
                                              )
        permitted = flag_info["permitted"]

        check = data.get("c")
        if check:
            # Person details
            person_details = self.person_details(person)
            profile_picture = self.profile_picture(person)

            output["p"] = s3_str(person_details)
            output["l"] = person.pe_label
            output["b"] = profile_picture

            # Family details
            details = dvr_get_household_size(person.id,
                                             dob = person.date_of_birth,
                                             )
            if details:
                output["d"] = {"d": details}

            # Flag Info
            info = flag_info["info"]
            for flagname, instructions in info:
                flags.append({"n": s3_str(T(flagname)),
                              "i": s3_str(T(instructions)),
                              })

            # Blocking periods for events
            event_types = self.get_event_types()
            blocked = self.get_blocked_events(person.id)
            intervals = {}
            for type_id, info in blocked.items():
                event_type = event_types.get(type_id)
                if not event_type:
                    continue
                code = event_type.code
                msg, dt = info
                intervals[code] = (s3_str(msg),
                                   "%sZ" % s3_encode_iso_datetime(dt),
                                   )
            output["i"] = intervals

        else:
            # Check event code and permission
            type_id = None
            event_code = data.get("t")
            if not event_code:
                alert = T("No event type specified")
            elif not permitted:
                alert = T("Event registration not permitted")
            else:
                event_type = self.get_event_type(event_code)
                if not event_type:
                    alert = T("Invalid event type: %s") % event_code
                else:
                    type_id = event_type.id

            if type_id:
                # Check whether event type is blocked for this person
                person_id = person.id
                blocked = self.get_blocked_events(person_id,
                                                  type_id = type_id,
                                                  )
                if type_id in blocked:
                    # Event type is currently blocked for this person
                    alert = blocked[type_id][0]
                else:
                    # Ok - register the event
                    success = self.register_event(person.id, type_id)
                    if success:
                        message = T("Event registered")
                    else:
                        alert = T("Could not register event")

    # Add messages to output
    if alert:
        output["a"] = s3_str(alert)
    if error:
        output["e"] = s3_str(error)
    if message:
        output["m"] = s3_str(message)
    if warning:
        output["w"] = s3_str(warning)

    # Add flag info to output
    output["s"] = permitted
    output["f"] = flags

    current.response.headers["Content-Type"] = "application/json"

    return json.dumps(output)
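Illustrative request payloads for this Ajax endpoint, derived from the docstring above; the label and event code values are made up.

import json

# Check the ID only (no registration)
check_request = json.dumps({"l": "A-12345", "c": True})

# Register an event of a given type code
register_request = json.dumps({"l": "A-12345", "t": "FOOD"})

# A possible success response for the registration request (sketch only):
# {"l": "A-12345", "s": true, "f": [], "m": "Event registered"}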
Python
def check_intervals(self, person_id, type_id=None):
    """
        Check minimum intervals between consecutive registrations
        of the same event type

        @param person_id: the person record ID
        @param type_id: check only this event type (rather than all types)

        @return: a dict with blocked event types
                 {type_id: (error_message, blocked_until_datetime)}
    """

    T = current.T

    db = current.db
    s3db = current.s3db

    now = current.request.utcnow
    day_start = now.replace(hour=0,
                            minute=0,
                            second=0,
                            microsecond=0,
                            )
    next_day = day_start + datetime.timedelta(days=1)

    output = {}

    table = s3db.dvr_case_event
    event_type_id = table.type_id

    # Get event types to check
    event_types = self.get_event_types()

    # Check for impermissible combinations
    etable = s3db.dvr_case_event_exclusion
    query = (table.person_id == person_id) & \
            (table.date >= day_start) & \
            (table.deleted == False) & \
            (etable.excluded_by_id == table.type_id) & \
            (etable.deleted == False)
    if type_id and event_types.get(type_id):
        query &= etable.type_id == type_id

    rows = db(query).select(etable.type_id,
                            etable.excluded_by_id,
                            )
    excluded = {}
    for row in rows:
        tid = row.type_id
        if tid in excluded:
            excluded[tid].append(row.excluded_by_id)
        else:
            excluded[tid] = [row.excluded_by_id]

    for tid, excluded_by_ids in excluded.items():
        event_type = event_types.get(tid)
        if not event_type:
            continue
        excluded_by_names = []
        seen = set()
        for excluded_by_id in excluded_by_ids:
            if excluded_by_id in seen:
                continue
            else:
                seen.add(excluded_by_id)
            excluded_by_type = event_types.get(excluded_by_id)
            if not excluded_by_type:
                continue
            excluded_by_names.append(s3_str(T(excluded_by_type.name)))
        if excluded_by_names:
            msg = T("%(event)s already registered today, not combinable") % \
                  {"event": ", ".join(excluded_by_names)
                   }
            output[tid] = (msg, next_day)

    # Helper function to build event type sub-query
    def type_query(items):
        if len(items) == 1:
            return (event_type_id == items[0])
        elif items:
            return (event_type_id.belongs(set(items)))
        else:
            return None

    # Check maximum occurrences per day
    q = None
    if type_id:
        event_type = event_types.get(type_id)
        if event_type and \
           event_type.max_per_day and \
           type_id not in output:
            q = type_query((type_id,))
    else:
        check = [tid for tid, row in event_types.items()
                 if row.max_per_day and \
                    tid != "_default" and tid not in output
                 ]
        q = type_query(check)

    if q is not None:
        # Get number of events per type for this person today
        cnt = table.id.count()
        query = (table.person_id == person_id) & q & \
                (table.date >= day_start) & \
                (table.deleted != True)
        rows = db(query).select(event_type_id,
                                cnt,
                                groupby = event_type_id,
                                )

        # Check limit
        for row in rows:
            number = row[cnt]
            tid = row[event_type_id]
            event_type = event_types[tid]
            limit = event_type.max_per_day

            if number >= limit:
                if number > 1:
                    msg = T("%(event)s already registered %(number)s times today") % \
                          {"event": T(event_type.name),
                           "number": number,
                           }
                else:
                    msg = T("%(event)s already registered today") % \
                          {"event": T(event_type.name),
                           }
                output[tid] = (msg, next_day)

    # Check minimum intervals
    q = None
    if type_id:
        event_type = event_types.get(type_id)
        if event_type and \
           event_type.min_interval and \
           type_id not in output:
            q = type_query((type_id,))
    else:
        check = [tid for tid, row in event_types.items()
                 if row.min_interval and \
                    tid != "_default" and tid not in output
                 ]
        q = type_query(check)

    if q is not None:
        # Get the last events for these types for this person
        query = (table.person_id == person_id) & q & \
                (table.deleted != True)
        timestamp = table.date.max()
        rows = db(query).select(event_type_id,
                                timestamp,
                                groupby = event_type_id,
                                )

        # Check intervals
        represent = table.date.represent
        for row in rows:
            latest = row[timestamp]
            tid = row[event_type_id]
            event_type = event_types[tid]
            interval = event_type.min_interval
            if latest:
                earliest = latest + datetime.timedelta(hours=interval)
                if earliest > now:
                    msg = T("%(event)s already registered on %(timestamp)s") % \
                          {"event": T(event_type.name),
                           "timestamp": represent(latest),
                           }
                    output[tid] = (msg, earliest)

    return output
Python
def person_(label):
    """ Helper function to find a person by pe_label """

    query = (FS("pe_label") == pe_label) & \
            (FS("dvr_case.id") != None) & \
            (FS("dvr_case.archived") != True) & \
            (FS("dvr_case.status_id$is_closed") != True)
    presource = s3db.resource("pr_person",
                              components = ["dvr_case"],
                              filter = query,
                              )
    rows = presource.select(fields,
                            start = 0,
                            limit = 1,
                            as_rows = True,
                            )
    return rows[0] if rows else None
Python
def profile_picture(person):
    """
        Get the profile picture URL for a person

        @param person: the person record (Row)

        @return: the profile picture URL (relative URL), or None if
                 no profile picture is available for that person
    """

    try:
        pe_id = person.pe_id
    except AttributeError:
        return None

    table = current.s3db.pr_image
    query = (table.pe_id == pe_id) & \
            (table.profile == True) & \
            (table.deleted != True)
    row = current.db(query).select(table.image, limitby=(0, 1)).first()

    if row:
        return URL(c="default", f="download", args=row.image)
    else:
        return None
Python
def parse_code(code):
    """
        Parse a scanned ID code (QR Code)

        @param code: the scanned ID code (string)

        @return: a dict {"label": the PE label,
                         "first_name": optional first name,
                         "last_name": optional last name,
                         "date_of_birth": optional date of birth,
                         }
    """

    data = {"label": code}

    pattern = current.deployment_settings.get_dvr_id_code_pattern()
    if pattern and code:
        import re
        pattern = re.compile(pattern)
        m = pattern.match(code)
        if m:
            data.update(m.groupdict())

    return data
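Illustrative only: a hypothetical dvr_id_code_pattern with named groups matching the keys parse_code returns. The code format shown here is made up; the actual pattern is a deployment setting.

import re

pattern = r"(?P<label>[A-Z0-9]+)\|\|(?P<last_name>[^|]+)\|\|(?P<first_name>[^|]+)\|\|(?P<date_of_birth>\d{4}-\d{2}-\d{2})"
code = "A12345||Doe||Jane||1990-05-01"

# Mirrors what parse_code does with the configured pattern
data = {"label": code}
m = re.compile(pattern).match(code)
if m:
    data.update(m.groupdict())
# data now holds label, first_name, last_name and date_of_birth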
Python
def inject_js(widget_id, options):
    """
        Helper function to inject static JS and instantiate
        the eventRegistration widget

        @param widget_id: the node ID where to instantiate the widget
        @param options: dict of widget options (JSON-serializable)
    """

    s3 = current.response.s3
    appname = current.request.application

    # Static JS
    scripts = s3.scripts
    if s3.debug:
        script = "/%s/static/scripts/S3/s3.dvr.js" % appname
    else:
        script = "/%s/static/scripts/S3/s3.dvr.min.js" % appname
    scripts.append(script)

    # Instantiate widget
    scripts = s3.jquery_ready
    script = '''$('#%(id)s').eventRegistration(%(options)s)''' % \
             {"id": widget_id, "options": json.dumps(options)}
    if script not in scripts:
        scripts.append(script)
Python
def accept(self, r, form, event_type=None):
    """
        Helper function to process the form

        @param r: the S3Request
        @param form: the FORM
        @param event_type: the event_type (Row)
    """

    T = current.T
    response = current.response

    formvars = form.vars
    person_id = formvars.person_id

    success = False

    if not formvars.get("permitted"):
        response.error = T("Payment registration not permitted")

    elif person_id:
        # Get payment data from hidden input
        payments = r.post_vars.get("actions")
        if payments:

            # @todo: read date from formvars (utcnow as fallback)
            date = r.utcnow
            comments = formvars.get("comments")

            updated, failed = self.register_payments(person_id,
                                                     payments,
                                                     date = date,
                                                     comments = comments,
                                                     )
            response.confirmation = T("%(number)s payment(s) registered") % \
                                    {"number": updated}
            if failed:
                response.warning = T("%(number)s payment(s) not found") % \
                                   {"number": failed}
        else:
            response.error = T("No payments specified")

    else:
        response.error = T("Person not found")

    return success
Python
def registration_ajax(self, r, **attr):
    """
        Ajax response method, expects a JSON input like:

            {l: the PE label (from the input field),
             c: boolean to indicate whether to just check
                the PE label or to register payments
             d: the payment data (raw data, which payments to update)
             }

        @param r: the S3Request instance
        @param attr: controller parameters

        @return: JSON response, structure:

                {l: the actual PE label (to update the input field),
                 p: the person details,
                 f: [{n: the flag name
                      i: the flag instructions
                      },
                     ...],
                 u: whether there are any actionable data
                 s: whether the action is permitted or not
                 d: {t: time stamp
                     h: payment details (raw data)
                     d: payment details (HTML)
                     }
                 e: form error (for label field)
                 a: error message
                 w: warning message
                 m: success message
                 }
    """

    T = current.T

    # Load JSON data from request body
    s = r.body
    s.seek(0)
    try:
        data = json.load(s)
    except (ValueError, TypeError):
        r.error(400, current.ERROR.BAD_REQUEST)

    # Initialize processing variables
    output = {}

    alert = None
    error = None
    warning = None
    message = None

    permitted = False
    flags = []

    # Identify the person
    pe_label = data.get("l")
    person = self.get_person(pe_label)

    if person is None:
        error = s3_str(T("No person found with this ID number"))

    else:
        # Get flag info
        flag_info = dvr_get_flag_instructions(person.id,
                                              action = self.ACTION,
                                              )
        permitted = flag_info["permitted"]

        check = data.get("c")
        if check:

            # Person details
            person_details = self.person_details(person)
            profile_picture = self.profile_picture(person)

            output["p"] = s3_str(person_details)
            output["l"] = person.pe_label
            output["b"] = profile_picture

            info = flag_info["info"]
            for flagname, instructions in info:
                flags.append({"n": s3_str(T(flagname)),
                              "i": s3_str(T(instructions)),
                              })

            if permitted:
                payments = self.get_payment_data(person.id)
            else:
                payments = []
            date = S3DateTime.datetime_represent(current.request.utcnow,
                                                 utc = True,
                                                 )
            output["d"] = {"d": s3_str(self.payment_data_represent(payments)),
                           "t": s3_str(date),
                           "h": payments,
                           }
            if payments:
                output["u"] = True
            else:
                output["u"] = False

        else:
            if not permitted:
                alert = T("Payment registration not permitted")
            else:
                # Get payment data from JSON
                payments = data.get("d")
                if payments:

                    # @todo: read date from JSON data (utcnow as fallback)
                    date = r.utcnow
                    comments = data.get("c")

                    updated, failed = self.register_payments(
                                            person.id,
                                            payments,
                                            date = date,
                                            comments = comments,
                                            )
                    message = T("%(number)s payment(s) registered") % \
                              {"number": updated}
                    if failed:
                        warning = T("%(number)s payment(s) not found") % \
                                  {"number": failed}
                else:
                    alert = T("No payments specified")

    # Add messages to output
    if alert:
        output["a"] = s3_str(alert)
    if error:
        output["e"] = s3_str(error)
    if message:
        output["m"] = s3_str(message)
    if warning:
        output["w"] = s3_str(warning)

    # Add flag info to output
    output["s"] = permitted
    output["f"] = flags

    current.response.headers["Content-Type"] = "application/json"

    return json.dumps(output)
Python
def payment_data_represent(self, data):
    """
        Representation method for the payment details field

        @param data: the payment data (from get_payment_data)
    """

    if data:
        output = TABLE(_class="payment-details")
        for payment in data:
            details = TR(TD(payment["d"], _class="payment-date"),
                         TD(payment["c"], _class="payment-currency"),
                         TD(payment["a"], _class="payment-amount"),
                         )
            output.append(details)
    else:
        output = current.T("No pending payments")

    return output
Python
def dvr_get_flag_instructions(person_id, action=None):
    """
        Get handling instructions if flags are set for a person

        @param person_id: the person ID
        @param action: the action for which instructions are needed:
                       - check-in|check-out|payment|id-check

        @returns: dict {"permitted": whether the action is permitted
                        "info": list of tuples (flagname, instructions)
                        }
    """

    s3db = current.s3db

    ftable = s3db.dvr_case_flag
    ltable = s3db.dvr_case_flag_case
    query = (ltable.person_id == person_id) & \
            (ltable.deleted != True) & \
            (ftable.id == ltable.flag_id) & \
            (ftable.deleted != True)

    if action == "check-in":
        query &= (ftable.advise_at_check_in == True) | \
                 (ftable.deny_check_in == True)
    elif action == "check-out":
        query &= (ftable.advise_at_check_out == True) | \
                 (ftable.deny_check_out == True)
    elif action == "payment":
        query &= (ftable.advise_at_id_check == True) | \
                 (ftable.allowance_suspended == True)
    else:
        query &= (ftable.advise_at_id_check == True)

    flags = current.db(query).select(ftable.name,
                                     ftable.deny_check_in,
                                     ftable.deny_check_out,
                                     ftable.allowance_suspended,
                                     ftable.advise_at_check_in,
                                     ftable.advise_at_check_out,
                                     ftable.advise_at_id_check,
                                     ftable.instructions,
                                     )

    info = []
    permitted = True
    for flag in flags:
        advise = False
        if action == "check-in":
            if flag.deny_check_in:
                permitted = False
            advise = flag.advise_at_check_in
        elif action == "check-out":
            if flag.deny_check_out:
                permitted = False
            advise = flag.advise_at_check_out
        elif action == "payment":
            if flag.allowance_suspended:
                permitted = False
            advise = flag.advise_at_id_check
        else:
            advise = flag.advise_at_id_check

        if advise:
            instructions = flag.instructions
            if instructions is not None:
                instructions = instructions.strip()
            if not instructions:
                instructions = current.T("No instructions for this flag")
            info.append((flag.name, instructions))

    return {"permitted": permitted,
            "info": info,
            }
Python
def dvr_update_last_seen(person_id): """ Helper function for automatic updates of dvr_case.last_seen_on @param person_id: the person ID """ db = current.db s3db = current.s3db now = current.request.utcnow last_seen_on = None if not person_id: return # Get event types that require presence ettable = s3db.dvr_case_event_type query = (ettable.presence_required == True) & \ (ettable.deleted == False) types = db(query).select(ettable.id, cache=s3db.cache) type_ids = set(t.id for t in types) # Get the last case event that required presence etable = s3db.dvr_case_event query = (etable.person_id == person_id) & \ (etable.type_id.belongs(type_ids)) & \ (etable.date != None) & \ (etable.date <= now) & \ (etable.deleted != True) event = db(query).select(etable.date, orderby = ~etable.date, limitby = (0, 1), ).first() if event: last_seen_on = event.date # Check shelter registration history for newer entries htable = s3db.cr_shelter_registration_history query = (htable.person_id == person_id) & \ (htable.status.belongs(2, 3)) & \ (htable.date != None) & \ (htable.deleted != True) if last_seen_on is not None: query &= htable.date > last_seen_on entry = db(query).select(htable.date, orderby = ~htable.date, limitby = (0, 1), ).first() if entry: last_seen_on = entry.date settings = current.deployment_settings # Case appointments to update last_seen_on? if settings.get_dvr_appointments_update_last_seen_on(): # Get appointment types that require presence attable = s3db.dvr_case_appointment_type query = (attable.presence_required == True) & \ (attable.deleted == False) types = db(query).select(attable.id, cache=s3db.cache) type_ids = set(t.id for t in types) # Get last appointment that required presence atable = s3db.dvr_case_appointment query = (atable.person_id == person_id) & \ (atable.date != None) & \ (atable.type_id.belongs(type_ids)) & \ (atable.date <= now.date()) & \ (atable.status == 4) & \ (atable.deleted != True) if last_seen_on is not None: query &= atable.date > last_seen_on.date() appointment = db(query).select(atable.date, orderby = ~atable.date, limitby = (0, 1), ).first() if appointment: date = appointment.date try: date = datetime.datetime.combine(date, datetime.time(0, 0, 0)) except TypeError: pass # Local time offset to UTC (NB: can be 0) delta = S3DateTime.get_offset_value(current.session.s3.utc_offset) # Default to 08:00 local time (...unless that would be future) date = min(now, date + datetime.timedelta(seconds = 28800 - delta)) last_seen_on = date # Allowance payments to update last_seen_on? if settings.get_dvr_payments_update_last_seen_on(): atable = s3db.dvr_allowance query = (atable.person_id == person_id) & \ (atable.paid_on != None) & \ (atable.status == 2) & \ (atable.deleted != True) if last_seen_on is not None: query &= atable.paid_on > last_seen_on payment = db(query).select(atable.paid_on, orderby = ~atable.paid_on, limitby = (0, 1), ).first() if payment: last_seen_on = payment.paid_on # Update last_seen_on ctable = s3db.dvr_case query = (ctable.person_id == person_id) & \ (ctable.archived != True) & \ (ctable.deleted != True) db(query).update(last_seen_on = last_seen_on, # Don't change author stamp for # system-controlled record update: modified_on = ctable.modified_on, modified_by = ctable.modified_by, )
Python
def validate_first_segment(self): """ Start fetching object data to ensure that the first segment (if any) is valid. This is to catch cases like "first segment is missing" or "first segment's etag doesn't match manifest". Note: this does not validate that you have any segments. A zero-segment large object is not erroneous; it is just empty. """ if self.validated_first_segment: return self.validated_first_segment = True try: self.peeked_chunk = next(self.app_iter) except StopIteration: pass
Python
def http_response_to_document_iters(response, read_chunk_size=4096): """ Takes a successful object-GET HTTP response and turns it into an iterator of (first-byte, last-byte, length, headers, body-file) 5-tuples. The response must either be a 200 or a 206; if you feed in a 204 or something similar, this probably won't work. :param response: HTTP response, like from bufferedhttp.http_connect(), not a swob.Response. """ chunked = is_chunked(dict(response.getheaders())) if response.status == 200: if chunked: # Single "range" that's the whole object with an unknown length return iter([(0, None, None, response.getheaders(), response)]) # Single "range" that's the whole object content_length = int(response.getheader('Content-Length')) return iter([(0, content_length - 1, content_length, response.getheaders(), response)]) content_type, params_list = parse_content_type( response.getheader('Content-Type')) if content_type != 'multipart/byteranges': # Single range; no MIME framing, just the bytes. The start and end # byte indices are in the Content-Range header. start, end, length = parse_content_range( response.getheader('Content-Range')) return iter([(start, end, length, response.getheaders(), response)]) else: # Multiple ranges; the response body is a multipart/byteranges MIME # document, and we have to parse it using the MIME boundary # extracted from the Content-Type header. params = dict(params_list) return multipart_byteranges_to_document_iters( response, params['boundary'], read_chunk_size)
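A consumption sketch for the iterator above; `response` is assumed to be a 200 or 206 response object as described in the docstring, and the chunk size is arbitrary.

for first_byte, last_byte, length, headers, body in \
        http_response_to_document_iters(response):
    # body is a file-like object: the response itself for a single range,
    # or one MIME part for a multipart/byteranges response
    while True:
        chunk = body.read(4096)
        if not chunk:
            break
        # hand the chunk off to the caller / client socket here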
Python
def _deprecated_message(new_arg_name, deprecated_from="2.x"):
    """
    Builds a warning telling the user that an argument is deprecated and
    will be dropped in a future release, pointing to its replacement
    """
    msg = (
        "Deprecated argument (it will be dropped "
        "from version {} onwards); please use --{} instead."
    ).format(deprecated_from, new_arg_name)
    return msg
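A hedged example of wiring this helper into argparse help text; the argument names and version are hypothetical.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--new-output", dest="output", help="Path to the output file")
# Keep the old flag working, but mark it as deprecated in the help text
parser.add_argument("--output-file", dest="output",
                    help=_deprecated_message("new-output", deprecated_from="3.0"))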
Python
def create_2digit_mnist_image_leftright(digit1, digit2):
    """ Places two 28x28 MNIST digits (given as flat numpy arrays) side by side on a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)
    digit2 = digit2.reshape(28,28)

    w = randint(16,18)    # row offset (roughly vertically centred)
    h = randint(0,4)      # column offset of the left digit
    image[w:w+28,h:h+28] = digit1

    h = randint(28,32)    # column offset of the right digit
    image[w:w+28,h:h+28] = digit2

    image = image.reshape(-1)

    return image
Python
def create_2digit_mnist_image_topbottom(digit1, digit2):
    """ Stacks two 28x28 MNIST digits (given as flat numpy arrays) top to bottom on a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)
    digit2 = digit2.reshape(28,28)

    h = randint(16,18)    # column offset (roughly horizontally centred)
    w = randint(0,2)      # row offset of the top digit
    image[w:w+28,h:h+28] = digit1

    w = randint(30,32)    # row offset of the bottom digit
    image[w:w+28,h:h+28] = digit2

    image = image.reshape(-1)

    return image
Python
def create_1digit_mnist_image_topleft(digit1):
    """ Places a single 28x28 MNIST digit (given as a flat numpy array) in the top-left of a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)

    w = randint(0,2)      # row offset
    h = randint(0,4)      # column offset
    image[w:w+28,h:h+28] = digit1

    image = image.reshape(-1)

    return image
Python
def create_1digit_mnist_image_topright(digit1):
    """ Places a single 28x28 MNIST digit (given as a flat numpy array) in the top-right of a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)

    w = randint(0,2)      # row offset
    h = randint(28,32)    # column offset
    image[w:w+28,h:h+28] = digit1

    image = image.reshape(-1)

    return image
Python
def create_1digit_mnist_image_bottomright(digit1):
    """ Places a single 28x28 MNIST digit (given as a flat numpy array) in the bottom-right of a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)

    w = randint(30,32)    # row offset
    h = randint(28,32)    # column offset
    image[w:w+28,h:h+28] = digit1

    image = image.reshape(-1)

    return image
Python
def create_1digit_mnist_image_bottomleft(digit1):
    """ Places a single 28x28 MNIST digit (given as a flat numpy array) in the bottom-left of a 60x60 canvas """
    image = np.zeros((60,60))

    digit1 = digit1.reshape(28,28)

    w = randint(30,32)    # row offset
    h = randint(0,4)      # column offset
    image[w:w+28,h:h+28] = digit1

    image = image.reshape(-1)

    return image
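A small usage sketch for the canvas helpers above, using random stand-in digits; real code would pull 784-element rows out of an MNIST array. The helpers themselves rely on `np` and `randint` being imported in their module.

import numpy as np

digit_a = np.random.rand(784)   # stand-in for a flattened 28x28 MNIST digit
digit_b = np.random.rand(784)

pair = create_2digit_mnist_image_leftright(digit_a, digit_b)
single = create_1digit_mnist_image_bottomleft(digit_a)
assert pair.shape == (3600,) and single.shape == (3600,)   # 60*60 flattened canvases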
Python
def create_connection():
    """ create a database connection to the SQLite database named by the
        DATABASE_NAME environment variable
    :return: Connection object or None
    """
    try:
        return sqlite3.connect(database=os.environ["DATABASE_NAME"])
    except Exception as e:
        print(e)

    return None
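A usage sketch; the database file name and the query are illustrative only.

import os

os.environ.setdefault("DATABASE_NAME", "app.db")   # hypothetical database file
conn = create_connection()
if conn is not None:
    try:
        rows = conn.execute("SELECT 1").fetchall()
    finally:
        conn.close()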
Python
def video_feed(): """Video streaming route. Put this in the src attribute of an img tag.""" my_camera = camera.Camera() # my_camera = camera.CameraTest() return Response( gen(my_camera), mimetype="multipart/x-mixed-replace; boundary=frame" )
Python
def expectedFailure(reason: FailedReason, msg: str, condition: bool = True) -> Callable: "Extends the unittest.expectedFailure decorator to print failure details and takes an optional condition" def _decorator(func): @unittest.expectedFailure def _wrapper(x): print(f"\n{reason.value}: {msg}") try: return func(x) except Exception as e: print(f"\t{e}\n") raise (e) return _wrapper if condition else func return _decorator
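A usage sketch for the decorator above; `FailedReason.BUG` and `compute()` are hypothetical stand-ins for whatever the surrounding test suite actually defines.

import sys
import unittest

class ExampleTests(unittest.TestCase):

    # Marks the test as an expected failure on Windows only; elsewhere it runs normally
    @expectedFailure(FailedReason.BUG, "tracked upstream", condition=(sys.platform == "win32"))
    def test_known_platform_issue(self):
        self.assertEqual(compute(), 42)   # compute() is a placeholder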
Python
def benchmark_gemm(opts: GemmOpts, output_prefix: str, available_gpus, containerName, verbose_logs, compilerVersion, commit_id, commit_datetime, commit_branch, target_name, check_result, dev_props): """ Architecture Overview: -------------------------------------------------------------------------------------------- This function takes a particular gemm input and produces different kernel implementations by permuting the possible different optimizations and the configurations e.g. vectorization, cache layouts etc. In order to do this, it creates a set of variations (aka "variants") which are then benchmarked on GPU devices. To do this efficiently, we decompose the task into a producer-consumer model where the producers ("run_variant") are responsible for building the HAT package (CPU-bound) for a particular kernel configuration and the consumers ("gemm_runner") are responsible for taking a HAT package and benchmarking them (GPU-bound). We spawn 1 producer process for each kernel variant (in waves of X processes where X is a constant based on the number of CPU cores on the system) and 1 consumer process for each GPU device available for kernel execution. ---------------------------------------------------------------------------------------------- Here is the flow diagram for the benchmarking pipeline: GEMM input v +-------------------+ +-------------------->--------------------+ | Create | | +------------------->------------+ | | shared queues -+-----+-----+-----+--- --+ | | | (1 per GPU) | Q1 Q2 Q3 Qn ------->---+ | | | | | ^ ^ ^ ^ | | | | Create | | | | | | | | | golden data | \ | | / | | | Main | | | +-----------------------+ | | | Process | Generate | | GPU Device selector | | | | | variants | +-----------------------+ | | | | | | / | | \ \ \ | | v | Spawn producers | / | | | \ \ | v | | (1 per variant) -+---+-----+-----+-----+----+--- --+ | | | | | | P1 P2 P3 P4 P5 Pv v | | | Spawn consumers | | | | | (1 per GPU) -+-----+-----+-----+--- ---+ | | | | | | C1 C2 C3 Cn <----------+ | | | Wait for all | ^ ^ | | | processes to join | | +----------<---------------------+ | +-------------------+ ----------------------<-------------------+ ---------------------------------------------------------------------------------------------- Producer process: "run_variant" i, kernel variant, golden data, sync. list of results v +-------------------+ | Create | | result object | | | | | Build HAT | | package | | | | | Push HAT pkg to +-------------> Qi | shared queue Qi | +-------------------+ ---------------------------------------------------------------------------------------------- Consumer process (Ci) for shared queue Qi: "gemm_runner" i, sync. list of results v +-------------------+ | Until Qi is empty <-------------- Qi | do: | | | | | Pop HAT pkg. | | from queue Di | | | | | Verify | | correctness | | vs. 
golden data | | | | | Run benchmark | | (hatlib) | | | | | Upload results | | to Cosmos DB | +-------------------+ """ # create shared process queue, one per gpu gpu_devices = [] device_q = [] for i in range(len(available_gpus)): if available_gpus[i]: gpu_devices.append(i) device_q.append(Queue()) total_gpus = len(gpu_devices) target = Target(target_name) variants = get_variants(opts, target) if len(variants) == 0: # this means we failed to find any valid kernel configuration for this input if verbose_logs: print(colored('No valid kernel configurations found.', "magenta")) if containerName: result = BenchmarkResult(opts=opts, gpu_id=-1, commit_id=commit_id, commit_datetime=str(commit_datetime), commit_branch=commit_branch, target_name=target_name, deviceProperties='') result.target_rt = 'ROCM' if target.runtime == Target.Runtime.ROCM else 'CUDA' result.compiler_version = compilerVersion cosmosdb.upsert_benchmark_results([result.getResultRow()], containerName, verbose_logs) else: # Create golden data for verification if required golden_data = None if check_result: # Create the arrays with the appropriate layout datatype = getType(opts.type) npdatatype = np.dtype(datatype.name) A_test, B_test, C_test = (np.ndarray((opts.m, opts.k), dtype=npdatatype, order=getLayout(bool(opts.transA)).to_numpy_order()), np.ndarray((opts.k, opts.n), dtype=npdatatype, order=getLayout(bool(opts.transB)).to_numpy_order()), np.ndarray((opts.m, opts.n), dtype=npdatatype, order=Array.Layout.FIRST_MAJOR.to_numpy_order())) # Create all the random input data A_test_data, B_test_data, C_test_data = (np.random.random((opts.m, opts.k)).astype(npdatatype), np.random.random((opts.k, opts.n)).astype(npdatatype), np.random.random((opts.m, opts.n)).astype(npdatatype)) # Assign the default-ordered input data to the appropriately-ordered arrays A_test[:] = A_test_data B_test[:] = B_test_data C_test[:] = C_test_data C_ref = (opts.beta * C_test) + (opts.alpha * (A_test @ B_test)) golden_data = (A_test, B_test, C_test, C_ref) waveSize = multiprocessing.cpu_count() manager = multiprocessing.Manager() for wave in range(0, len(variants), waveSize): processes = [] result_rows = manager.list() print(f"Wave {wave // waveSize} (Completed: {wave}/{len(variants)} kernels)") for i in range(wave, min(wave + waveSize, len(variants))): gpu_idx = i % total_gpus gpu_id = gpu_devices[gpu_idx] p = Process(name=f"builder{i}", target=run_variant, args=(variants[i], gpu_id, device_q[gpu_idx], opts, target, output_prefix, compilerVersion, commit_id, commit_datetime, commit_branch, target_name, dev_props[gpu_id], verbose_logs, check_result)) p.start() time.sleep(5) for i in range(total_gpus): p = Process(name=f"runner{i}", target=gemm_runner, args=(gpu_devices[i], output_prefix, device_q[i], result_rows, golden_data, verbose_logs)) p.start() processes.append(p) if not verbose_logs: bar = progressbar.ProgressBar(maxval=len(processes), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]) bar.start() i = 0 for p in processes: while p.is_alive(): if verbose_logs: print(f"Joining process: {p.name}, {p.pid}") proc = psutil.Process(p.pid) if proc.status() == psutil.STATUS_ZOMBIE: if verbose_logs: print(f"Zombie process found: {p.name}, {p.pid}, skipping...") break # just move on p.join(5) else: i += 1 if not verbose_logs: bar.update(i) else: if not verbose_logs: bar.finish() if containerName: cosmosdb.upsert_benchmark_results(result_rows, containerName, verbose_logs)
Python
def create_plan(self, target: "accera.Target" = Target.HOST) -> "accera.Plan": """Creates a plan for running this schedule Args: target: Optional target specification. Defaults to the HOST """ from .Plan import Plan return Plan(self, target)
Python
def pad(
    self, index: LoopIndex, size: Union[int, DelayedParameter], _front: bool = True
) -> None:
    """Pads a specified dimension of the iteration-space with empty (no-op) elements,
    at the beginning of the dimension by default.

    Args:
        index: The dimension to pad
        size: The number of empty elements to add
        _front: Pad at the beginning of the dimension (True, default) or at the end (False)
    """
    index = self._resolve_index(index)
    padded_index = index.create_child_index()
    order_pos = self._indices.index(index)
    self._indices[order_pos] = padded_index

    start, stop, _ = self._index_map[index].interval()

    if isinstance(size, DelayedParameter):
        self._delayed_calls[partial(self._pad_delayed, padded_index, _front)] = size

        self._index_map[padded_index] = IndexEntry(
            start=start,
            stop=stop,
            parent=index,
            transform=(IndexTransform.PAD, (0, _front)),
        )
        return

    self._index_map[padded_index] = IndexEntry(
        start=start,
        stop=stop + size,
        parent=index,
        transform=(IndexTransform.PAD, (size, _front)),
    )
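A minimal usage sketch; the `Nest(shape=...)` constructor and `get_indices()` accessor are assumed from the surrounding accera API and are not defined in this listing.

from accera import Nest   # assumed import path

nest = Nest(shape=(16, 16))          # assumed constructor
i, j = nest.get_indices()            # assumed accessor
schedule = nest.create_schedule()

schedule.pad(i, 2)                   # two empty elements at the front of dimension i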
Python
def reorder(
    self,
    order: Union[Tuple[LoopIndex], LoopIndex, DelayedParameter] = None,
    *args: LoopIndex,
):
    """The `reorder` transformation sets the order of the indices in the schedule.

    Args:
        order: Either the order of indices to set, or the outermost index if using variable arguments.
        args: Optional variable arguments containing subsequent indices to set

    Remarks:
        The order must respect these constraints:
            1. The *outer dimension* created by a `split` transformation must always precede the corresponding *inner dimension*.
            2. The *fusing dimension* created by a `fuse` operation must always precede any *unfused dimensions*.
    """
    if isinstance(order, DelayedParameter):
        self._delayed_calls[partial(self.reorder, *args)] = order
        return

    indices = [order] + list(args) if isinstance(order, LoopIndex) else list(order)

    if len(indices) != len(self._indices):
        raise ValueError(
            f"Expected {len(self._indices)} indices, but got {len(indices)} indices instead"
        )

    indices = list(map(self._resolve_index, indices))

    visited = []
    for i in indices:
        if (
            self._index_map[i].parent
            and self._index_map[i].parent not in visited
            and self._index_map[i].transform
            and self._index_map[i].transform[0] is IndexTransform.SPLIT
        ):
            raise ValueError(
                "An inner dimension must not be ordered before its outer dimension"
            )

        visited.append(i)
    self._indices = indices
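A sketch of setting an order after a split, under the same assumptions about `Nest` and `get_indices()` as the earlier pad example.

from accera import Nest   # assumed import path

nest = Nest(shape=(64, 64, 64))      # assumed constructor
i, j, k = nest.get_indices()         # assumed accessor
schedule = nest.create_schedule()

jj = schedule.split(j, 8)            # outer j must stay before its inner jj
schedule.reorder(i, k, j, jj)        # or equivalently: schedule.reorder((i, k, j, jj))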
Python
def tile(
    self, shape: Mapping[LoopIndex, Union[int, DelayedParameter]]
) -> Tuple[LoopIndex]:
    """The `tile` transformation is a convenience syntax that takes a dict of indices and sizes, and splits each index by the corresponding size.
    The indices involved in the split are then ordered such that all the outer indices precede all of their respective inner indices.

        ii, jj, kk = schedule.tile({i: 8, j: 2, k: 3})

    The tile transformation above is shorthand for the following sequence of transformations:

        ii = schedule.split(i, 8)
        jj = schedule.split(j, 2)
        kk = schedule.split(k, 3)

    Args:
        shape: Mapping of indices to tile sizes
    """
    try:
        names = varname(multi_vars=True)
    except:
        names = None

    # split each index; the inner child index is automatically placed after its parent
    # (self._indices is updated in-place)
    split_indices = [
        self.split(self._resolve_index(idx), factor) for idx, factor in shape.items()
    ]

    if names:
        zipped_name_index = zip(names, split_indices)
        for name, index in zipped_name_index:
            index._name = name

    return split_indices
Python
def is_valid_loop_order(self, loop_order: Tuple[LoopIndex]) -> bool:
    """Validates the ordering of related indices: a parent (outer) index must
    precede its inner indices, and an index must not precede its own parent.

    Args:
        loop_order: A tuple of loop indices.
    """
    loop_order = list(loop_order)
    for index in loop_order:
        for inner_index in self._index_map[index].inners:
            if loop_order.index(index) > loop_order.index(inner_index):
                return False

        if self._index_map[index].parent:
            if loop_order.index(index) < loop_order.index(
                self._index_map[index].parent
            ):
                return False

    return True
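A hedged sketch of using the validator before committing to an order, reusing the assumed Nest/indices setup from the reorder example above.

candidate = (i, k, j, jj)            # jj previously created by schedule.split(j, 8)
if schedule.is_valid_loop_order(candidate):
    schedule.reorder(candidate)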
Python
def _replay_delayed_calls(self):
    """
    This method is called once for each function that gets added, so it can run multiple
    times when several functions are added. For the functions to be added correctly, any
    residual state must be cleared between calls. In the Schedule class,
    Schedule._index_map can hold residual state, so self._index_map is reset before the
    delayed methods are replayed.
    """
    if self._delayed_calls:
        # Reset the index map to its pre-parameterized state before applying function-specific parameters
        if self._parameterized_index_map:
            self._index_map = self._deep_copy_index_map(
                self._parameterized_index_map
            )
        else:
            self._parameterized_index_map = self._deep_copy_index_map(
                self._index_map
            )

        for delayed_call in self._delayed_calls:
            params = self._delayed_calls[delayed_call]
            if isinstance(params, DelayedParameter):
                delayed_call(params.get_value())
            else:
                delayed_call(params)
Python
def fuse(
    scheds: Union[Tuple[Schedule], Schedule], *args: Schedule, partial: int = None
) -> FusedSchedule:
    """The `fuse` operation combines multiple iteration spaces into a single "fused" iteration space.
    The fused iteration space represents the union of the work in the original spaces.

    In cases where it doesn't make sense to fuse all of the iteration space dimensions, we can choose
    to fuse a prefix of the dimensions and leave the rest unfused.

    Args:
        scheds: Either the schedules to fuse if performing partial fusing, or the first schedule to fuse if fusing all dimensions
        *args: Optional variable arguments containing subsequent schedules to fuse
        partial: The number of dimensions to fuse. If not specified, all dimensions will be fused
    """
    schedules = [scheds] + list(args) if isinstance(scheds, Schedule) else list(scheds)
    return FusedSchedule(schedules, partial)
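A short sketch; the two Nest constructions are assumed, and both nests are given the same shape so that full fusing makes sense.

from accera import Nest   # assumed import path

nest_a = Nest(shape=(64, 64))        # assumed constructor
nest_b = Nest(shape=(64, 64))
schedule_a = nest_a.create_schedule()
schedule_b = nest_b.create_schedule()

fused_all = fuse(schedule_a, schedule_b)                 # fuse every dimension
fused_one = fuse((schedule_a, schedule_b), partial=1)    # fuse only the first dimension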
Python
def MLAS(
    A: Array,
    B: Array,
    C: Array,
    transA=False,
    transB=False,
    alpha=1.0,
    beta=1.0,
    zero_C=False,
    bias: Array = None,
    opts=Options(),
    target=Target.HOST
):
    """Emits a GEMM-like function that performs matrix multiplication of the form C = alpha * A @ B + beta * C"""

    if (zero_C or bias) and opts.UseBiasFusion:
        return MLAS_with_bias(A, B, bias, C, transA, transB, alpha, beta, opts, target)

    # Only the bias-fusion path is handled here
    raise RuntimeError("Unexpected")
Python
def create_schedule(self) -> "accera.Schedule": "Creates a schedule for shaping the iteration space" from .Schedule import Schedule return Schedule(self)
Python
def create_plan(self, target: "accera.Target" = Target.HOST) -> "accera.Plan": """Creates a plan using the default schedule for the nest. Args: target: Optional target specification. Defaults to the HOST """ return self.create_schedule().create_plan(target)
Python
def iteration_logic(self, logic: Callable = None, predicate=None, placement=None):
    """Adds iteration logic to the nest

    Args:
        logic: Python function that represents the logic to run in the innermost loop of the nest.
        predicate: The predicate that determines when the logic code should run.
        placement: The predicate that determines where the logic code should be placed.

    Remarks: this can be invoked as a decorator, where the logic function will be the first argument:

        @nest.iteration_logic
        def _():
            # logic function implementation

    The decorator invocation pattern only applies when the additional arguments are using the
    default values. To use non-default values (for `predicate`, for example), call this like a
    standard method:

        def fn():
            # logic function implementation

        nest.iteration_logic(fn, predicate=my_predicate)
    """
    wrapped_logic = logic_function(logic)
    self._logic_fns.append(wrapped_logic)
    self._commands.append(
        partial(self._add_iteration_logic, wrapped_logic, predicate, placement)
    )
Python
def _replay_delayed_calls(self):
    """
    This method is called once for each function that gets added, so it can run multiple
    times when several functions are added. For the functions to be added correctly, any
    residual state must be cleared between calls.

    For example, in the Schedule class, Schedule._index_map can hold residual state, so
    self._index_map must be reset before the delayed methods are replayed. If there is no
    residual state between calls, no reset is needed.
    """
    for delayed_call in self._delayed_calls:
        params = self._delayed_calls[delayed_call]
        if isinstance(params, Tuple):
            resolved_param_list = []
            for p in params:
                if isinstance(p, DelayedParameter):
                    resolved_param_list.append(p.get_value())
                else:
                    resolved_param_list.append(p)

            delayed_call(resolved_param_list)
        else:
            delayed_call(params.get_value())
Python
def add( self, source: Union[ "accera.Nest", "accera.Schedule", "accera.Plan", "accera.Function", Callable ], args: List["accera.Array"] = None, base_name: str = "", parameters: Union[dict, List[dict]] = {}, function_opts: dict = {}, auxiliary: dict = {}, ) -> Union["accera.Function", List["accera.Function"]]: """Adds a function to the package. If multiple parameters are provided, generates and adds them according to the parameter grid. Returns a list of functions added if multiple parameters are provided, otherwise the function added. Args: source: The source which defines the function's implementation. args: The order of external-scope arrays used in the function signature. base_name: A base name for the function. The full name for the function will be the base name followed by an automatically-generated unique identifier. parameters: A mapping of parameter to values for each parameter used by the function implementation (if any). Optionally, can be a list of mappings, which will result in multiple functions. function_opts: A dictionary of advanced options to set on the function, e.g. {"no_inline" : True} auxiliary: A dictionary of auxiliary metadata to include in the HAT package. """ if parameters and not isinstance(parameters, dict): return [ self._add_function(source, args, base_name, p, function_opts, auxiliary) for p in parameters ] else: return self._add_function( source, args, base_name, parameters, function_opts, auxiliary )
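A usage sketch of the parameter-grid behaviour described above. It assumes `Package` is the class exposing this method, that `plan`, `A`, `B`, `C` come from an earlier parameterized Nest, and that `P` is the DelayedParameter used inside it; none of these are defined in this listing.

from accera import Package   # assumed import path

pkg = Package()
fns = pkg.add(plan,
              args=[A, B, C],
              base_name="matmul",
              parameters=[{P: 4}, {P: 8}])   # one generated function per parameter dict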
Python
def _add_function( self, source: Union[ "accera.Nest", "accera.Schedule", "accera.Plan", "accera.Function", Callable ], args: List["accera.Array"] = None, base_name: str = "", parameters: dict = {}, function_opts: dict = {}, auxiliary: dict = {}, ) -> "accera.Function": """Adds a function to the package. Args: source: The source which defines the function's implementation. args: The order of external-scope arrays to use in the function signature. base_name: A base name for the function. The full name for the function will be the base name followed by an automatically-generated unique identifier. parameters: A value for each parameter if the function's implementation is parameterized. function_opts: A dictionary of advanced options to set on the function, e.g. {"no_inline" : True} auxiliary: A dictionary of auxiliary metadata to include in the HAT package. """ from .lang import LoopIndex # Auxiliary data should be one copy per function auxiliary_metadata = auxiliary.copy() param_value_dict = {} for delayed_param, value in parameters.items(): delayed_param.set_value(value) if isinstance(value, int): param_value_dict[delayed_param._name] = value else: if isinstance(value, tuple) or isinstance(value, list): if all(isinstance(v, LoopIndex) for v in value): param_value_dict[delayed_param._name] = str( [x._name for x in value] ) else: raise ValueError("Invalid value of parameters") else: param_value_dict[delayed_param._name] = str(value) auxiliary_metadata["accera"] = {"parameters": param_value_dict} def validate_target(target: Target): # can't use set because targets are mutable (therefore unhashable) for f in self._fns.values(): if not target.is_compatible_with(f.target): raise NotImplementedError( "Function target being added is currently incompatible with existing functions in package" ) def get_function_name(target: Target): # Get a function name using a stable hash of [base_name, signature, target, and parameters] # If no base_name is provided, use a unique identifier to avoid collisions (assume user # does not care about the function name in this case) # ref: https://death.andgravity.com/stable-hashing suffix = ( md5( json.dumps( tuple( map( lambda x: str(x), [ base_name or token_hex(4), target, auxiliary_metadata["accera"], ] + [ (a.role, a.element_type, a.shape, a.layout) for a in args ], ) ) ).encode("utf-8") ) .digest() .hex()[:16] ) # truncate # Function names must begin with an _ or alphabetical character return f"{base_name}_{suffix}" if base_name else f"_{suffix}" # Resolve any undefined argument shapes based on the source usage pattern for arr in args: _resolve_array_shape(source, arr) if isinstance(source, lang.Nest) or isinstance(source, lang.Schedule): # assumption: convenience functions are for host targets only source = source.create_plan(Target.HOST) # fall-through if isinstance(source, lang.Plan): self._dynamic_dependencies.update(source._dynamic_dependencies) source = source._create_function( args, public=True, no_inline=function_opts.get("no_inline", False) ) # fall-through if isinstance(source, lang.Function): source: lang.Function # due to the fall-through, we only need to validate here validate_target(source.target) native_array_args = [arg._get_native_array() for arg in args] assert source.public source.name = get_function_name(source.target) source.base_name = base_name source.auxiliary = auxiliary_metadata source.param_overrides = parameters source.args = tuple(native_array_args) source.requested_args = args self._fns[source.name] = source return source # for 
composability elif isinstance(source, Callable): # due to the fall-through, we only need to validate here validate_target(Target.HOST) @wraps(source) def wrapper_fn(args): source(*map(_convert_arg, args)) name = get_function_name(Target.HOST) wrapped_func = lang.Function( name=name, base_name=base_name, public=True, decorated=function_opts.get("decorated", False), no_inline=function_opts.get("no_inline", False), args=tuple(map(_convert_arg, args)), requested_args=args, definition=wrapper_fn, auxiliary=auxiliary_metadata, target=Target.HOST, ) self._fns[name] = wrapped_func return wrapped_func # for composability else: raise ValueError("Invalid type for source")
Python
def add_description(
    self,
    author: str = None,
    license: str = None,
    other: dict = {},
    version: str = None,
):
    """Adds descriptive metadata to the HAT package.

    Args:
        author: Name of the individual or group that authored the package.
        license: The internet URL of the license used to release the package.
        other: User-specific descriptive metadata.
            If the key already exists, the value will be overwritten.
            To remove a key, set its value to None.
        version: The package version.
    """
    if other:
        if "auxiliary" not in self._description:
            self._description["auxiliary"] = other
        else:
            self._description["auxiliary"].update(other)

        # remove any keys marked None
        keys_to_remove = [
            k for k, v in self._description["auxiliary"].items() if v is None
        ]
        for k in keys_to_remove:
            del self._description["auxiliary"][k]

    if version is not None:
        self._description["version"] = version

    if author is not None:
        self._description["author"] = author

    if license is not None:
        self._description["license"] = license
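A short usage sketch of the call above; the values are illustrative, and a `None` value in `other` removes that key from the auxiliary metadata:

package.add_description(
    author="Jane Doe",
    license="https://opensource.org/licenses/MIT",
    version="1.0.0",
    other={
        "title": "Element-wise add kernels",   # added (or overwritten) in auxiliary metadata
        "deprecated_key": None,                # removed from auxiliary metadata if present
    },
)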
Python
def check_correctness(
    self,
    function_name: str,
    before: List["numpy.ndarray"],
    after: List["numpy.ndarray"],
    tolerance: float = 1e-5
):
    """Performs correctness-checking on a function

    Args:
        function_name: name of the function to check
        before: values before calling the function
        after: desired values after calling the function
        tolerance: relative tolerance for floating point comparison
    """
    hat_files = list(filter(lambda f: f.suffix == ".hat", self.file_list))
    if hat_files:
        assert len(hat_files) == 1
        hat_file = hat_files[0]

        if not self.correctness_checker:
            self.correctness_checker = CorrectnessChecker(hat_file)

        self.correctness_checker.run(function_name, before, after, tolerance)
    else:
        print("Warning: check_correctness was called but no hat file was generated. Correctness check skipped.")
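A sketch of driving the correctness check above with NumPy reference data. Here `verifier` stands for an instance of the class this method belongs to, and the function name is illustrative rather than taken from a real package:

import numpy as np

A = np.random.random((16, 16)).astype(np.float32)
B = np.random.random((16, 16)).astype(np.float32)
B_expected = B + A  # reference result computed directly in NumPy

verifier.check_correctness(
    "elementwise_add",       # name of the function inside the built HAT package (illustrative)
    before=[A, B],           # argument values before the call
    after=[A, B_expected],   # expected argument values after the call
    tolerance=1e-5,
)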
Python
def file_checker(self, filename): """Returns a checker for applying FileCheck directives Args: filename: name or path to the file to apply the checks If a non-path is provided, searches the output directory for the first instance The non-path can be a glob-like regex, e.g. "*myfile.mlir" """ filepath = pathlib.Path(filename) # Python 3.7 on Windows raises an OSError for is_file() for non-existent files, # use os.path.isfile() instead if not os.path.isfile(filepath.absolute()): files = glob.glob(f"{self.output_dir}/**/{filename}", recursive=True) if not files: raise ValueError(f"{filename} not found, did you set the correct Package.Format?") filepath = pathlib.Path(files[0]) return FileChecker(filepath.resolve())
Python
def unroll(self, index: Union[LoopIndex, DelayedParameter]): """Unrolls the loop along a dimension Args: index: The dimension to unroll """ if isinstance(index, DelayedParameter): self._delayed_calls[partial(self.unroll)] = index return None self._add_index_attr(index, "unrolled") self._commands.append(partial(self._unroll, index))
Python
def vectorize(self, index: Union[LoopIndex, DelayedParameter]): """Only available for targets that have SIMD registers and support vector instructions. Marks a dimension of the iteration-space for vectorization. Args: index: The index to vectorize """ if isinstance(index, DelayedParameter): self._delayed_calls[partial(self.vectorize)] = index return None if not self._target.vectorization_info: raise RuntimeError("The target does not support vectorization") self._add_index_attr(index, "vectorized") self._commands.append( partial(self._vectorize, index, self._target.vectorization_info) )
Python
def parallelize( self, indices: Union[LoopIndex, Tuple[LoopIndex], DelayedParameter], pin: Union[Tuple[Any], DelayedParameter] = None, policy: Union[str, DelayedParameter] = "static", ): """Executes one or more loops in parallel on multiple cores or processors. Only available for targets with multiple cores or processors. Args: indices: The iteration-space dimensions to run in parallel. To assign multiple threads to an index, first split that index, then parallelize its split indices. Unsplit indices will be assigned one thread each, split indices will be assigned threads based on the number of split blocks. This is limited by the number of threads supported by the target. pin: Pin the computation to a subset of cores or processors. policy: The scheduling policy to apply ("dynamic" or "static"). """ if self._target.category == Target.Category.CPU: self._dynamic_dependencies.add(LibraryDependency.OPENMP) if any([isinstance(arg, DelayedParameter) for arg in [indices, pin, policy]]): self._delayed_calls[partial(self.parallelize)] = { "indices": indices, "pin": pin, "policy": policy, } return None indices = [indices] if isinstance(indices, LoopIndex) else list(indices) # ensure the indices are contiguous and follow the Schedule ordering start = self._sched._indices.index(indices[0]) end = start + len(indices) if ( end > len(self._sched._indices) or indices != self._sched._indices[start:end] ): raise ValueError( "indices must be contiguous in the Schedule dimension order" ) for index in indices: self._add_index_attr(index, "parallelized") self._commands.append(partial(self._parallelize, indices, policy))
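A sketch tying unroll and parallelize together, continuing the Accera-style example from earlier (reusing `nest`, `i`, `j` from that sketch). The contiguity check above is why the parallelized indices must appear back-to-back in the schedule order:

schedule = nest.create_schedule()
jj = schedule.split(j, 8)        # j -> (j, jj); schedule order is now (i, j, jj)

plan = schedule.create_plan()    # host plan by default (assumed)
plan.parallelize(indices=i)      # outermost loop distributed across cores
plan.unroll(jj)                  # fully unroll the small inner block of 8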
Python
def cache( self, source: Union[Array, Cache], index: Union[LoopIndex, DelayedParameter] = None, trigger_index: Union[LoopIndex, DelayedParameter] = None, layout: Union[Array.Layout, DelayedParameter] = None, max_elements: int = None, thrifty: Union[bool, DelayedParameter] = None, location: _MemorySpace = _MemorySpace.NONE, level: Union[int, DelayedParameter] = None, trigger_level: Union[int, DelayedParameter] = None, double_buffer: Union[bool, DelayedParameter] = False, double_buffer_location: Union[object, _MemorySpace, DelayedParameter] = AUTO, vectorize: Union[bool, DelayedParameter, object] = AUTO, _delayed_cache: DelayedCache = None, ): """Adds a cache for a view target Args: source: The array or cache from which this cache is copied. index: The index used to determine the cache level. Specify one and only one of `index`, `level`, `max_elements`. trigger_index: The index used to determine what level to fill the cache at. `trigger_index` can't come after `index` in the schedule order, and will default to `index` if not specified. Specify at most one of `trigger_index` or `trigger_level`. layout: The affine memory map, if different from the source. level: The key-slice level to cache (the number of wildcard dimensions in a key-slice). Specify one and only one of `index`, `level`, `max_elements`. trigger_level: The key-slice level to fill the cache at. `trigger_level` can't be smaller than `level`, and will default to `level` if not specified. Specify at most one of `trigger_index` or `trigger_level`. max_elements: The maximum elements to include in the cached region. Specify one and only one of `index`, `level`, `max_elements`. thrifty: Use thrifty caching (copy data into a cache only if the cached data differs from the original active block). This defaults to False as it slows down compilation speed so it is intended as an opt-in feature. double_buffer: Make this a double buffer cache by copying data one iteration ahead and using private memory on GPU for this procedure. vectorize: Whether to vectorize the cache operations. Defaults to AUTO, which will behave like `vectorize=True` if the loopnest has a vectorized loop or `vectorize=False` if the loopnest has no vectorized loops. double_buffer_location: The memory space used for storing iteration data for the double buffer cache. Requires that double_buffer is set to True. Defaults to AUTO. AUTO will configure the double buffering location based on the following: | location | double_buffer | double_buffer_location = `AUTO` | | ------------------- | ------------- | ------------------------------- | | MemorySpace.SHARED | True | MemorySpace.PRIVATE | | !MemorySpace.SHARED | True | Same value as location | """ if ( any( [ isinstance(arg, DelayedParameter) for arg in ( index, trigger_index, level, trigger_level, thrifty, double_buffer, double_buffer_location, vectorize, layout, ) ] ) or (isinstance(source, DelayedCache) and not source.completed) ): # If any of the cache level arguments are parameters, then this cache call is incomplete until those parameters # have values. 
Additionally, if this is a hierarchical cache and an outer cache is parameterized, # then this cache call is also incomplete until the outer cache's parameters have values # Create an incomplete Cache object so hierarchical caches that depend on this cache handle can # have an object to hold onto delayed_cache = DelayedCache(plan=self, target=source) self._delayed_calls[ partial( self.cache, source=source, max_elements=max_elements, location=location, _delayed_cache=delayed_cache, ) ] = { "index": index, "trigger_index": trigger_index, "level": level, "trigger_level": trigger_level, "layout": layout, "thrifty": thrifty, "double_buffer": double_buffer, "double_buffer_location": double_buffer_location, "vectorize": vectorize, } return delayed_cache if sum(i is not None for i in [index, level, max_elements]) != 1: raise ValueError( "Specify one and only one of index, level, or max_elements" ) if max_elements is not None and max_elements <= 0: raise ValueError( "Max element count specified as a cache budget must be greater than 0" ) if isinstance(source, Array): array_role = source.role elif isinstance(source, Cache): array_role = source.target_role if double_buffer and array_role not in [Array.Role.CONST, Array.Role.INPUT]: raise ValueError( "Double-buffering is only supported for CONST and INPUT arrays" ) if not double_buffer and double_buffer_location != AUTO: raise ValueError( "double_buffer_location is only valid to specify when double_buffer is set to True" ) if double_buffer_location is AUTO: if double_buffer: if ( self._target.category == Target.Category.GPU and location == _MemorySpace.SHARED ): double_buffer_location = _MemorySpace.PRIVATE else: double_buffer_location = location else: double_buffer_location = _MemorySpace.NONE if max_elements is None: # Validate or set index / level values # Validate that if index is specified, then level and trigger_level are not if (index is not None) and (level is not None or trigger_level is not None): raise ValueError( "Can't specify both a cache index and a cache level or trigger level" ) # Validate that if level is specified, then index and trigger_index are not if (level is not None) and (index is not None or trigger_index is not None): raise ValueError( "Can't specify both a cache level and a cache index or trigger index" ) if level: # the level of the key-slices is the count of right-aligned wildcards, e.g. level 2 = (i[0], ..., *, *) # therefore the index is at position -level, e.g. (i[0], ..., index, *) # Note: this takes a snapshot of the schedule ordering index = self._sched._indices[-level] else: self._add_index_attr(index, "cache") index_pos = self._sched._indices.index(index) level = len(self._sched._indices) - index_pos if (trigger_level or trigger_index) and array_role not in [ Array.Role.CONST, Array.Role.INPUT, ]: raise ValueError( "Multicaching is only supported for CONST and INPUT arrays" ) if layout is None: layout = source._requested_layout # Validate or set trigger_index / trigger_level values if trigger_index is not None and trigger_level is not None: raise ValueError( "Can't specify both a trigger_index and a trigger_level" ) if trigger_index is None and trigger_level is None: trigger_index = index trigger_level = level elif trigger_level is not None: # the trigger level is the level of the loopnest to fill the cache at. 
Must be the same as level or precede it # Note: this takes a snapshot of the schedule ordering trigger_index = self._sched._indices[-trigger_level] else: self._add_index_attr(trigger_index, "trigger") trigger_index_pos = self._sched._indices.index(trigger_index) trigger_level = len(self._sched._indices) - trigger_index_pos if level > trigger_level: raise ValueError( "Cache level must be less than or equal to the cache trigger level" ) if level <= 0: raise ValueError("Cache level must be greater than or equal to 1") if trigger_level <= 0: raise ValueError( "Cache trigger level must be greater than or equal to 1" ) if isinstance(source, Cache): # The outer cache must have a higher cache level and a higher trigger level than this cache, or a higher max element budget if source.max_elements is None and ( source.level is None or source.trigger_level is None ): # If the outer cache doesn't have a max element budget, then it must have both a cache level and a cache trigger_level raise ValueError( "Given source cache doesn't have a cache level, trigger_level, or max_elements" ) if (source.max_elements is None) != (max_elements is None): raise ValueError( "Can only create a max element hierarchical caches of other max element caches" ) if source.max_elements is not None: if source.max_elements <= max_elements: raise ValueError( "Outer max element cache for a hierarchical cache must have a larger budget than the inner cache" ) else: if source.level <= level: raise ValueError( "Outer cache for a hierarchical cache must have a higher cache level than inner cache" ) if source.level < trigger_level: raise ValueError( "Outer cache for a hierarchical cache must have a greater or equal cache level than the inner cache's trigger_level" ) cache = Cache( plan=self, target=source, index=index, trigger_index=trigger_index, level=level, trigger_level=trigger_level, layout=layout, max_elements=max_elements, thrifty=thrifty, location=location, double_buffer=double_buffer, double_buffer_location=double_buffer_location, vectorize=vectorize, ) if _delayed_cache: _delayed_cache.complete(cache) cache = _delayed_cache if _delayed_cache.enqueue_command: self._commands.append(partial(self._add_cache, cache)) _delayed_cache.enqueue_command = False else: self._commands.append(partial(self._add_cache, cache)) return cache
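A sketch of the two most common ways to call `cache` above, continuing the same plan. `level` counts key-slice wildcards from the innermost dimension, while `index`/`trigger_index` pin the cache to explicit loop indices (multicaching is INPUT/CONST-only, as enforced above):

# Cache the input A at key-slice level 2: the two innermost dimensions of the
# current schedule order form the cached active block.
AA = plan.cache(A, level=2)

# Alternative form (shown for comparison): cache at an explicit index and fill
# it one level further out via trigger_index.
# AA = plan.cache(A, index=jj, trigger_index=j)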
Python
def pack_and_embed_buffer(
    self,
    target,
    wrapper_fn_name,
    packed_buffer_name="",
    indexing=CacheIndexing.GLOBAL_TO_PHYSICAL,
):
    """Emits a packing function for the given target and rewrites the loopnest
    to assume the given input is packed

    Args:
        target: The target being cached (e.g. Array, Matrix, etc.)
        wrapper_fn_name: The name to give the wrapping function
        packed_buffer_name: The name to give the packed constant buffer
        indexing: The cache indexing
    """
    # TODO: Make this work with multiple kernels, fused schedules
    if target.role != Array.Role.CONST:
        raise ValueError("Can only pack and embed constant data buffers")

    self._commands.append(
        partial(
            self._pack_and_embed_buffer,
            target,
            wrapper_fn_name,
            packed_buffer_name,
            indexing,
        )
    )
Python
def emit_runtime_init_pack( self, target, packing_func_name, packed_buf_size_func_name, indexing=CacheIndexing.GLOBAL_TO_PHYSICAL, ): """Emits a packing function for the given target and rewrites the loopnest to assume the given input is packed Args: target: The target being cached (e.g Array, Matrix, etc) packing_func_name: The name of the packing function to emit packed_buf_size_func_name: The name of the function giving the packed buffer size to emit indexing: The cache indexing """ # TODO: Make this work with multiple kernels, fused schedules self._commands.append( partial( self._emit_runtime_init_packing, target, packing_func_name, packed_buf_size_func_name, indexing, ) )
Python
def bind(self, mapping: Mapping[Union[LoopIndex, Tuple[LoopIndex]], GridUnits]): """Binds iteration space dimensions to GPU execution units Args: mapping: Mapping of indices to GPU thread or block identifiers """ if self._target is not None and self._target.category == Target.Category.GPU: self._commands.append(partial(self._bind, mapping)) for index_or_tuple, proc in mapping.items(): self._bindings[proc] = index_or_tuple else: raise ValueError("Only supported on plans with GPU targets")
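A sketch of the GPU binding above. The GPU target construction is an assumption here (the real constructor may require a specific device model); the `GridUnit` mapping mirrors the `mapping` argument checked by `bind`:

gpu = acc.Target(category=acc.Target.Category.GPU)   # assumed constructor form
plan = schedule.create_plan(gpu)
plan.bind(mapping={
    i: gpu.GridUnit.BLOCK_X,    # outer index -> thread blocks
    j: gpu.GridUnit.THREAD_X,   # inner index -> threads within a block
})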
Python
def _replay_delayed_calls(self):
    """
    This method is called once per added function, so it can be called multiple times
    when multiple functions get added. In order for the functions to be added correctly,
    we need to make sure all residual state is cleared between different method calls.

    For example, in the Schedule class we identified that Schedule._index_map can hold
    residual state, so we need to reset self._index_map before we replay the delayed methods.

    If there is no residual state between different method calls, no reset is needed.
    """
    for delayed_call in self._delayed_calls:
        params = self._delayed_calls[delayed_call]

        if isinstance(params, dict):
            resolved_params = {
                key: params[key].get_value()
                if isinstance(params[key], DelayedParameter)
                else params[key]
                for key in params
            }
            delayed_call(**resolved_params)
        else:
            resolved_params = [
                param.get_value() if isinstance(param, DelayedParameter) else param
                for param in params
            ]
            delayed_call(*resolved_params)
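The delayed-call machinery above is what makes parameterized schedules work: calls made with `DelayedParameter` placeholders are recorded, and replayed by methods like this one once `Package.add` has bound concrete values. A sketch, assuming a helper like `acc.create_parameters()` that returns such placeholders (the helper name is an assumption), and continuing the earlier Accera-style example:

P_i_split, P_j_split = acc.create_parameters()   # DelayedParameter placeholders (assumed helper)

schedule = nest.create_schedule()
ii = schedule.split(i, P_i_split)    # recorded; resolved when the parameter gets a value
jj = schedule.split(j, P_j_split)
plan = schedule.create_plan()

# Concrete values are supplied per added function; the delayed calls are then
# re-executed with these values when the function is added.
package.add(plan, args=(A, B), base_name="tiled_add",
            parameters={P_i_split: 32, P_j_split: 16})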
Python
def simulate( self, initial_state, policy, initial_action=None, logger=None, stack_obs=True ): """Simulate a set of particles starting from `state' and following `policy'.""" if self.num_samples > 0: initial_state = repeat_along_dimension( initial_state, number=self.num_samples, dim=0 ) initial_state = initial_state.reshape(-1, *self.dynamical_model.dim_state) if initial_action is not None: initial_action = repeat_along_dimension( initial_action, number=self.num_samples, dim=0 ) initial_action = initial_action.reshape(*initial_state.shape[:-1], -1) trajectory = rollout_model( dynamical_model=self.dynamical_model, reward_model=self.reward_model, policy=policy, initial_state=initial_state, initial_action=initial_action, max_steps=self.num_steps, termination_model=self.termination_model, ) if not stack_obs: self._log_trajectory(trajectory) return trajectory else: observation = stack_list_of_tuples(trajectory, dim=initial_state.ndim - 1) self._log_observation(observation) return observation
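The `repeat_along_dimension`/reshape dance above just turns a batch of initial states into `num_samples` independent particles per state before the model rollout. A plain-torch sketch of that shape manipulation (illustrative, not the library's own helper):

import torch

num_samples, dim_state = 8, (3,)
initial_state = torch.randn(5, *dim_state)                          # batch of 5 states
particles = initial_state.unsqueeze(0).repeat(num_samples, 1, 1)    # (8, 5, 3)
particles = particles.reshape(-1, *dim_state)                       # (40, 3): one row per particle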
Python
def _log_observation(self, observation): """Log a simulated observation (a stacked trajectory).""" if not self.log_simulation: return scale = torch.diagonal(observation.next_state_scale_tril, dim1=-1, dim2=-2) self._info.update( sim_entropy=observation.entropy.mean().item(), sim_return=observation.reward.sum(-1).mean().item(), sim_scale=scale.square().sum(-1).sum(0).mean().sqrt().item(), sim_max_state=observation.state.abs().max().item(), sim_max_action=observation.action.abs().max().item(), ) for key, value in self.reward_model.info.items(): self._info.update(**{f"sim_{key}": value})
Python
def post_init(self): """Set derived modules after initialization.""" super().post_init() self.policy.dist_params.update(tanh=True) self.policy_target.dist_params.update(tanh=True)
Python
def append(self, observation): """Append new observation to the dataset. Every time a new observation is appended, sample a mask to build a Bootstrap. Parameters ---------- observation: Observation Raises ------ TypeError If the new observation is not of type Observation. """ if not type(observation) == Observation: raise TypeError( f"input has to be of type Observation, and it was {type(observation)}" ) if self.bootstrap: self.weights[self.ptr] = self.mask_distribution.sample() else: self.weights[self.ptr] = torch.ones(self.mask_distribution.batch_shape) super().append(observation)
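A sketch of the kind of `mask_distribution` this dataset relies on: a Poisson(1) mask per ensemble head, so each appended observation gets an independent per-head inclusion weight (a standard online-bootstrap construction; the head count is illustrative):

import torch

num_heads = 5
mask_distribution = torch.distributions.Poisson(rate=torch.ones(num_heads))
weights = mask_distribution.sample()   # e.g. tensor([1., 0., 2., 1., 0.]): per-head counts for one observation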
Python
def split(self, ratio=0.8, *args, **kwargs): """Split into two data sets.""" return super().split( ratio=ratio, num_bootstraps=self.weights.shape[-1], bootstrap=self.bootstrap, *args, **kwargs, )
Python
def NonMaxSuppression(boxes, scores, threshold):
    r"""Non-Maximum Suppression

    The algorithm begins by storing the highest-scoring bounding box, and eliminating
    any box whose intersection-over-union (IoU) with it is too great. The procedure
    repeats on the surviving boxes, and so on until there are no boxes left.
    The stored boxes are returned.

    NB: The function returns a tuple (mask, indices), where indices index into the
    input boxes and are sorted according to score, from highest to lowest.
    indices[i][mask[i]] gives the indices of the surviving boxes from the ith batch,
    sorted by score.

    Args:
        - boxes :math:`(N, n_boxes, 4)`
        - scores :math:`(N, n_boxes)`
        - threshold (float): IoU above which to eliminate boxes

    Outputs:
        - mask: :math:`(N, n_boxes)`
        - indices: :math:`(N, n_boxes)`

    Examples::

        >>> boxes = torch.Tensor([[[10., 20., 20., 15.],
        >>>                        [24., 22., 50., 54.],
        >>>                        [10., 21., 20., 14.5]]])
        >>> scores = torch.abs(torch.randn([1, 3]))
        >>> mask, indices = NonMaxSuppression(boxes, scores, 0.7)
        >>> # indices are SORTED according to score.
        >>> surviving_box_indices = indices[mask]
    """
    if boxes.is_cuda:
        return gpu.non_max_suppression(boxes, scores, threshold)
    else:
        return cpu.non_max_suppression(boxes, scores, threshold)
Python
def main2():
    '''
    For K-fold cross-validation.
    :return:
    '''
    print(config)
    torch.manual_seed(0)
    train_dataset_info = read_train_dataset_info()
    kf = KFold(n_splits=5, random_state=8, shuffle=True)
    fold = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = config['n_gpu']
    for train_data, test_data in kf.split(train_dataset_info):
        fold += 1
        train_loader, validation_loader, net, loss, opt, start_epoch = \
            build_network(train_dataset_info[train_data], train_dataset_info[test_data])
        net.cuda()
        net = DataParallel(net)
        loss.cuda()
        save_dir = os.path.join(config['save_dir'], "_" + str(fold) + "fold")
        for epoch in range(start_epoch + 1, start_epoch + config['epochs']):
            print("the epoch is %d" % epoch)
            # when resuming training, the learning rate changes at this point
            train(train_loader, net, loss, epoch, opt, get_lr, config['save_freq'], save_dir)
            validation(validation_loader, net, loss, epoch, save_dir)
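Note that `kf.split` yields integer index arrays, so `train_dataset_info` must support NumPy-style fancy indexing (for example, be an `np.ndarray`). A minimal sketch of the splitting pattern used above:

from sklearn.model_selection import KFold
import numpy as np

data = np.arange(10)   # stand-in for train_dataset_info
kf = KFold(n_splits=5, random_state=8, shuffle=True)
for fold, (train_idx, test_idx) in enumerate(kf.split(data), start=1):
    train_part, test_part = data[train_idx], data[test_idx]
    print(fold, train_part.shape, test_part.shape)   # 5 folds of 8/2 samples each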
Python
def magnitude(self): ''' Gets the magnitude of the vector ''' return math.sqrt(math.pow(self.x, 2) + math.pow(self.y, 2))
Python
def squareMagnitude(self):
    '''
    Gets the squared magnitude of the vector. Useful e.g. for comparisons,
    since it avoids the sqrt call, which can slow down performance.
    '''
    return math.pow(self.x, 2) + math.pow(self.y, 2)
Python
def normalize(self): ''' Turn a non-zero vector into a vector of unit length ''' l = self.magnitude() if(l > 0): self.x *= (1/l) self.y *= (1/l)
Python
def componentProductUpdate(self, v): ''' Performs a component-wise product with the given vector and sets this vector to its result ''' self.x *= v.x self.y *= v.y
Python
def componentProduct(self, v): ''' Calculates and returns a component-wise product of this vector with the given vector. ''' return Vector(self.x * v.x, self.y * v.y)
Python
def scalarProduct(self, v):
    '''
    Calculates and returns the scalar (dot) product of this vector
    with the given vector.
    '''
    return self.x * v.x + self.y * v.y
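A quick usage sketch of the vector methods above, assuming the `Vector(x, y)` constructor used by the rest of this class:

v = Vector(3, 4)
w = Vector(1, 0)
v.magnitude()         # 5.0
v.squareMagnitude()   # 25.0
v.scalarProduct(w)    # 3.0  (3*1 + 4*0)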
Python
def ask(self, question): """Asks Cleverbot a question. Maintains message history. :param question: The question to ask :return Cleverbot's answer """ # Set the current question self.data['stimulus'] = question # Connect to Cleverbot's API and remember the response resp = self._send() # Add the current question to the conversation log self.conversation.append(question) parsed = self._parse(resp.text) # Set data as appropriate if self.data['sessionid'] != '': self.data['sessionid'] = parsed['conversation_id'] # Add Cleverbot's reply to the conversation log self.conversation.append(parsed['answer']) return parsed['answer'].encode('latin-1').decode('utf-8')
Python
def _send(self): """POST the user's question and all required information to the Cleverbot API Cleverbot tries to prevent unauthorized access to its API by obfuscating how it generates the 'icognocheck' token. The token is currently the md5 checksum of the 10th through 36th characters of the encoded data. This may change in the future. TODO: Order is not guaranteed when urlencoding dicts. This hasn't been a problem yet, but let's look into ordered dicts or tuples instead. """ # Set data as appropriate if self.conversation: linecount = 1 for line in reversed(self.conversation): linecount += 1 self.data['vText' + str(linecount)] = line if linecount == 8: break # Generate the token enc_data = urlencode(self.data) digest_txt = enc_data[9:35] token = hashlib.md5(digest_txt.encode('utf-8')).hexdigest() self.data['icognocheck'] = token # POST the data to Cleverbot's API and return return self.session.post(Cleverbot.API_URL, data=self.data, headers=Cleverbot.headers)
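A standalone illustration of the token step above: the md5 digest is taken over a fixed slice of the url-encoded payload. The payload keys here are illustrative and do not claim to be the full set Cleverbot expects:

import hashlib
from urllib.parse import urlencode

data = {"stimulus": "Hello there", "cb_settings_language": "en", "sessionid": ""}
enc_data = urlencode(data)
token = hashlib.md5(enc_data[9:35].encode("utf-8")).hexdigest()
data["icognocheck"] = token   # same slice-and-hash scheme as _send above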
Python
def compare(self, other: "GKeepTodoItem", ignore_keys: Sequence[str] = []) -> bool: """Compare two items, return True if they are considered equal.""" for key in self._key_names: if key in ignore_keys: continue elif key in self._date_key_names: if not is_same_datetime( self[key], other[key], tol=datetime.timedelta(minutes=10) ): logger.opt(lazy=True).trace( f"\n\nItems differ\n\nItem1\n\n{self}\n\nItem2\n\n{other}\n\nKey" f" [{key}] is different - [{repr(self[key])}] | [{repr(other[key])}]" ) return False else: if self[key] != other[key]: logger.opt(lazy=True).trace(f"Items differ [{key}]\n\n{self}\n\n{other}") return False return True
Python
def add_todo_block(self, title: str, checked: bool = False) -> NotionTodoBlock: """Create a new TODO block with the given title.""" new_block = { "object": "block", "type": "to_do", "to_do": { "text": [{"type": "text", "text": {"content": title}}], "checked": checked, }, } raw_item = self._client.blocks.children.append( block_id=self._page_id, children=[new_block] ) return NotionTodoBlock.from_raw_item(raw_item)
Python
def _parse_gcal_item_desc( gcal_item: Item, ) -> Tuple[List[str], str, Optional[UUID]]: """Parse and return the necessary TW fields off a Google Calendar Item.""" annotations: List[str] = [] status = "pending" uuid = None if "description" not in gcal_item.keys(): return annotations, status, uuid gcal_desc = gcal_item["description"] # strip whitespaces, empty lines lines = [line.strip() for line in gcal_desc.split("\n") if line][1:] # annotations i = 0 for i, line in enumerate(lines): parts = line.split(":", maxsplit=1) if len(parts) == 2 and parts[0].lower().startswith("* annotation"): annotations.append(parts[1].strip()) else: break if i == len(lines) - 1: return annotations, status, uuid # Iterate through rest of lines, find only the status and uuid ones for line in lines[i:]: parts = line.split(":", maxsplit=1) if len(parts) == 2: start = parts[0].lower() if start.startswith("* status"): status = parts[1].strip().lower() elif start.startswith("* uuid"): try: uuid = UUID(parts[1].strip()) except ValueError as err: logger.error( f'Invalid UUID "{err}" provided during GCal -> TW conversion,' f" Using None...\n\n{traceback.format_exc()}" ) return annotations, status, uuid
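An illustrative Google Calendar description in the format this parser expects: the first non-empty line is dropped, "* Annotation" lines become TW annotations, and the status/uuid lines are picked up afterwards (all values are made up):

gcal_item = {
    "description": (
        "Imported from Taskwarrior\n"
        "* Annotation: call the bank about the transfer\n"
        "* status: pending\n"
        "* uuid: 0c4a8a42-5a1e-4a0b-9c33-5e3f0b7f9d2e\n"
    )
}
annotations, status, uuid = _parse_gcal_item_desc(gcal_item)
# annotations == ["call the bank about the transfer"], status == "pending"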
Python
def main(
    gcal_calendar: str,
    google_secret: str,
    oauth_port: int,
    tw_tags: List[str],
    tw_project: str,
    resolution_strategy: str,
    verbose: int,
    combination_name: str,
    custom_combination_savename: str,
    do_list_combinations: bool,
):
    """Synchronize calendars from your Google Calendar with filters from Taskwarrior.

    The list of TW tasks is determined by a combination of TW tags and a TW project,
    while the calendar in GCal should be provided by its name. If it doesn't exist,
    it will be created.
    """
    # setup logger ----------------------------------------------------------------------------
    loguru_tqdm_sink(verbosity=verbose)
    log_to_syslog(name="tw_gcal_sync")
    logger.debug("Initialising...")
    inform_about_config = False

    if do_list_combinations:
        list_named_combinations(config_fname="tw_gcal_configs")
        return 0

    # cli validation --------------------------------------------------------------------------
    check_optional_mutually_exclusive(combination_name, custom_combination_savename)
    combination_of_tw_project_tags_and_gcal_calendar = any(
        [
            tw_project,
            tw_tags,
            gcal_calendar,
        ]
    )
    check_optional_mutually_exclusive(
        combination_name, combination_of_tw_project_tags_and_gcal_calendar
    )

    # existing combination name is provided ---------------------------------------------------
    if combination_name is not None:
        app_config = fetch_app_configuration(
            config_fname="tw_gcal_configs", combination=combination_name
        )
        tw_tags = app_config["tw_tags"]
        tw_project = app_config["tw_project"]
        gcal_calendar = app_config["gcal_calendar"]

    # combination manually specified ----------------------------------------------------------
    else:
        inform_about_config = True
        combination_name = cache_or_reuse_cached_combination(
            config_args={
                "gcal_calendar": gcal_calendar,
                "tw_project": tw_project,
                "tw_tags": tw_tags,
            },
            config_fname="tw_gcal_configs",
            custom_combination_savename=custom_combination_savename,
        )

    # at least one of tw_tags, tw_project should be set ---------------------------------------
    if not tw_tags and not tw_project:
        raise RuntimeError(
            "You have to provide at least one valid tag or a valid project ID to use for"
            " the synchronization"
        )

    # announce configuration ------------------------------------------------------------------
    logger.info(
        format_dict(
            header="Configuration",
            items={
                "TW Tags": tw_tags,
                "TW Project": tw_project,
                "Google Calendar": gcal_calendar,
            },
            prefix="\n\n",
            suffix="\n",
        )
    )

    # initialize sides ------------------------------------------------------------------------
    tw_side = TaskWarriorSide(tags=tw_tags, project=tw_project)

    gcal_side = GCalSide(
        calendar_summary=gcal_calendar, oauth_port=oauth_port, client_secret=google_secret
    )

    # sync ------------------------------------------------------------------------------------
    try:
        with Aggregator(
            side_A=gcal_side,
            side_B=tw_side,
            converter_B_to_A=convert_tw_to_gcal,
            converter_A_to_B=convert_gcal_to_tw,
            resolution_strategy=get_resolution_strategy(
                resolution_strategy, side_A_type=type(gcal_side), side_B_type=type(tw_side)
            ),
            config_fname=combination_name,
            ignore_keys=(
                (),
                ("due", "end", "entry", "modified", "urgency"),
            ),
        ) as aggregator:
            aggregator.sync()
    except KeyboardInterrupt:
        logger.error("Exiting...")
        return 1
    except:
        report_toplevel_exception(is_verbose=verbose >= 1)
        return 1

    if inform_about_config:
        inform_about_combination_name_usage(combination_name)

    return 0
Python
def main(
    notion_page_id: str,
    tw_tags: List[str],
    tw_project: str,
    token_pass_path: str,
    resolution_strategy: str,
    verbose: int,
    combination_name: str,
    custom_combination_savename: str,
    do_list_combinations: bool,
):
    """Synchronise filters of TW tasks with the to_do items of Notion pages.

    The list of TW tasks is determined by a combination of TW tags and a TW project, while
    the Notion pages should be provided by their URLs.
    """
    # setup logger ----------------------------------------------------------------------------
    loguru_tqdm_sink(verbosity=verbose)
    log_to_syslog(name="tw_notion_sync")
    logger.debug("Initialising...")
    inform_about_config = False

    if do_list_combinations:
        list_named_combinations(config_fname="tw_notion_configs")
        return 0

    # cli validation --------------------------------------------------------------------------
    check_optional_mutually_exclusive(combination_name, custom_combination_savename)
    combination_of_tw_project_tags_and_notion_page = any(
        [
            tw_project,
            tw_tags,
            notion_page_id,
        ]
    )
    check_optional_mutually_exclusive(
        combination_name, combination_of_tw_project_tags_and_notion_page
    )

    # existing combination name is provided ---------------------------------------------------
    if combination_name is not None:
        app_config = fetch_app_configuration(
            config_fname="tw_notion_configs", combination=combination_name
        )
        tw_tags = app_config["tw_tags"]
        tw_project = app_config["tw_project"]
        notion_page_id = app_config["notion_page_id"]
    # combination manually specified ----------------------------------------------------------
    else:
        inform_about_config = True
        combination_name = cache_or_reuse_cached_combination(
            config_args={
                "notion_page_id": notion_page_id,
                "tw_project": tw_project,
                "tw_tags": tw_tags,
            },
            config_fname="tw_notion_configs",
            custom_combination_savename=custom_combination_savename,
        )

    # at least one of tw_tags, tw_project should be set ---------------------------------------
    if not tw_tags and not tw_project:
        raise RuntimeError(
            "You have to provide at least one valid tag or a valid project ID to use for"
            " the synchronization"
        )

    # announce configuration ------------------------------------------------------------------
    logger.info(
        format_dict(
            header="Configuration",
            items={
                "TW Tags": tw_tags,
                "TW Project": tw_project,
                "Notion Page ID": notion_page_id,
            },
            prefix="\n\n",
            suffix="\n",
        )
    )

    # find token to connect to notion ---------------------------------------------------------
    token_v2 = os.environ.get("NOTION_API_KEY")
    if token_v2 is not None:
        logger.debug("Reading the Notion API key from environment variable...")
    else:
        token_v2 = fetch_from_pass_manager(token_pass_path)
    assert token_v2

    # initialize taskwarrior ------------------------------------------------------------------
    tw_side = TaskWarriorSide(tags=tw_tags, project=tw_project)

    # initialize notion -----------------------------------------------------------------------
    # client is a bit too verbose by default.
    client_verbosity = max(verbose - 1, 0)
    client = Client(
        auth=token_v2, log_level=verbosity_int_to_std_logging_lvl(client_verbosity)
    )
    notion_side = NotionSide(client=client, page_id=notion_page_id)

    # sync ------------------------------------------------------------------------------------
    try:
        with Aggregator(
            side_A=notion_side,
            side_B=tw_side,
            converter_B_to_A=convert_tw_to_notion,
            converter_A_to_B=convert_notion_to_tw,
            resolution_strategy=get_resolution_strategy(
                resolution_strategy, side_A_type=type(notion_side), side_B_type=type(tw_side)
            ),
            config_fname=combination_name,
            ignore_keys=(
                ("last_modified_date",),
                ("due", "end", "entry", "modified", "urgency"),
            ),
        ) as aggregator:
            aggregator.sync()
    except KeyboardInterrupt:
        logger.error("Exiting...")
        return 1
    except:
        report_toplevel_exception(is_verbose=verbose >= 1)
        return 1

    if inform_about_config:
        inform_about_combination_name_usage(combination_name)

    return 0
Python
def compare(self, other: "NotionTodoBlock", ignore_keys: Sequence[str] = []) -> bool:
    """Compare two items, return True if they are considered equal."""
    for key in self._key_names:
        if key in ignore_keys:
            continue
        elif key in self._date_key_names:
            if not is_same_datetime(
                self[key], other[key], tol=datetime.timedelta(minutes=10)
            ):
                logger.opt(lazy=True).trace(
                    f"\n\nItems differ\n\nItem1\n\n{self}\n\nItem2\n\n{other}\n\nKey"
                    f" [{key}] is different - [{repr(self[key])}] | [{repr(other[key])}]"
                )
                return False
        else:
            if self[key] != other[key]:
                logger.opt(lazy=True).trace(f"Items differ [{key}]\n\n{self}\n\n{other}")
                return False

    return True
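A brief usage sketch of the comparison above. The blocks are constructed with the same keyword arguments that from_raw_item uses further down, and ignoring last_modified_date mirrors the ignore_keys that the tw_notion_sync entry point passes for the Notion side; the import path is an assumption.

import datetime

# Hypothetical import path - adjust to wherever NotionTodoBlock is defined in the package.
from taskwarrior_syncall import NotionTodoBlock

now = datetime.datetime.now(tz=datetime.timezone.utc)
block_a = NotionTodoBlock(
    is_archived=False,
    is_checked=False,
    last_modified_date=now,
    plaintext="Water the plants",
    id="block-0",
)
block_b = NotionTodoBlock(
    is_archived=False,
    is_checked=True,  # only the checked state differs
    last_modified_date=now + datetime.timedelta(hours=2),
    plaintext="Water the plants",
    id="block-0",
)

# Identical blocks compare equal; a differing checked state does not,
# even when the modification date is ignored.
assert block_a.compare(block_a)
assert not block_a.compare(block_b, ignore_keys=("last_modified_date",))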
Python
def from_raw_item(cls, block_item: NotionTodoBlockItem) -> "NotionTodoBlock":
    """Create a NotionTodoBlock given the raw item at hand."""
    assert "archived" in block_item
    assert "id" in block_item
    assert "last_edited_time" in block_item
    assert "object" in block_item
    assert block_item["object"] == "block"

    if "to_do" not in block_item:
        logger.exception("This is not a to_do block")
        raise RuntimeError

    id_ = block_item["id"]
    is_archived = block_item["archived"]
    is_checked = block_item["to_do"]["checked"]
    last_modified_date = parse_datetime(block_item["last_edited_time"])
    plaintext = cls.get_plaintext(todo_section=block_item["to_do"])

    return NotionTodoBlock(
        is_archived=is_archived,
        is_checked=is_checked,
        last_modified_date=last_modified_date,
        plaintext=plaintext,
        id=id_,
    )
Python
def _fetch_cal_id(self) -> Optional[str]:
    """Return the id of the Calendar based on the given Summary.

    :returns: id or None if that was not found
    """
    res = self._service.calendarList().list().execute()  # type: ignore
    calendars_list: List[dict] = res["items"]

    matching_calendars = [
        c["id"] for c in calendars_list if c["summary"] == self._calendar_summary
    ]

    if len(matching_calendars) == 0:
        return None
    elif len(matching_calendars) == 1:
        return cast(str, matching_calendars[0])
    else:
        raise RuntimeError(
            f'Multiple matching calendars for name -> "{self._calendar_summary}"'
        )
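A minimal sketch of how a caller could create the calendar when the lookup above returns None. The `_service` and `_calendar_summary` attributes follow the ones used above; the create-on-missing behaviour and the method name are assumptions, but calendars().insert with a "summary" body is part of the Google Calendar v3 API.

def _create_calendar_if_missing(self) -> str:
    """Sketch: reuse _fetch_cal_id and create the calendar when it does not exist yet."""
    cal_id = self._fetch_cal_id()
    if cal_id is not None:
        return cal_id

    logger.info(f'Calendar "{self._calendar_summary}" not found - creating it...')
    created = self._service.calendars().insert(
        body={"summary": self._calendar_summary}
    ).execute()
    return created["id"]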
Python
def _clear_all_calendar_entries(self):
    """Clear all events from the current calendar."""
    # TODO Currently not functional - returning "400 Bad Request"
    logger.warning(f"Clearing all events from calendar {self._calendar_id}")
    self._service.calendars().clear(calendarId=self._calendar_id).execute()
Python
def caplog(_caplog):
    """
    Fixture that forwards loguru's output to std logging's output so that you can use
    caplog as usual.
    """

    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    logger.add(PropagateHandler(), format="{message}")
    yield _caplog
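A short usage sketch, assuming the fixture above is registered in a conftest.py where pytest's built-in caplog fixture has been imported under the name _caplog. With the propagation handler in place, loguru records show up in the standard caplog assertions.

from loguru import logger


def test_warning_is_captured(caplog):
    # The overridden caplog fixture forwards loguru records to std logging,
    # so pytest's usual capture API works unchanged.
    logger.warning("calendar not found")
    assert "calendar not found" in caplog.text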
Python
def main(
    gkeep_note: str,
    gkeep_user_pass_path: str,
    gkeep_passwd_pass_path: str,
    tw_tags: Sequence[str],
    tw_project: str,
    resolution_strategy: str,
    verbose: int,
    combination_name: str,
    custom_combination_savename: str,
    do_list_combinations: bool,
):
    """Synchronize Notes from your Google Keep with filters from Taskwarrior.

    The list of TW tasks is determined by a combination of TW tags and a TW project, while
    the note in GKeep should be specified using its full name. If it doesn't exist it will
    be created.

    This service will create TaskWarrior tasks with the specified filter for each one of the
    checkboxed items in the specified Google Keep note and will create Google Keep items for
    each one of the tasks in the Taskwarrior filter.

    You have to first "Show checkboxes" in the Google Keep Note in order to use it with this
    service.
    """
    # setup logger ----------------------------------------------------------------------------
    loguru_tqdm_sink(verbosity=verbose)
    log_to_syslog(name="tw_gkeep_sync")
    logger.debug("Initialising...")
    inform_about_config = False

    if do_list_combinations:
        list_named_combinations(config_fname="tw_gkeep_configs")
        return 0

    # cli validation --------------------------------------------------------------------------
    check_optional_mutually_exclusive(combination_name, custom_combination_savename)
    combination_of_tw_project_tags_and_gkeep_note = any(
        [
            tw_project,
            tw_tags,
            gkeep_note,
        ]
    )
    check_optional_mutually_exclusive(
        combination_name, combination_of_tw_project_tags_and_gkeep_note
    )

    # existing combination name is provided ---------------------------------------------------
    if combination_name is not None:
        app_config = fetch_app_configuration(
            config_fname="tw_gkeep_configs", combination=combination_name
        )
        tw_tags = app_config["tw_tags"]
        tw_project = app_config["tw_project"]
        gkeep_note = app_config["gkeep_note"]
    # combination manually specified ----------------------------------------------------------
    else:
        inform_about_config = True
        combination_name = cache_or_reuse_cached_combination(
            config_args={
                "gkeep_note": gkeep_note,
                "tw_project": tw_project,
                "tw_tags": tw_tags,
            },
            config_fname="tw_gkeep_configs",
            custom_combination_savename=custom_combination_savename,
        )

    # at least one of tw_tags, tw_project should be set ---------------------------------------
    if not tw_tags and not tw_project:
        raise RuntimeError(
            "You have to provide at least one valid tag or a valid project ID to use for"
            " the synchronization"
        )

    # announce configuration ------------------------------------------------------------------
    logger.info(
        format_dict(
            header="Configuration",
            items={
                "TW Tags": tw_tags,
                "TW Project": tw_project,
                "Google Keep Note": gkeep_note,
            },
            prefix="\n\n",
            suffix="\n",
        )
    )

    # initialize sides ------------------------------------------------------------------------
    # fetch username
    gkeep_user = os.environ.get("GKEEP_USERNAME")
    if gkeep_user is not None:
        logger.debug("Reading the gkeep username from environment variable...")
    else:
        gkeep_user = fetch_from_pass_manager(gkeep_user_pass_path)
    assert gkeep_user

    # fetch password
    gkeep_passwd = os.environ.get("GKEEP_PASSWD")
    if gkeep_passwd is not None:
        logger.debug("Reading the gkeep password from environment variable...")
    else:
        gkeep_passwd = fetch_from_pass_manager(gkeep_passwd_pass_path)
    assert gkeep_passwd

    gkeep_side = GKeepTodoSide(
        note_title=gkeep_note,
        gkeep_user=gkeep_user,
        gkeep_passwd=gkeep_passwd,
        notes_label="tw_gkeep_sync",
    )

    # initialize taskwarrior ------------------------------------------------------------------
    tw_side = TaskWarriorSide(tags=tw_tags, project=tw_project)

    # sync ------------------------------------------------------------------------------------
    try:
        with Aggregator(
            side_A=gkeep_side,
            side_B=tw_side,
            converter_B_to_A=convert_tw_to_gkeep_todo,
            converter_A_to_B=convert_gkeep_todo_to_tw,
            resolution_strategy=get_resolution_strategy(
                resolution_strategy, side_A_type=type(gkeep_side), side_B_type=type(tw_side)
            ),
            config_fname=combination_name,
            ignore_keys=(
                (),
                ("due", "end", "entry", "modified", "urgency"),
            ),
        ) as aggregator:
            aggregator.sync()
    except KeyboardInterrupt:
        logger.error("Exiting...")
        return 1
    except:
        report_toplevel_exception(is_verbose=verbose >= 1)
        return 1

    if inform_about_config:
        inform_about_combination_name_usage(combination_name)

    return 0
Python
def _get_credentials(self):
    """Get valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow
    is completed to obtain the new credentials.

    :return: Credentials, the obtained credentials.
    """
    creds = None
    credentials_cache = self._credentials_cache
    if credentials_cache.is_file():
        with credentials_cache.open("rb") as f:
            creds = pickle.load(f)

    if not creds or not creds.valid:
        logger.debug("Invalid credentials. Fetching again...")
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            client_secret = self._client_secret
            flow = InstalledAppFlow.from_client_secrets_file(client_secret, self._scopes)
            try:
                creds = flow.run_local_server(port=self._oauth_port)
            except OSError as e:
                raise RuntimeError(
                    f"Port {self._oauth_port} is already in use, please specify a"
                    " different port or stop the process that's already using it."
                ) from e

        # Save the credentials for the next run
        with credentials_cache.open("wb") as f:
            pickle.dump(creds, f)
    else:
        logger.info("Using already cached credentials...")

    return creds
Python
def delete_single_item(self, item_id: ID):
    """Delete an item based on the given UUID.

    .. raises:: KeyError if the item is not found.
    """
    raise NotImplementedError("Should be implemented in derived")
Python
def update_item(self, item_id: ID, **changes):
    """Update with the given item.

    :param item_id: ID of item to update
    :param changes: Keyword only parameters that are to change in the item

    .. warning:: The item must already be present
    """
    raise NotImplementedError("Should be implemented in derived")
Python
def id_key(cls) -> str:
    """
    Key in the dictionary of the added/updated/deleted item that refers to the ID of
    that Item.
    """
    raise NotImplementedError("Implement in derived")
Python
def items_are_identical(
    cls, item1: ItemType, item2: ItemType, ignore_keys: Sequence[str] = []
) -> bool:
    """Determine whether two items are identical.

    .. returns:: True if items are identical, False otherwise.
    """
    raise NotImplementedError("Implement in derived")
Python
def _items_are_identical(item1: ItemType, item2: ItemType, keys: list) -> bool:
    """Compare the provided keys of the two given items.

    Take extra care of the datetime keys.
    """
    for k in keys:
        if k not in item1 and k not in item2:
            continue

        if (k in item1 and k not in item2) or (k not in item1 and k in item2):
            logger.opt(lazy=True).trace(
                f"Key [{k}] exists in one but not in other\n\n{item1}\n\n{item2}"
            )
            return False

        if isinstance(item1[k], datetime.datetime) and isinstance(
            item2[k], datetime.datetime
        ):
            if is_same_datetime(item1[k], item2[k], tol=datetime.timedelta(minutes=10)):
                continue
            else:
                logger.opt(lazy=True).trace(
                    f"\n\nItems differ\n\nItem1\n\n{item1}\n\nItem2\n\n{item2}"
                    f"\n\nKey [{k}] is different - [{repr(item1[k])}] | [{repr(item2[k])}]"
                )
                return False
        else:
            if item1[k] == item2[k]:
                continue
            else:
                logger.opt(lazy=True).trace(
                    f"\n\nItems differ\n\nItem1\n\n{item1}\n\nItem2\n\n{item2}"
                    f"\n\nKey [{k}] is different - [{repr(item1[k])}] | [{repr(item2[k])}]"
                )
                return False

    return True
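A small illustration of the helper above with plain dictionaries, assuming the function is available in scope (in the package it may hang off the sync-side base class, so the access path is an assumption). The 10-minute tolerance means near-simultaneous timestamps do not count as a difference; the item values here are made up.

import datetime

t = datetime.datetime(2022, 5, 1, 12, 0, 0)
item1 = {"summary": "Pay rent", "modified": t}
item2 = {"summary": "Pay rent", "modified": t + datetime.timedelta(minutes=3)}
item3 = {"summary": "Pay rent!", "modified": t}

# 3 minutes apart is within the tolerance -> considered identical.
assert _items_are_identical(item1, item2, keys=["summary", "modified"])
# A differing summary makes the items non-identical.
assert not _items_are_identical(item1, item3, keys=["summary", "modified"])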
Python
def detect_changes(self, helper: SideHelper, items: Dict[ID, Item]) -> SideChanges:
    """
    Given a fresh list of items from the SyncSide, determine which of them are new,
    modified, or have been deleted since the last run.
    """
    serdes_dir, _ = self._get_serdes_dirs(helper)
    logger.info(f"Detecting changes from {helper}...")
    item_ids = set(items.keys())

    # New items exist in the sync side but don't yet exist in my IDs correspondences.
    new = {
        item_id for item_id in item_ids if item_id not in self._get_ids_map(helper=helper)
    }

    # Deleted items do not exist in the sync side but still exist in my IDs
    # correspondences.
    #
    # Exclude the already new ones determined in the earlier step
    deleted = {
        registered_id
        for registered_id in self._get_ids_map(helper=helper)
        if registered_id not in item_ids.difference(new)
    }

    # Potentially modified items are all the items that exist in the sync side minus the
    # ones already determined as deleted or new.
    #
    # For these items, load the cached version and check whether they are the same or not
    # to actually determine the ones that are changed.
    modified = set()
    potentially_modified_ids = item_ids.difference(new.union(deleted))
    for item_id in potentially_modified_ids:
        item = items[item_id]
        cached_item = pickle_load(serdes_dir / item_id)
        if self._item_has_update(prev_item=cached_item, new_item=item, helper=helper):
            modified.add(item_id)

    side_changes = SideChanges(new=new, modified=modified, deleted=deleted)
    logger.debug(f"\n\n{side_changes}")

    return side_changes
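The set arithmetic above can be summarised with plain sets: incoming IDs that were never registered are additions, registered IDs that no longer come back are deletions, and the remainder are candidates for the cached-copy comparison. The IDs below are made up for the example.

registered_ids = {"a", "b", "c"}  # IDs recorded in the correspondences map
incoming_ids = {"b", "c", "d"}    # IDs just fetched from the sync side

new = incoming_ids - registered_ids                     # {"d"}
deleted = registered_ids - (incoming_ids - new)         # {"a"}
potentially_modified = incoming_ids - (new | deleted)   # {"b", "c"}

assert new == {"d"}
assert deleted == {"a"}
assert potentially_modified == {"b", "c"}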
Python
def inserter_to(self, item: Item, helper: SideHelper) -> ID:
    """Inserter.

    Other side already has the item, and I'm also inserting it at this side.
    """
    item_side, _ = self._get_side_instances(helper)
    serdes_dir, _ = self._get_serdes_dirs(helper)
    logger.info(
        f"[{helper.other}] Inserting item [{self._summary_of(item, helper):10}] at"
        f" {helper}..."
    )

    item_created = item_side.add_item(item)
    item_created_id = str(item_created[helper.id_key])

    # Cache both sides with pickle - f=id_
    logger.debug(f'Pickling newly created {helper} item -> "{item_created_id}"')
    pickle_dump(item_created, serdes_dir / item_created_id)

    return item_created_id
Python
def _item_has_update(self, prev_item: Item, new_item: Item, helper: SideHelper) -> bool:
    """Determine whether the item has been updated."""
    side, _ = self._get_side_instances(helper)
    return not side.items_are_identical(
        prev_item, new_item, ignore_keys=[helper.id_key, *helper.ignore_keys]
    )
Python
def _summary_of(self, item: Item, helper: SideHelper, short=True) -> str:
    """Get the summary of the given item."""
    ret = item[helper.summary_key]
    if short:
        return ret[:10]

    return ret
Python
def app_name():
    """
    Return the name of the application which defines the config, cache, and share
    directories of this app.
    """
    if "TASKWARRIOR_SYNCALL_TESTENV" in os.environ:
        return "test_taskwarrior_syncall"
    else:
        return "taskwarrior_syncall"
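A quick sketch of the environment-variable switch above; setting TASKWARRIOR_SYNCALL_TESTENV redirects all config/cache/share lookups to the test application name, which keeps test runs from touching the real configuration files.

import os

os.environ["TASKWARRIOR_SYNCALL_TESTENV"] = "1"
assert app_name() == "test_taskwarrior_syncall"

del os.environ["TASKWARRIOR_SYNCALL_TESTENV"]
assert app_name() == "taskwarrior_syncall"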
Python
def list_named_combinations(config_fname: str) -> None:
    """List the named configurations currently available for the given configuration name.

    Mainly used by the top-level synchronization apps.
    """
    logger.success(
        format_list(
            header="\n\nNamed configurations currently available",
            items=get_named_combinations(config_fname=config_fname),
        )
    )
Python
def fetch_app_configuration(config_fname: str, combination: str) -> Mapping[str, Any]:
    """
    Fetch the configuration of a top-level synchronization app.

    This function is useful for parsing a previously cached configuration of a
    synchronization app. The configuration file is managed by a bubop.PrefsManager
    instance and the configuration of this particular combination is contained under the
    specified `combination`.

    It will check whether the configuration file at hand exists and will also give
    meaningful errors to the user if the configuration file does not contain the said
    combination.
    """
    with PrefsManager(app_name=app_name(), config_fname=config_fname) as prefs_manager:
        if combination not in prefs_manager:
            # config not found ----------------------------------------------------------------
            existing_keys = prefs_manager.keys()
            raise RuntimeError(
                format_list(
                    header="\n\nNo such configuration found - existing configurations are",
                    items=existing_keys,
                )
            )

        # config combination found ------------------------------------------------------------
        logger.info(f"\n\nLoading configuration - {prefs_manager.config_file}.{combination}")
        return prefs_manager[combination]
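A sketch of how a top-level app consumes this, mirroring the main() functions above. "my_combination" is a hypothetical name of an entry cached on an earlier run; on a miss, the RuntimeError raised above can be paired with list_named_combinations to show the user what is actually available.

try:
    app_config = fetch_app_configuration(
        config_fname="tw_gcal_configs", combination="my_combination"
    )
    tw_tags = app_config["tw_tags"]
    tw_project = app_config["tw_project"]
    gcal_calendar = app_config["gcal_calendar"]
except RuntimeError:
    # The combination was never cached - show the available ones instead.
    list_named_combinations(config_fname="tw_gcal_configs")
    raise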
Python
def cache_or_reuse_cached_combination(
    config_args: Mapping[str, Any],
    config_fname: str,
    custom_combination_savename: Optional[str],
):
    """
    App utility function that either retrieves the configuration for the app at hand based
    on the given arguments or retrieves it based on the custom configuration name
    specified.
    """
    if custom_combination_savename is None:
        config_name = get_config_name_for_args(*config_args.values())
    else:
        config_name = custom_combination_savename

    # see if this combination corresponds to an already existing configuration -----------------
    with PrefsManager(app_name=app_name(), config_fname=config_fname) as prefs_manager:
        config_exists = config_name in prefs_manager

    if config_exists:
        logger.debug(f"Loading cached configuration - {config_name}")
    else:
        # does not correspond to an existing configuration ------------------------------------
        # assemble and cache it.
        with PrefsManager(app_name=app_name(), config_fname=config_fname) as prefs_manager:
            logger.info(f"Caching this configuration under the name - {config_name}...")
            prefs_manager[config_name] = {**config_args}

    return config_name
Python
def inform_about_combination_name_usage(combination_name: str):
    """Inform the user about the use of the flag for referring to a saved combination."""
    exec_name = Path(sys.argv[0]).stem
    logger.success(
        "Sync completed successfully. You can now use the"
        f' {"/".join(COMBINATION_FLAGS)} option to refer to this particular combination\n\n '
        f" {exec_name} {COMBINATION_FLAGS[1]} {combination_name}"
    )