language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def s3_assign_role(self, user_id, group_id, for_pe=None, system=False): """ Assigns a role to a user (add the user to a user group) Args: user_id: the record ID of the user account group_id: the record ID(s)/UID(s) of the group for_pe: the person entity (pe_id) to restrict the group membership to, possible values: - None: use default realm (entities the user is affiliated with) - 0: site-wide realm (no entity-restriction) - X: restrict to records owned by entity X system: set the system-flag for any new role assignments Notes: - strings are assumed to be group UIDs - for_pe will be ignored for ADMIN, ANONYMOUS and AUTHENTICATED """ db = current.db gtable = self.settings.table_group mtable = self.settings.table_membership # Find the group IDs query = None uuids = None if isinstance(group_id, (list, tuple)): if isinstance(group_id[0], str): uuids = group_id query = (gtable.uuid.belongs(group_id)) else: group_ids = group_id elif isinstance(group_id, str) and not group_id.isdigit(): uuids = [group_id] query = (gtable.uuid == group_id) else: group_ids = [group_id] if query is not None: query = (gtable.deleted == False) & query groups = db(query).select(gtable.id, gtable.uuid) group_ids = [g.id for g in groups] missing = [uuid for uuid in uuids if uuid not in [g.uuid for g in groups]] for m in missing: group_id = self.s3_create_role(m, uid=m) if group_id: group_ids.append(group_id) # Find the assigned groups query = (mtable.deleted == False) & \ (mtable.user_id == user_id) & \ (mtable.group_id.belongs(group_ids) & \ (mtable.pe_id == for_pe)) assigned = db(query).select(mtable.group_id) assigned_groups = [g.group_id for g in assigned] # Add missing memberships sr = self.get_system_roles() unrestrictable = [str(sr.ADMIN), str(sr.ANONYMOUS), str(sr.AUTHENTICATED), ] for gid in group_ids: if gid not in assigned_groups: membership = {"user_id": user_id, "group_id": gid, "system": system, } if for_pe is not None and str(gid) not in unrestrictable: membership["pe_id"] = for_pe 
mtable.insert(**membership) # Update roles for current user if required if self.user and str(user_id) == str(self.user.id): self.s3_set_roles()
def s3_assign_role(self, user_id, group_id, for_pe=None, system=False): """ Assigns a role to a user (add the user to a user group) Args: user_id: the record ID of the user account group_id: the record ID(s)/UID(s) of the group for_pe: the person entity (pe_id) to restrict the group membership to, possible values: - None: use default realm (entities the user is affiliated with) - 0: site-wide realm (no entity-restriction) - X: restrict to records owned by entity X system: set the system-flag for any new role assignments Notes: - strings are assumed to be group UIDs - for_pe will be ignored for ADMIN, ANONYMOUS and AUTHENTICATED """ db = current.db gtable = self.settings.table_group mtable = self.settings.table_membership # Find the group IDs query = None uuids = None if isinstance(group_id, (list, tuple)): if isinstance(group_id[0], str): uuids = group_id query = (gtable.uuid.belongs(group_id)) else: group_ids = group_id elif isinstance(group_id, str) and not group_id.isdigit(): uuids = [group_id] query = (gtable.uuid == group_id) else: group_ids = [group_id] if query is not None: query = (gtable.deleted == False) & query groups = db(query).select(gtable.id, gtable.uuid) group_ids = [g.id for g in groups] missing = [uuid for uuid in uuids if uuid not in [g.uuid for g in groups]] for m in missing: group_id = self.s3_create_role(m, uid=m) if group_id: group_ids.append(group_id) # Find the assigned groups query = (mtable.deleted == False) & \ (mtable.user_id == user_id) & \ (mtable.group_id.belongs(group_ids) & \ (mtable.pe_id == for_pe)) assigned = db(query).select(mtable.group_id) assigned_groups = [g.group_id for g in assigned] # Add missing memberships sr = self.get_system_roles() unrestrictable = [str(sr.ADMIN), str(sr.ANONYMOUS), str(sr.AUTHENTICATED), ] for gid in group_ids: if gid not in assigned_groups: membership = {"user_id": user_id, "group_id": gid, "system": system, } if for_pe is not None and str(gid) not in unrestrictable: membership["pe_id"] = for_pe 
mtable.insert(**membership) # Update roles for current user if required if self.user and str(user_id) == str(self.user.id): self.s3_set_roles()
Python
def s3_remove_role(self, user_id, group_id, for_pe=None): """ Removes a role assignment from a user account Args: user_id: the record ID of the user account group_id: the record ID(s)/UID(s) of the role for_pe: only remove the group membership for this realm, possible values: - None: only remove for the default realm - 0: only remove for the site-wide realm - X: only remove for entity X - []: remove for any realms Note: strings are assumed to be role UIDs """ if not group_id: return db = current.db gtable = self.settings.table_group mtable = self.settings.table_membership # Find the group IDs query = None if isinstance(group_id, (list, tuple)): if isinstance(group_id[0], str): query = (gtable.uuid.belongs(group_id)) else: group_ids = group_id elif isinstance(group_id, str): query = (gtable.uuid == group_id) else: group_ids = [group_id] if query is not None: query = (gtable.deleted == False) & query groups = db(query).select(gtable.id) group_ids = [g.id for g in groups] # Get the assigned groups query = (mtable.deleted == False) & \ (mtable.user_id == user_id) & \ (mtable.group_id.belongs(group_ids)) sr = self.get_system_roles() unrestrictable = [str(sr.ADMIN), str(sr.ANONYMOUS), str(sr.AUTHENTICATED)] if for_pe != []: query &= ((mtable.pe_id == for_pe) | \ (mtable.group_id.belongs(unrestrictable))) memberships = db(query).select() # Archive the memberships for m in memberships: deleted_fk = {"user_id": m.user_id, "group_id": m.group_id} if for_pe: deleted_fk["pe_id"] = for_pe deleted_fk = json.dumps(deleted_fk) m.update_record(deleted = True, deleted_fk = deleted_fk, user_id = None, group_id = None) # Update roles for current user if required if self.user and str(user_id) == str(self.user.id): self.s3_set_roles()
def s3_remove_role(self, user_id, group_id, for_pe=None): """ Removes a role assignment from a user account Args: user_id: the record ID of the user account group_id: the record ID(s)/UID(s) of the role for_pe: only remove the group membership for this realm, possible values: - None: only remove for the default realm - 0: only remove for the site-wide realm - X: only remove for entity X - []: remove for any realms Note: strings are assumed to be role UIDs """ if not group_id: return db = current.db gtable = self.settings.table_group mtable = self.settings.table_membership # Find the group IDs query = None if isinstance(group_id, (list, tuple)): if isinstance(group_id[0], str): query = (gtable.uuid.belongs(group_id)) else: group_ids = group_id elif isinstance(group_id, str): query = (gtable.uuid == group_id) else: group_ids = [group_id] if query is not None: query = (gtable.deleted == False) & query groups = db(query).select(gtable.id) group_ids = [g.id for g in groups] # Get the assigned groups query = (mtable.deleted == False) & \ (mtable.user_id == user_id) & \ (mtable.group_id.belongs(group_ids)) sr = self.get_system_roles() unrestrictable = [str(sr.ADMIN), str(sr.ANONYMOUS), str(sr.AUTHENTICATED)] if for_pe != []: query &= ((mtable.pe_id == for_pe) | \ (mtable.group_id.belongs(unrestrictable))) memberships = db(query).select() # Archive the memberships for m in memberships: deleted_fk = {"user_id": m.user_id, "group_id": m.group_id} if for_pe: deleted_fk["pe_id"] = for_pe deleted_fk = json.dumps(deleted_fk) m.update_record(deleted = True, deleted_fk = deleted_fk, user_id = None, group_id = None) # Update roles for current user if required if self.user and str(user_id) == str(self.user.id): self.s3_set_roles()
Python
def s3_get_roles(self, user_id, for_pe=DEFAULT): """ Lookup all roles which have been assigned to user for an entity Args: user_id: the user_id for_pe: the entity (pe_id) or list of entities """ if not user_id: return [] mtable = self.settings.table_membership query = (mtable.deleted == False) & \ (mtable.user_id == user_id) if isinstance(for_pe, (list, tuple)): if len(for_pe): query &= (mtable.pe_id.belongs(for_pe)) elif for_pe is not DEFAULT: query &= (mtable.pe_id == for_pe) rows = current.db(query).select(mtable.group_id) return list({row.group_id for row in rows})
def s3_get_roles(self, user_id, for_pe=DEFAULT): """ Lookup all roles which have been assigned to user for an entity Args: user_id: the user_id for_pe: the entity (pe_id) or list of entities """ if not user_id: return [] mtable = self.settings.table_membership query = (mtable.deleted == False) & \ (mtable.user_id == user_id) if isinstance(for_pe, (list, tuple)): if len(for_pe): query &= (mtable.pe_id.belongs(for_pe)) elif for_pe is not DEFAULT: query &= (mtable.pe_id == for_pe) rows = current.db(query).select(mtable.group_id) return list({row.group_id for row in rows})
Python
def s3_has_role(self, role, for_pe=None, include_admin=True): """ Check whether the currently logged-in user has a certain role (auth_group membership). Args: role: the record ID or UID of the role for_pe: check for this particular realm, possible values: - None: for any entity - 0: site-wide - X: for entity X include_admin: ADMIN matches all Roles """ # Allow override if self.override: return True system_roles = self.get_system_roles() if role == system_roles.ANONYMOUS: # All users have the anonymous role return True s3 = current.session.s3 # Trigger HTTP basic auth self.s3_logged_in() # Get the realms if not s3: return False realms = None if self.user: realms = self.user.realms elif s3.roles: realms = Storage([(r, None) for r in s3.roles]) if not realms: return False # Administrators have all roles if include_admin and system_roles.ADMIN in realms: return True # Resolve role ID/UID if isinstance(role, str): if role.isdigit(): role = int(role) elif role in system_roles: role = system_roles[role] else: gtable = self.settings.table_group query = (gtable.uuid == role) & \ (gtable.deleted == False) row = current.db(query).select(gtable.id, cache = (current.cache.ram, 600), limitby = (0, 1), ).first() if row: role = row.id else: return False # Check the realm if role in realms: realm = realms[role] if realm is None or for_pe is None or for_pe in realm: return True return False
def s3_has_role(self, role, for_pe=None, include_admin=True): """ Check whether the currently logged-in user has a certain role (auth_group membership). Args: role: the record ID or UID of the role for_pe: check for this particular realm, possible values: - None: for any entity - 0: site-wide - X: for entity X include_admin: ADMIN matches all Roles """ # Allow override if self.override: return True system_roles = self.get_system_roles() if role == system_roles.ANONYMOUS: # All users have the anonymous role return True s3 = current.session.s3 # Trigger HTTP basic auth self.s3_logged_in() # Get the realms if not s3: return False realms = None if self.user: realms = self.user.realms elif s3.roles: realms = Storage([(r, None) for r in s3.roles]) if not realms: return False # Administrators have all roles if include_admin and system_roles.ADMIN in realms: return True # Resolve role ID/UID if isinstance(role, str): if role.isdigit(): role = int(role) elif role in system_roles: role = system_roles[role] else: gtable = self.settings.table_group query = (gtable.uuid == role) & \ (gtable.deleted == False) row = current.db(query).select(gtable.id, cache = (current.cache.ram, 600), limitby = (0, 1), ).first() if row: role = row.id else: return False # Check the realm if role in realms: realm = realms[role] if realm is None or for_pe is None or for_pe in realm: return True return False
Python
def s3_has_roles(self, roles, for_pe=None, all=False): """ Check whether the currently logged-in user has at least one out of a set of roles (or all of them, with all=True) Args: roles: list|tuple|set of role IDs or UIDs for_pe: check for this particular realm, possible values: None - for any entity 0 - site-wide X - for entity X all: check whether the user has all of the roles """ # Override if self.override or not roles: return True # Get the realms session_s3 = current.session.s3 if not session_s3: return False realms = None if self.user: realms = self.user.realms elif session_s3.roles: realms = Storage([(r, None) for r in session_s3.roles]) if not realms: return False # Administrators have all roles (no need to check) system_roles = self.get_system_roles() if system_roles.ADMIN in realms: return True # Resolve any role UIDs if not isinstance(roles, (tuple, list, set)): roles = [roles] check = set() resolve = set() for role in roles: if isinstance(role, str): resolve.add(role) else: check.add(role) if resolve: gtable = self.settings.table_group query = (gtable.uuid.belongs(resolve)) & \ (gtable.deleted == False) rows = current.db(query).select(gtable.id, cache = (current.cache.ram, 600), ) for row in rows: check.add(row.id) # Check each role for role in check: if role == system_roles.ANONYMOUS: # All users have the anonymous role has_role = True elif role in realms: realm = realms[role] has_role = realm is None or for_pe is None or for_pe in realm else: has_role = False if has_role: if not all: return True elif all: return False return bool(all)
def s3_has_roles(self, roles, for_pe=None, all=False): """ Check whether the currently logged-in user has at least one out of a set of roles (or all of them, with all=True) Args: roles: list|tuple|set of role IDs or UIDs for_pe: check for this particular realm, possible values: None - for any entity 0 - site-wide X - for entity X all: check whether the user has all of the roles """ # Override if self.override or not roles: return True # Get the realms session_s3 = current.session.s3 if not session_s3: return False realms = None if self.user: realms = self.user.realms elif session_s3.roles: realms = Storage([(r, None) for r in session_s3.roles]) if not realms: return False # Administrators have all roles (no need to check) system_roles = self.get_system_roles() if system_roles.ADMIN in realms: return True # Resolve any role UIDs if not isinstance(roles, (tuple, list, set)): roles = [roles] check = set() resolve = set() for role in roles: if isinstance(role, str): resolve.add(role) else: check.add(role) if resolve: gtable = self.settings.table_group query = (gtable.uuid.belongs(resolve)) & \ (gtable.deleted == False) rows = current.db(query).select(gtable.id, cache = (current.cache.ram, 600), ) for row in rows: check.add(row.id) # Check each role for role in check: if role == system_roles.ANONYMOUS: # All users have the anonymous role has_role = True elif role in realms: realm = realms[role] has_role = realm is None or for_pe is None or for_pe in realm else: has_role = False if has_role: if not all: return True elif all: return False return bool(all)
Python
def s3_group_members(self, group_id, for_pe=DEFAULT): """ Get a list of members of a group Args: group_id: the group record ID for_pe: show only group members for this PE Returns: a list of the user_ids for members of a group """ mtable = self.settings.table_membership query = (mtable.deleted == False) & \ (mtable.group_id == group_id) if for_pe is None: query &= (mtable.pe_id == None) elif for_pe is not DEFAULT: query &= (mtable.pe_id == for_pe) members = current.db(query).select(mtable.user_id) return [m.user_id for m in members]
def s3_group_members(self, group_id, for_pe=DEFAULT): """ Get a list of members of a group Args: group_id: the group record ID for_pe: show only group members for this PE Returns: a list of the user_ids for members of a group """ mtable = self.settings.table_membership query = (mtable.deleted == False) & \ (mtable.group_id == group_id) if for_pe is None: query &= (mtable.pe_id == None) elif for_pe is not DEFAULT: query &= (mtable.pe_id == for_pe) members = current.db(query).select(mtable.user_id) return [m.user_id for m in members]
Python
def s3_user_pe_id(user_id): """ Get the person pe_id for a user ID Args: user_id: the user ID """ table = current.s3db.pr_person_user row = current.db(table.user_id == user_id).select(table.pe_id, limitby=(0, 1), ).first() return row.pe_id if row else None
def s3_user_pe_id(user_id): """ Get the person pe_id for a user ID Args: user_id: the user ID """ table = current.s3db.pr_person_user row = current.db(table.user_id == user_id).select(table.pe_id, limitby=(0, 1), ).first() return row.pe_id if row else None
Python
def s3_bulk_user_pe_id(user_ids): """ Get the list of person pe_id for list of user_ids Args: user_id: list of user IDs """ table = current.s3db.pr_person_user if not isinstance(user_ids, list): user_ids = [user_ids] rows = current.db(table.user_id.belongs(user_ids)).select(table.pe_id, table.user_id, ) if rows: return {row.user_id: row.pe_id for row in rows} return None
def s3_bulk_user_pe_id(user_ids): """ Get the list of person pe_id for list of user_ids Args: user_id: list of user IDs """ table = current.s3db.pr_person_user if not isinstance(user_ids, list): user_ids = [user_ids] rows = current.db(table.user_id.belongs(user_ids)).select(table.pe_id, table.user_id, ) if rows: return {row.user_id: row.pe_id for row in rows} return None
Python
def s3_logged_in_person(self): """ Get the person record ID for the current logged-in user """ row = None if self.s3_logged_in(): ptable = current.s3db.pr_person try: query = (ptable.pe_id == self.user.pe_id) except AttributeError: # Prepop (auth.override, self.user is None) pass else: row = current.db(query).select(ptable.id, limitby = (0, 1), ).first() return row.id if row else None
def s3_logged_in_person(self): """ Get the person record ID for the current logged-in user """ row = None if self.s3_logged_in(): ptable = current.s3db.pr_person try: query = (ptable.pe_id == self.user.pe_id) except AttributeError: # Prepop (auth.override, self.user is None) pass else: row = current.db(query).select(ptable.id, limitby = (0, 1), ).first() return row.id if row else None
Python
def s3_logged_in_human_resource(self): """ Get the first HR record ID for the current logged-in user """ row = None if self.s3_logged_in(): s3db = current.s3db ptable = s3db.pr_person htable = s3db.hrm_human_resource try: query = (htable.person_id == ptable.id) & \ (ptable.pe_id == self.user.pe_id) except AttributeError: # Prepop (auth.override, self.user is None) pass else: row = current.db(query).select(htable.id, orderby = ~htable.modified_on, limitby = (0, 1), ).first() return row.id if row else None
def s3_logged_in_human_resource(self): """ Get the first HR record ID for the current logged-in user """ row = None if self.s3_logged_in(): s3db = current.s3db ptable = s3db.pr_person htable = s3db.hrm_human_resource try: query = (htable.person_id == ptable.id) & \ (ptable.pe_id == self.user.pe_id) except AttributeError: # Prepop (auth.override, self.user is None) pass else: row = current.db(query).select(htable.id, orderby = ~htable.modified_on, limitby = (0, 1), ).first() return row.id if row else None
Python
def s3_has_permission(self, method, table, record_id=None, c=None, f=None): """ S3 framework function to define whether a user can access a record in manner "method". Designed to be called from the RESTlike controller. Args: method: the access method as string, one of "create", "read", "update", "delete" table: the table or tablename record_id: the record ID (if any) c: the controller name (overrides current.request) f: the function name (overrides current.request) """ if self.override: return True if not hasattr(table, "_tablename"): tablename = table table = current.s3db.table(tablename, db_only=True) if table is None: current.log.warning("Permission check on Table %s failed as couldn't load table. Module disabled?" % tablename) # Return a different Falsy value return None policy = current.deployment_settings.get_security_policy() if isinstance(method, (list, tuple)) and policy not in (3, 4, 5, 6, 7): return all(self.s3_has_permission(m, table, record_id=record_id, c=c, f=f) for m in method) sr = self.get_system_roles() permission = self.permission # Simple policy if policy == 1: required = permission.METHODS.get(method) or 0 if required == permission.READ: # All users can read, including anonymous users authorised = True else: # Authentication required for all other methods authorised = self.s3_logged_in() # Editor policy elif policy == 2: required = permission.METHODS.get(method) or 0 if required == permission.READ: # All users can read, including anonymous users authorised = True elif required == permission.CREATE or \ record_id == 0 and required == permission.UPDATE: # Authenticated users can create records, and update # certain default records (e.g. 
their profile) authorised = self.s3_logged_in() else: # Otherwise, must be EDITOR or record owner authorised = self.s3_has_role(sr.EDITOR) if not authorised and self.user and "owned_by_user" in table: query = (table.id == record_id) record = current.db(query).select(table.owned_by_user, limitby = (0, 1), ).first() if record and self.user.id == record.owned_by_user: authorised = True # Use S3Permission elif policy in (3, 4, 5, 6, 7): authorised = permission.has_permission(method, c = c, f = f, t = table, record = record_id, ) # Web2py default policy else: if self.s3_logged_in(): # Administrators are always authorised if self.s3_has_role(sr.ADMIN): authorised = True else: # Require records in auth_permission to specify access # (default Web2Py-style) authorised = self.has_permission(method, table, record_id) else: # No access for anonymous authorised = False return authorised
def s3_has_permission(self, method, table, record_id=None, c=None, f=None): """ S3 framework function to define whether a user can access a record in manner "method". Designed to be called from the RESTlike controller. Args: method: the access method as string, one of "create", "read", "update", "delete" table: the table or tablename record_id: the record ID (if any) c: the controller name (overrides current.request) f: the function name (overrides current.request) """ if self.override: return True if not hasattr(table, "_tablename"): tablename = table table = current.s3db.table(tablename, db_only=True) if table is None: current.log.warning("Permission check on Table %s failed as couldn't load table. Module disabled?" % tablename) # Return a different Falsy value return None policy = current.deployment_settings.get_security_policy() if isinstance(method, (list, tuple)) and policy not in (3, 4, 5, 6, 7): return all(self.s3_has_permission(m, table, record_id=record_id, c=c, f=f) for m in method) sr = self.get_system_roles() permission = self.permission # Simple policy if policy == 1: required = permission.METHODS.get(method) or 0 if required == permission.READ: # All users can read, including anonymous users authorised = True else: # Authentication required for all other methods authorised = self.s3_logged_in() # Editor policy elif policy == 2: required = permission.METHODS.get(method) or 0 if required == permission.READ: # All users can read, including anonymous users authorised = True elif required == permission.CREATE or \ record_id == 0 and required == permission.UPDATE: # Authenticated users can create records, and update # certain default records (e.g. 
their profile) authorised = self.s3_logged_in() else: # Otherwise, must be EDITOR or record owner authorised = self.s3_has_role(sr.EDITOR) if not authorised and self.user and "owned_by_user" in table: query = (table.id == record_id) record = current.db(query).select(table.owned_by_user, limitby = (0, 1), ).first() if record and self.user.id == record.owned_by_user: authorised = True # Use S3Permission elif policy in (3, 4, 5, 6, 7): authorised = permission.has_permission(method, c = c, f = f, t = table, record = record_id, ) # Web2py default policy else: if self.s3_logged_in(): # Administrators are always authorised if self.s3_has_role(sr.ADMIN): authorised = True else: # Require records in auth_permission to specify access # (default Web2Py-style) authorised = self.has_permission(method, table, record_id) else: # No access for anonymous authorised = False return authorised
Python
def s3_accessible_query(self, method, table, c=None, f=None): """ Returns a query with all accessible records for the currently logged-in user Args: method: the access method as string, one of: "create", "read", "update" or "delete" table: the table or table name c: the controller name (overrides current.request) f: the function name (overrides current.request) NB This method does not work on GAE because it uses JOIN and IN """ if not hasattr(table, "_tablename"): table = current.s3db[table] if self.override: return table.id > 0 sr = self.get_system_roles() policy = current.deployment_settings.get_security_policy() if policy == 1: # "simple" security policy: show all records return table.id > 0 elif policy == 2: # "editor" security policy: show all records return table.id > 0 elif policy in (3, 4, 5, 6, 7): # ACLs: use S3Permission method query = self.permission.accessible_query(method, table, c=c, f=f) return query # "Full" security policy if self.s3_has_role(sr.ADMIN): # Administrators can see all data return table.id > 0 # If there is access to the entire table then show all records try: user_id = self.user.id except: user_id = 0 if self.has_permission(method, table, 0, user_id): return table.id > 0 # Filter Records to show only those to which the user has access current.session.warning = current.T("Only showing accessible records!") membership = self.settings.table_membership permission = self.settings.table_permission query = (membership.user_id == user_id) & \ (membership.group_id == permission.group_id) & \ (permission.name == method) & \ (permission.table_name == table) return table.id.belongs(current.db(query)._select(permission.record_id))
def s3_accessible_query(self, method, table, c=None, f=None): """ Returns a query with all accessible records for the currently logged-in user Args: method: the access method as string, one of: "create", "read", "update" or "delete" table: the table or table name c: the controller name (overrides current.request) f: the function name (overrides current.request) NB This method does not work on GAE because it uses JOIN and IN """ if not hasattr(table, "_tablename"): table = current.s3db[table] if self.override: return table.id > 0 sr = self.get_system_roles() policy = current.deployment_settings.get_security_policy() if policy == 1: # "simple" security policy: show all records return table.id > 0 elif policy == 2: # "editor" security policy: show all records return table.id > 0 elif policy in (3, 4, 5, 6, 7): # ACLs: use S3Permission method query = self.permission.accessible_query(method, table, c=c, f=f) return query # "Full" security policy if self.s3_has_role(sr.ADMIN): # Administrators can see all data return table.id > 0 # If there is access to the entire table then show all records try: user_id = self.user.id except: user_id = 0 if self.has_permission(method, table, 0, user_id): return table.id > 0 # Filter Records to show only those to which the user has access current.session.warning = current.T("Only showing accessible records!") membership = self.settings.table_membership permission = self.settings.table_permission query = (membership.user_id == user_id) & \ (membership.group_id == permission.group_id) & \ (permission.name == method) & \ (permission.table_name == table) return table.id.belongs(current.db(query)._select(permission.record_id))
Python
def s3_requires_membership(self, role): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. Extends Web2Py's requires_membership() to add new functionality: - Custom Flash style - Uses s3_has_role() - Administrators (id=1) are deemed to have all roles """ def decorator(action): def f(*a, **b): if self.override: return action(*a, **b) ADMIN = self.get_system_roles().ADMIN if not self.s3_has_role(role) and not self.s3_has_role(ADMIN): self.permission.fail() return action(*a, **b) f.__doc__ = action.__doc__ return f return decorator
def s3_requires_membership(self, role): """ Decorator that prevents access to action if not logged in or if user logged in is not a member of group_id. If role is provided instead of group_id then the group_id is calculated. Extends Web2Py's requires_membership() to add new functionality: - Custom Flash style - Uses s3_has_role() - Administrators (id=1) are deemed to have all roles """ def decorator(action): def f(*a, **b): if self.override: return action(*a, **b) ADMIN = self.get_system_roles().ADMIN if not self.s3_has_role(role) and not self.s3_has_role(ADMIN): self.permission.fail() return action(*a, **b) f.__doc__ = action.__doc__ return f return decorator
Python
def s3_make_session_owner(self, table, record_id): """ Makes the current session owner of a record Args: table: the table or table name record_id: the record ID """ if hasattr(table, "_tablename"): tablename = original_tablename(table) else: tablename = table if not self.user: session = current.session if "owned_records" not in session: session.owned_records = {} records = session.owned_records.get(tablename, []) record_id = str(record_id) if record_id not in records: records.append(record_id) session.owned_records[tablename] = records
def s3_make_session_owner(self, table, record_id): """ Makes the current session owner of a record Args: table: the table or table name record_id: the record ID """ if hasattr(table, "_tablename"): tablename = original_tablename(table) else: tablename = table if not self.user: session = current.session if "owned_records" not in session: session.owned_records = {} records = session.owned_records.get(tablename, []) record_id = str(record_id) if record_id not in records: records.append(record_id) session.owned_records[tablename] = records
Python
def s3_session_owns(self, table, record_id): """ Checks whether the current session owns a record Args: table: the table or table name record_id: the record ID """ session = current.session if self.user or not record_id or "owned_records" not in session: return False if hasattr(table, "_tablename"): tablename = original_tablename(table) else: tablename = table records = session.owned_records.get(tablename) if records: return str(record_id) in records return False
def s3_session_owns(self, table, record_id): """ Checks whether the current session owns a record Args: table: the table or table name record_id: the record ID """ session = current.session if self.user or not record_id or "owned_records" not in session: return False if hasattr(table, "_tablename"): tablename = original_tablename(table) else: tablename = table records = session.owned_records.get(tablename) if records: return str(record_id) in records return False
Python
def s3_clear_session_ownership(table=None, record_id=None): """ Removes session ownership for a record Args: table: the table or table name (default: all tables) record_id: the record ID (default: all records) """ session = current.session if "owned_records" not in session: return if table is not None: if hasattr(table, "_tablename"): tablename = original_tablename(table) else: tablename = table if tablename in session.owned_records: if record_id: # Remove just this record ID record_id = str(record_id) records = session.owned_records[tablename] if record_id in records: records.remove(record_id) else: # Remove all record IDs for this table del session.owned_records[tablename] else: # Remove all session ownerships session.owned_records = {}
def s3_clear_session_ownership(table=None, record_id=None):
    """
        Removes session ownership for a record

        Args:
            table: the table or table name (default: all tables)
            record_id: the record ID (default: all records)
    """
    session = current.session
    if "owned_records" not in session:
        return

    if table is None:
        # Remove all session ownerships
        session.owned_records = {}
        return

    if hasattr(table, "_tablename"):
        tablename = original_tablename(table)
    else:
        tablename = table

    owned = session.owned_records
    if tablename in owned:
        if record_id:
            # Remove just this record ID
            key = str(record_id)
            records = owned[tablename]
            if key in records:
                records.remove(key)
        else:
            # Remove all record IDs for this table
            del owned[tablename]
Python
def s3_update_record_owner(self, table, record, update=False, **fields):
    """
        Update ownership fields in a record (DRY helper method for
        s3_set_record_owner and set_realm_entity)

        Args:
            table: the table
            record: the record or record ID
            update: True to update realm_entity in all realm-components
            fields: dict of {ownership_field:value}
    """
    # Ownership fields
    OUSR = "owned_by_user"
    OGRP = "owned_by_group"
    REALM = "realm_entity"
    ownership_fields = (OUSR, OGRP, REALM)

    # Resolve the record ID
    pkey = table._id.name
    if isinstance(record, (Row, dict)) and pkey in record:
        record_id = record[pkey]
    else:
        record_id = record

    # Only ownership fields can be updated here
    data = {key: fields[key] for key in fields if key in ownership_fields}
    if not data:
        return

    db = current.db

    # Update the record itself
    q = (table._id == record_id)
    success = db(q).update(**data)

    if success and update and REALM in data:
        # Update realm-components
        # Only goes down 1 level: doesn't do components of components
        s3db = current.s3db
        realm_components = s3db.get_config(table, "realm_components")
        if realm_components:
            resource = s3db.resource(table,
                                     components = realm_components,
                                     )
            components = resource.components
            realm = {REALM: data[REALM]}
            for alias in realm_components:
                component = components.get(alias)
                if not component:
                    continue
                ctable = component.table
                if REALM not in ctable.fields:
                    continue
                query = component.get_join() & q
                rows = db(query).select(ctable._id)
                ids = set(row[ctable._id] for row in rows)
                if ids:
                    ctablename = component.tablename
                    if ctable._tablename != ctablename:
                        # Component with table alias => switch to
                        # original table for update:
                        ctable = db[ctablename]
                    db(ctable._id.belongs(ids)).update(**realm)

    # Update super-entity
    self.update_shared_fields(table, record, **data)
def s3_update_record_owner(self, table, record, update=False, **fields):
    """
        Update ownership fields in a record (DRY helper method for
        s3_set_record_owner and set_realm_entity)

        Args:
            table: the table
            record: the record or record ID
            update: True to update realm_entity in all realm-components
            fields: dict of {ownership_field:value}
    """
    # Ownership fields
    OUSR = "owned_by_user"
    OGRP = "owned_by_group"
    REALM = "realm_entity"
    ownership_fields = (OUSR, OGRP, REALM)

    # Resolve the record ID
    pkey = table._id.name
    if isinstance(record, (Row, dict)) and pkey in record:
        record_id = record[pkey]
    else:
        record_id = record

    # Only ownership fields can be updated here
    data = {key: fields[key] for key in fields if key in ownership_fields}
    if not data:
        return

    db = current.db

    # Update the record itself
    q = (table._id == record_id)
    success = db(q).update(**data)

    if success and update and REALM in data:
        # Update realm-components
        # Only goes down 1 level: doesn't do components of components
        s3db = current.s3db
        realm_components = s3db.get_config(table, "realm_components")
        if realm_components:
            resource = s3db.resource(table,
                                     components = realm_components,
                                     )
            components = resource.components
            realm = {REALM: data[REALM]}
            for alias in realm_components:
                component = components.get(alias)
                if not component:
                    continue
                ctable = component.table
                if REALM not in ctable.fields:
                    continue
                query = component.get_join() & q
                rows = db(query).select(ctable._id)
                ids = set(row[ctable._id] for row in rows)
                if ids:
                    ctablename = component.tablename
                    if ctable._tablename != ctablename:
                        # Component with table alias => switch to
                        # original table for update:
                        ctable = db[ctablename]
                    db(ctable._id.belongs(ids)).update(**realm)

    # Update super-entity
    self.update_shared_fields(table, record, **data)
Python
def s3_set_record_owner(self, table, record, force_update=False, **fields):
    """
        Set the record owned_by_user, owned_by_group and realm_entity
        for a record (auto-detect values).
            - to be called by CRUD and Importer during record creation.

        Args:
            table: the Table (or table name)
            record: the record (or record ID)
            force_update: True to update all fields regardless of the
                          current value in the record, False to only
                          update if current value is None
            fields: override auto-detected values, see keywords

        Keyword Args:
            owned_by_user: the auth_user ID of the owner user
            owned_by_group: the auth_group ID of the owner group
            realm_entity: the pe_id of the realm entity, or a tuple
                          (instance_type, instance_id) to lookup the
                          pe_id, e.g. ("org_organisation", 2)

        Notes:
            - only use with force_update for deliberate owner changes (i.e.
              with explicit owned_by_user/owned_by_group) - autodetected
              values can have undesirable side-effects. For mere realm
              updates use set_realm_entity instead.
            - if used with force_update, this will also update the
              realm_entity in all configured realm_components, i.e. no
              separate call to set_realm_entity required.
    """
    s3db = current.s3db

    # Ownership fields
    OUSR = "owned_by_user"
    OGRP = "owned_by_group"
    REALM = "realm_entity"
    ownership_fields = (OUSR, OGRP, REALM)

    # Entity reference fields
    EID = "pe_id"
    OID = "organisation_id"
    SID = "site_id"
    GID = "group_id"
    PID = "person_id"
    entity_fields = (EID, OID, SID, GID, PID)

    # Find the table
    if hasattr(table, "_tablename"):
        tablename = original_tablename(table)
    else:
        tablename = table
        table = s3db.table(tablename)
    if not table:
        return

    # Get the record ID
    pkey = table._id.name
    if isinstance(record, (Row, dict)):
        if pkey not in record:
            return
        record_id = record[pkey]
    else:
        record_id = record
        record = Storage()

    # Find the available fields
    fields_in_table = [f for f in ownership_fields if f in table.fields]
    if not fields_in_table:
        return
    fields_in_table += [f for f in entity_fields if f in table.fields]

    # Get all available fields for the record
    fields_missing = [f for f in fields_in_table if f not in record]
    if fields_missing:
        fields_to_load = [table._id] + [table[f] for f in fields_in_table]
        query = (table._id == record_id)
        row = current.db(query).select(*fields_to_load,
                                       limitby = (0, 1),
                                       ).first()
    else:
        row = record
    if not row:
        return

    # Prepare the update
    data = Storage()

    # Find owned_by_user
    if OUSR in fields_in_table:
        pi = ("pr_person",
              "pr_identity",
              "pr_education",
              "pr_contact",
              "pr_address",
              "pr_contact_emergency",
              "pr_person_availability",
              "pr_person_details",
              "pr_physical_description",
              "pr_group_membership",
              "pr_image",
              "hrm_training",
              )
        if OUSR in fields:
            data[OUSR] = fields[OUSR]
        elif not row[OUSR] or tablename in pi:
            user_id = None
            # Records in PI tables should be owned by the person
            # they refer to (if that person has a user account)
            if tablename == "pr_person":
                user_id = self.s3_get_user_id(person_id = row[table._id])
            elif PID in row and tablename in pi:
                user_id = self.s3_get_user_id(person_id = row[PID])
            elif EID in row and tablename in pi:
                user_id = self.s3_get_user_id(pe_id = row[EID])
            if not user_id and self.s3_logged_in() and self.user:
                # Fallback to current user
                user_id = self.user.id
            if user_id:
                data[OUSR] = user_id

    # Find owned_by_group
    if OGRP in fields_in_table:
        # Check for type-specific handler to find the owner group
        handler = s3db.get_config(tablename, "owner_group")
        if handler:
            if callable(handler):
                data[OGRP] = handler(table, row)
            else:
                data[OGRP] = handler
        # Otherwise, only set if explicitly specified
        elif OGRP in fields:
            data[OGRP] = fields[OGRP]

    # Find realm entity
    if REALM in fields_in_table:
        if REALM in row and row[REALM] and not force_update:
            # Keep the existing realm entity
            pass
        else:
            entity = fields[REALM] if REALM in fields else 0
            realm_entity = self.get_realm_entity(table, row, entity=entity)
            data[REALM] = realm_entity

    self.s3_update_record_owner(table, row, update=force_update, **data)
def s3_set_record_owner(self, table, record, force_update=False, **fields):
    """
        Set the record owned_by_user, owned_by_group and realm_entity
        for a record (auto-detect values).
            - to be called by CRUD and Importer during record creation.

        Args:
            table: the Table (or table name)
            record: the record (or record ID)
            force_update: True to update all fields regardless of the
                          current value in the record, False to only
                          update if current value is None
            fields: override auto-detected values, see keywords

        Keyword Args:
            owned_by_user: the auth_user ID of the owner user
            owned_by_group: the auth_group ID of the owner group
            realm_entity: the pe_id of the realm entity, or a tuple
                          (instance_type, instance_id) to lookup the
                          pe_id, e.g. ("org_organisation", 2)

        Notes:
            - only use with force_update for deliberate owner changes (i.e.
              with explicit owned_by_user/owned_by_group) - autodetected
              values can have undesirable side-effects. For mere realm
              updates use set_realm_entity instead.
            - if used with force_update, this will also update the
              realm_entity in all configured realm_components, i.e. no
              separate call to set_realm_entity required.
    """
    s3db = current.s3db

    # Ownership fields
    OUSR = "owned_by_user"
    OGRP = "owned_by_group"
    REALM = "realm_entity"
    ownership_fields = (OUSR, OGRP, REALM)

    # Entity reference fields
    EID = "pe_id"
    OID = "organisation_id"
    SID = "site_id"
    GID = "group_id"
    PID = "person_id"
    entity_fields = (EID, OID, SID, GID, PID)

    # Find the table
    if hasattr(table, "_tablename"):
        tablename = original_tablename(table)
    else:
        tablename = table
        table = s3db.table(tablename)
    if not table:
        return

    # Get the record ID
    pkey = table._id.name
    if isinstance(record, (Row, dict)):
        if pkey not in record:
            return
        record_id = record[pkey]
    else:
        record_id = record
        record = Storage()

    # Find the available fields
    fields_in_table = [f for f in ownership_fields if f in table.fields]
    if not fields_in_table:
        return
    fields_in_table += [f for f in entity_fields if f in table.fields]

    # Get all available fields for the record
    fields_missing = [f for f in fields_in_table if f not in record]
    if fields_missing:
        fields_to_load = [table._id] + [table[f] for f in fields_in_table]
        query = (table._id == record_id)
        row = current.db(query).select(*fields_to_load,
                                       limitby = (0, 1),
                                       ).first()
    else:
        row = record
    if not row:
        return

    # Prepare the update
    data = Storage()

    # Find owned_by_user
    if OUSR in fields_in_table:
        pi = ("pr_person",
              "pr_identity",
              "pr_education",
              "pr_contact",
              "pr_address",
              "pr_contact_emergency",
              "pr_person_availability",
              "pr_person_details",
              "pr_physical_description",
              "pr_group_membership",
              "pr_image",
              "hrm_training",
              )
        if OUSR in fields:
            data[OUSR] = fields[OUSR]
        elif not row[OUSR] or tablename in pi:
            user_id = None
            # Records in PI tables should be owned by the person
            # they refer to (if that person has a user account)
            if tablename == "pr_person":
                user_id = self.s3_get_user_id(person_id = row[table._id])
            elif PID in row and tablename in pi:
                user_id = self.s3_get_user_id(person_id = row[PID])
            elif EID in row and tablename in pi:
                user_id = self.s3_get_user_id(pe_id = row[EID])
            if not user_id and self.s3_logged_in() and self.user:
                # Fallback to current user
                user_id = self.user.id
            if user_id:
                data[OUSR] = user_id

    # Find owned_by_group
    if OGRP in fields_in_table:
        # Check for type-specific handler to find the owner group
        handler = s3db.get_config(tablename, "owner_group")
        if handler:
            if callable(handler):
                data[OGRP] = handler(table, row)
            else:
                data[OGRP] = handler
        # Otherwise, only set if explicitly specified
        elif OGRP in fields:
            data[OGRP] = fields[OGRP]

    # Find realm entity
    if REALM in fields_in_table:
        if REALM in row and row[REALM] and not force_update:
            # Keep the existing realm entity
            pass
        else:
            entity = fields[REALM] if REALM in fields else 0
            realm_entity = self.get_realm_entity(table, row, entity=entity)
            data[REALM] = realm_entity

    self.s3_update_record_owner(table, row, update=force_update, **data)
Python
def update_shared_fields(table, record, **data):
    """
        Update the shared fields in data in all super-entity rows linked
        with this record

        Args:
            table: the table
            record: a record, record ID or a query
            data: the field/value pairs to update
    """
    db = current.db
    s3db = current.s3db

    super_entities = s3db.get_config(table, "super_entity")
    if not super_entities:
        return
    if not isinstance(super_entities, (list, tuple)):
        super_entities = [super_entities]

    # Map super-keys to those super-tables sharing any of the fields
    tables = {}
    load = s3db.table
    super_key = s3db.super_key
    for se in super_entities:
        supertable = load(se)
        if not supertable or \
           not any(f in supertable.fields for f in data):
            continue
        tables[super_key(supertable)] = supertable

    # Load the instance record(s) unless all required super-keys
    # are already present in the record
    if not isinstance(record, (Row, dict)) or \
       any(f not in record for f in tables):
        if isinstance(record, Query):
            query = record
            limitby = None
        elif isinstance(record, (Row, dict)):
            query = table._id == record[table._id.name]
            limitby = (0, 1)
        else:
            query = table._id == record
            limitby = (0, 1)
        fields = [table[f] for f in tables]
        instance_records = db(query).select(limitby=limitby, *fields)
    else:
        instance_records = [record]
    if not instance_records:
        return

    # Update the linked super-entity rows
    for instance_record in instance_records:
        for skey, supertable in tables.items():
            if skey not in instance_record:
                continue
            query = (supertable[skey] == instance_record[skey])
            updates = {f: v for f, v in data.items()
                         if f in supertable.fields}
            if not updates:
                continue
            db(query).update(**updates)
def update_shared_fields(table, record, **data):
    """
        Update the shared fields in data in all super-entity rows linked
        with this record

        Args:
            table: the table
            record: a record, record ID or a query
            data: the field/value pairs to update
    """
    db = current.db
    s3db = current.s3db

    super_entities = s3db.get_config(table, "super_entity")
    if not super_entities:
        return
    if not isinstance(super_entities, (list, tuple)):
        super_entities = [super_entities]

    # Map super-keys to those super-tables sharing any of the fields
    tables = {}
    load = s3db.table
    super_key = s3db.super_key
    for se in super_entities:
        supertable = load(se)
        if not supertable or \
           not any(f in supertable.fields for f in data):
            continue
        tables[super_key(supertable)] = supertable

    # Load the instance record(s) unless all required super-keys
    # are already present in the record
    if not isinstance(record, (Row, dict)) or \
       any(f not in record for f in tables):
        if isinstance(record, Query):
            query = record
            limitby = None
        elif isinstance(record, (Row, dict)):
            query = table._id == record[table._id.name]
            limitby = (0, 1)
        else:
            query = table._id == record
            limitby = (0, 1)
        fields = [table[f] for f in tables]
        instance_records = db(query).select(limitby=limitby, *fields)
    else:
        instance_records = [record]
    if not instance_records:
        return

    # Update the linked super-entity rows
    for instance_record in instance_records:
        for skey, supertable in tables.items():
            if skey not in instance_record:
                continue
            query = (supertable[skey] == instance_record[skey])
            updates = {f: v for f, v in data.items()
                         if f in supertable.fields}
            if not updates:
                continue
            db(query).update(**updates)
Python
def permitted_facilities(self,
                         table = None,
                         error_msg = None,
                         redirect_on_error = True,
                         facility_type = None,
                         ):
    """
        If there are no facilities that the user has permission for,
        prevents create & update of records in table & gives a
        warning if the user tries to.

        Args:
            table: the table or table name
            error_msg: error message
            redirect_on_error: whether to redirect on error
            facility_type: restrict to this particular type of
                           facilities (a tablename)

        Returns:
            list of site_ids the user has update-permission for
    """
    T = current.T
    ERROR = T("You do not have permission for any facility to perform this action.")
    HINT = T("Create a new facility or ensure that you have permissions for an existing facility.")

    if not error_msg:
        error_msg = ERROR

    s3db = current.s3db
    site_ids = []
    if facility_type is None:
        site_types = self.org_site_types
    else:
        if facility_type not in self.org_site_types:
            return site_ids
        site_types = [s3db[facility_type]]
    for site_type in site_types:
        try:
            ftable = s3db[site_type]
            if "site_id" not in ftable.fields:
                continue
            query = self.s3_accessible_query("update", ftable)
            if "deleted" in ftable:
                query &= (ftable.deleted == False)
            rows = current.db(query).select(ftable.site_id)
            site_ids += [row.site_id for row in rows]
        except Exception:
            # Module disabled => skip this site type (best-effort);
            # fix: was a bare except, which also swallowed e.g.
            # KeyboardInterrupt/SystemExit
            pass

    if site_ids:
        return site_ids

    args = current.request.args
    if "update" in args or "create" in args:
        if redirect_on_error:
            # Trying to create or update, but the user does not
            # have permission for any facility
            current.session.error = "%s %s" % (error_msg, HINT)
            redirect(URL(c="default", f="index"))
    elif table is not None:
        # Block record creation in this table instead
        if hasattr(table, "_tablename"):
            tablename = original_tablename(table)
        else:
            tablename = table
        s3db.configure(tablename, insertable=False)

    return site_ids
def permitted_facilities(self,
                         table = None,
                         error_msg = None,
                         redirect_on_error = True,
                         facility_type = None,
                         ):
    """
        If there are no facilities that the user has permission for,
        prevents create & update of records in table & gives a
        warning if the user tries to.

        Args:
            table: the table or table name
            error_msg: error message
            redirect_on_error: whether to redirect on error
            facility_type: restrict to this particular type of
                           facilities (a tablename)

        Returns:
            list of site_ids the user has update-permission for
    """
    T = current.T
    ERROR = T("You do not have permission for any facility to perform this action.")
    HINT = T("Create a new facility or ensure that you have permissions for an existing facility.")

    if not error_msg:
        error_msg = ERROR

    s3db = current.s3db
    site_ids = []
    if facility_type is None:
        site_types = self.org_site_types
    else:
        if facility_type not in self.org_site_types:
            return site_ids
        site_types = [s3db[facility_type]]
    for site_type in site_types:
        try:
            ftable = s3db[site_type]
            if "site_id" not in ftable.fields:
                continue
            query = self.s3_accessible_query("update", ftable)
            if "deleted" in ftable:
                query &= (ftable.deleted == False)
            rows = current.db(query).select(ftable.site_id)
            site_ids += [row.site_id for row in rows]
        except Exception:
            # Module disabled => skip this site type (best-effort);
            # fix: was a bare except, which also swallowed e.g.
            # KeyboardInterrupt/SystemExit
            pass

    if site_ids:
        return site_ids

    args = current.request.args
    if "update" in args or "create" in args:
        if redirect_on_error:
            # Trying to create or update, but the user does not
            # have permission for any facility
            current.session.error = "%s %s" % (error_msg, HINT)
            redirect(URL(c="default", f="index"))
    elif table is not None:
        # Block record creation in this table instead
        if hasattr(table, "_tablename"):
            tablename = original_tablename(table)
        else:
            tablename = table
        s3db.configure(tablename, insertable=False)

    return site_ids
Python
def permitted_organisations(self,
                            table = None,
                            error_msg = None,
                            redirect_on_error = True,
                            ):
    """
        If there are no organisations that the user has update
        permission for, prevents create & update of a record in
        table & gives an warning if the user tries to.

        Args:
            table: the table or table name
            error_msg: error message
            redirect_on_error: whether to redirect on error
    """
    T = current.T
    ERROR = T("You do not have permission for any organization to perform this action.")
    HINT = T("Create a new organization or ensure that you have permissions for an existing organization.")

    if not error_msg:
        error_msg = ERROR

    s3db = current.s3db
    org_table = s3db.org_organisation

    # All organisations the user can update
    query = self.s3_accessible_query("update", org_table)
    query &= (org_table.deleted == False)
    rows = current.db(query).select(org_table.id)
    if rows:
        return [org.id for org in rows]

    # No permitted organisations => warn or block creation
    request = current.request
    if "update" in request.args or "create" in request.args:
        if redirect_on_error:
            current.session.error = error_msg + " " + HINT
            redirect(URL(c="default", f="index"))
    elif table is not None:
        if hasattr(table, "_tablename"):
            tablename = original_tablename(table)
        else:
            tablename = table
        s3db.configure(tablename, insertable=False)
    return []
def permitted_organisations(self,
                            table = None,
                            error_msg = None,
                            redirect_on_error = True,
                            ):
    """
        If there are no organisations that the user has update
        permission for, prevents create & update of a record in
        table & gives an warning if the user tries to.

        Args:
            table: the table or table name
            error_msg: error message
            redirect_on_error: whether to redirect on error
    """
    T = current.T
    ERROR = T("You do not have permission for any organization to perform this action.")
    HINT = T("Create a new organization or ensure that you have permissions for an existing organization.")

    if not error_msg:
        error_msg = ERROR

    s3db = current.s3db
    org_table = s3db.org_organisation

    # All organisations the user can update
    query = self.s3_accessible_query("update", org_table)
    query &= (org_table.deleted == False)
    rows = current.db(query).select(org_table.id)
    if rows:
        return [org.id for org in rows]

    # No permitted organisations => warn or block creation
    request = current.request
    if "update" in request.args or "create" in request.args:
        if redirect_on_error:
            current.session.error = error_msg + " " + HINT
            redirect(URL(c="default", f="index"))
    elif table is not None:
        if hasattr(table, "_tablename"):
            tablename = original_tablename(table)
        else:
            tablename = table
        s3db.configure(tablename, insertable=False)
    return []
Python
def root_org(self):
    """ Return the current user's root organisation ID or None """

    user = self.user
    if not user:
        return None
    org_id = user.organisation_id
    if not org_id:
        return None

    if not current.deployment_settings.get_org_branches():
        # Branches disabled => the user's org is its own root
        return org_id

    # Common key for all users of this org, also used by
    # vol_service_record() & hrm_training_event_realm_entity()
    return current.cache.ram("root_org_%s" % org_id,
                             lambda: current.s3db.org_root_organisation(org_id),
                             time_expire = 120,
                             )
def root_org(self):
    """ Return the current user's root organisation ID or None """

    user = self.user
    if not user:
        return None
    org_id = user.organisation_id
    if not org_id:
        return None

    if not current.deployment_settings.get_org_branches():
        # Branches disabled => the user's org is its own root
        return org_id

    # Common key for all users of this org, also used by
    # vol_service_record() & hrm_training_event_realm_entity()
    return current.cache.ram("root_org_%s" % org_id,
                             lambda: current.s3db.org_root_organisation(org_id),
                             time_expire = 120,
                             )
Python
def root_org_name(self):
    """ Return the current user's root organisation name or None """

    if not self.user:
        return None
    org_id = self.user.organisation_id
    if not org_id:
        return None

    if not current.deployment_settings.get_org_branches():
        # Branches disabled => look up the org name directly
        s3db = current.s3db
        table = s3db.org_organisation
        row = current.db(table.id == org_id).select(table.name,
                                                    cache = s3db.cache,
                                                    limitby = (0, 1),
                                                    ).first()
        # Fix: explicit None-check instead of bare try/except
        # (which swallowed all exceptions, not just a missing org)
        return row.name if row else None

    # Common key for all users of this org
    return current.cache.ram("root_org_name_%s" % org_id,
                             lambda: current.s3db.org_root_organisation_name(org_id),
                             time_expire = 120,
                             )
def root_org_name(self):
    """ Return the current user's root organisation name or None """

    if not self.user:
        return None
    org_id = self.user.organisation_id
    if not org_id:
        return None

    if not current.deployment_settings.get_org_branches():
        # Branches disabled => look up the org name directly
        s3db = current.s3db
        table = s3db.org_organisation
        row = current.db(table.id == org_id).select(table.name,
                                                    cache = s3db.cache,
                                                    limitby = (0, 1),
                                                    ).first()
        # Fix: explicit None-check instead of bare try/except
        # (which swallowed all exceptions, not just a missing org)
        return row.name if row else None

    # Common key for all users of this org
    return current.cache.ram("root_org_name_%s" % org_id,
                             lambda: current.s3db.org_root_organisation_name(org_id),
                             time_expire = 120,
                             )
Python
def filter_by_root_org(self, table):
    """
        Function to return a query to filter a table to only display
        results for the user's root org OR record with no root org

        @ToDo: Restore Realms and add a role/functionality support for
               Master Data - then this function is redundant
    """
    no_org = (table.organisation_id == None)
    root_org = self.root_org()
    if root_org:
        return (table.organisation_id == root_org) | no_org
    return no_org
def filter_by_root_org(self, table):
    """
        Function to return a query to filter a table to only display
        results for the user's root org OR record with no root org

        @ToDo: Restore Realms and add a role/functionality support for
               Master Data - then this function is redundant
    """
    no_org = (table.organisation_id == None)
    root_org = self.root_org()
    if root_org:
        return (table.organisation_id == root_org) | no_org
    return no_org
Python
def shelter_type():
    """
        RESTful CRUD controller
        List / add shelter types (e.g. NGO-operated, Government
        evacuation center, School, Hospital -- see Agasti
        opt_camp_type.)
    """
    return crud_controller()
def shelter_type():
    """
        RESTful CRUD controller
        List / add shelter types (e.g. NGO-operated, Government
        evacuation center, School, Hospital -- see Agasti
        opt_camp_type.)
    """
    return crud_controller()
Python
def shelter_service():
    """
        RESTful CRUD controller
        List / add shelter services (e.g. medical, housing, food, ...)
    """
    return crud_controller()
def shelter_service():
    """
        RESTful CRUD controller
        List / add shelter services (e.g. medical, housing, food, ...)
    """
    return crud_controller()
Python
def population_type():
    """ Shelter population subgroups: CRUD controller """
    return crud_controller()
def population_type():
    """ Shelter population subgroups: CRUD controller """
    return crud_controller()
Python
def shelter_unit():
    """
        REST controller to
            - retrieve options for shelter unit selection
            - show layer on Map
            - imports
    """
    # [Geo]JSON & Map Popups or Imports only
    def prep(r):
        if r.representation == "plain":
            # Have the 'Open' button open in the context of the Shelter
            record_id = r.id
            table = s3db.cr_shelter_unit
            row = db(table.id == record_id).select(table.shelter_id,
                                                   limitby = (0, 1),
                                                   ).first()
            # Fix: guard against a missing record (previously an
            # unguarded row.shelter_id => AttributeError)
            if row:
                s3db.configure("cr_shelter_unit",
                               popup_url = URL(c="cr", f="shelter",
                                               args = [row.shelter_id,
                                                       "shelter_unit",
                                                       record_id,
                                                       ],
                                               ),
                               )
            return True
        elif r.representation in ("json", "geojson") or \
             r.method == "import":
            # ("plain" already handled above, so removed from this tuple)
            return True
        return False
    s3.prep = prep

    return crud_controller()
def shelter_unit():
    """
        REST controller to
            - retrieve options for shelter unit selection
            - show layer on Map
            - imports
    """
    # [Geo]JSON & Map Popups or Imports only
    def prep(r):
        if r.representation == "plain":
            # Have the 'Open' button open in the context of the Shelter
            record_id = r.id
            table = s3db.cr_shelter_unit
            row = db(table.id == record_id).select(table.shelter_id,
                                                   limitby = (0, 1),
                                                   ).first()
            # Fix: guard against a missing record (previously an
            # unguarded row.shelter_id => AttributeError)
            if row:
                s3db.configure("cr_shelter_unit",
                               popup_url = URL(c="cr", f="shelter",
                                               args = [row.shelter_id,
                                                       "shelter_unit",
                                                       record_id,
                                                       ],
                                               ),
                               )
            return True
        elif r.representation in ("json", "geojson") or \
             r.method == "import":
            # ("plain" already handled above, so removed from this tuple)
            return True
        return False
    s3.prep = prep

    return crud_controller()
Python
def shelter_flag():
    """ Shelter Flags - RESTful CRUD controller """

    def prep(r):
        if r.interactive:
            # Filter task_assign_to option to human resources and teams
            assignees = []

            # Select active HRs
            hr = s3db.resource("hrm_human_resource",
                               filter = FS("status") == 1,
                               )
            rows = hr.select(["person_id$pe_id"],
                             limit = None,
                             represent = False,
                             ).rows
            assignees.extend(row["pr_person.pe_id"] for row in rows)

            # Select teams
            teams = s3db.resource("pr_group",
                                  filter = FS("group_type") == 3,
                                  )
            rows = teams.select(["pe_id"],
                                limit = None,
                                represent = False,
                                ).rows
            assignees.extend(row["pr_group.pe_id"] for row in rows)

            # Set filter for task_assign_to.requires
            field = r.table.task_assign_to
            requires = field.requires
            if isinstance(requires, IS_EMPTY_OR):
                requires = requires.other
            requires.set_filter(filterby = "pe_id",
                                filter_opts = assignees,
                                )
        return True
    s3.prep = prep

    return crud_controller()
def shelter_flag():
    """ Shelter Flags - RESTful CRUD controller """

    def prep(r):
        if r.interactive:
            # Filter task_assign_to option to human resources and teams
            assignees = []

            # Select active HRs
            hr = s3db.resource("hrm_human_resource",
                               filter = FS("status") == 1,
                               )
            rows = hr.select(["person_id$pe_id"],
                             limit = None,
                             represent = False,
                             ).rows
            assignees.extend(row["pr_person.pe_id"] for row in rows)

            # Select teams
            teams = s3db.resource("pr_group",
                                  filter = FS("group_type") == 3,
                                  )
            rows = teams.select(["pe_id"],
                                limit = None,
                                represent = False,
                                ).rows
            assignees.extend(row["pr_group.pe_id"] for row in rows)

            # Set filter for task_assign_to.requires
            field = r.table.task_assign_to
            requires = field.requires
            if isinstance(requires, IS_EMPTY_OR):
                requires = requires.other
            requires.set_filter(filterby = "pe_id",
                                filter_opts = assignees,
                                )
        return True
    s3.prep = prep

    return crud_controller()
Python
def shelter_inspection():
    """ Shelter Inspections - RESTful CRUD controller """
    return crud_controller()
def shelter_inspection():
    """ Shelter Inspections - RESTful CRUD controller """
    return crud_controller()
Python
def shelter_inspection_flag():
    """ Shelter Inspection Flags - RESTful CRUD controller """
    return crud_controller()
def shelter_inspection_flag():
    """ Shelter Inspection Flags - RESTful CRUD controller """
    return crud_controller()
Python
def apply_method(self, r, **attr):
    """
        Entry point to apply map method to CRUDRequests
            - produces a full page with FilterWidgets above a Map

        Args:
            r: the CRUDRequest instance
            attr: controller attributes for the request

        Returns:
            output object to send to the view
    """
    output = None
    if r.http != "GET":
        r.error(405, current.ERROR.BAD_METHOD)
    elif r.representation == "html":
        output = self.page(r, **attr)
    # Any other GET representation => None (unchanged behaviour)
    return output
def apply_method(self, r, **attr):
    """
        Entry point to apply map method to CRUDRequests
            - produces a full page with FilterWidgets above a Map

        Args:
            r: the CRUDRequest instance
            attr: controller attributes for the request

        Returns:
            output object to send to the view
    """
    output = None
    if r.http != "GET":
        r.error(405, current.ERROR.BAD_METHOD)
    elif r.representation == "html":
        output = self.page(r, **attr)
    # Any other GET representation => None (unchanged behaviour)
    return output
Python
def widget(self,
           r,
           method = "map",
           widget_id = None,
           visible = True,
           **attr):
    """
        Render a Map widget suitable for use in an S3Filter-based page
        such as S3Summary

        Args:
            r: the CRUDRequest
            method: the widget method
            widget_id: the widget ID
            visible: whether the widget is initially visible
            attr: controller attributes

        Keyword Args:
            callback: callback to show the map:
                - "DEFAULT": call show_map as soon as all components
                  are loaded and ready (= use default show_map callback)
                - custom JavaScript: invoked as soon as all components
                  are loaded and ready
                - None: only load the components, map will be shown by
                  a later explicit call to show_map (this is the default
                  here since the map DIV would typically be hidden
                  initially, e.g. summary tab)
    """
    callback = attr.get("callback")
    if not widget_id:
        widget_id = "default_map"

    gis = current.gis
    tablename = self.tablename

    ftable = current.s3db.gis_layer_feature

    def lookup_layer(prefix, name):
        # Find the unambiguous feature layer for this controller/function
        query = (ftable.controller == prefix) & \
                (ftable.function == name)
        layers = current.db(query).select(ftable.layer_id,
                                          ftable.style_default,
                                          )
        if len(layers) > 1:
            layers.exclude(lambda row: row.style_default == False)
        if len(layers) == 1:
            layer_id = layers.first().layer_id
        else:
            # We can't distinguish
            layer_id = None
        return layer_id

    prefix = r.controller
    name = r.function
    layer_id = lookup_layer(prefix, name)
    if not layer_id:
        # Try the tablename
        prefix, name = tablename.split("_", 1)
        layer_id = lookup_layer(prefix, name)

    # This URL is ignored if we have a layer_id:
    url = URL(extension="geojson", args=None, vars=r.get_vars)

    # Retain any custom filter parameters for the layer lookup
    custom_params = {k: v for k, v in r.get_vars.items() if k[:2] == "$$"}

    # @ToDo: Support maps with multiple layers (Dashboards)
    #_id = "search_results_%s" % widget_id
    _id = "search_results"
    feature_resources = [{"name": current.T("Search Results"),
                          "id": _id,
                          "layer_id": layer_id,
                          "tablename": tablename,
                          "url": url,
                          "custom_params": custom_params,
                          # We activate in callback after ensuring URL
                          # is updated for current filter status
                          "active": False,
                          }]

    settings = current.deployment_settings
    catalogue_layers = settings.get_gis_widget_catalogue_layers()
    legend = settings.get_gis_legend()
    search = settings.get_gis_search_geonames()
    toolbar = settings.get_gis_toolbar()
    wms_browser = settings.get_gis_widget_wms_browser()
    if wms_browser:
        config = gis.get_config()
        if config.wmsbrowser_url:
            # Fix: was a redundant double assignment
            # (wms_browser = wms_browser = {...})
            wms_browser = {"name": config.wmsbrowser_name,
                           "url": config.wmsbrowser_url,
                           }
        else:
            wms_browser = None

    map_widget = gis.show_map(id = widget_id,
                              feature_resources = feature_resources,
                              catalogue_layers = catalogue_layers,
                              collapsed = True,
                              legend = legend,
                              toolbar = toolbar,
                              save = False,
                              search = search,
                              wms_browser = wms_browser,
                              callback = callback,
                              )
    return map_widget
def widget(self,
           r,
           method = "map",
           widget_id = None,
           visible = True,
           **attr):
    """
    Render a Map widget suitable for use in an S3Filter-based page
    such as S3Summary

    Args:
        r: the CRUDRequest
        method: the widget method
        widget_id: the widget ID
        visible: whether the widget is initially visible
        attr: controller attributes

    Keyword Args:
        callback: callback to show the map:
                  - "DEFAULT".............call show_map as soon as all
                                          components are loaded and ready
                                          (= use default show_map callback)
                  - custom JavaScript.....invoked as soon as all components
                                          are loaded and ready
                  - None..................only load the components, map
                                          will be shown by a later explicit
                                          call to show_map (this is the
                                          default here since the map DIV
                                          would typically be hidden
                                          initially, e.g. summary tab)
    """

    callback = attr.get("callback")
    if not widget_id:
        widget_id = "default_map"

    gis = current.gis
    tablename = self.tablename

    ftable = current.s3db.gis_layer_feature

    def lookup_layer(prefix, name):
        # Find the feature layer for this controller/function pair,
        # preferring the default style when there are multiple matches
        query = (ftable.controller == prefix) & \
                (ftable.function == name)
        layers = current.db(query).select(ftable.layer_id,
                                          ftable.style_default,
                                          )
        if len(layers) > 1:
            layers.exclude(lambda row: row.style_default == False)
        if len(layers) == 1:
            layer_id = layers.first().layer_id
        else:
            # We can't distinguish
            layer_id = None
        return layer_id

    prefix = r.controller
    name = r.function
    layer_id = lookup_layer(prefix, name)
    if not layer_id:
        # Try the tablename
        prefix, name = tablename.split("_", 1)
        layer_id = lookup_layer(prefix, name)

    # This URL is ignored if we have a layer_id:
    url = URL(extension="geojson", args=None, vars=r.get_vars)

    # Retain any custom filter parameters for the layer lookup
    custom_params = {k: v for k, v in r.get_vars.items() if k[:2] == "$$"}

    # @ToDo: Support maps with multiple layers (Dashboards)
    #_id = "search_results_%s" % widget_id
    _id = "search_results"
    feature_resources = [{"name": current.T("Search Results"),
                          "id": _id,
                          "layer_id": layer_id,
                          "tablename": tablename,
                          "url": url,
                          "custom_params": custom_params,
                          # We activate in callback after ensuring URL is
                          # updated for current filter status
                          "active": False,
                          }]

    settings = current.deployment_settings
    catalogue_layers = settings.get_gis_widget_catalogue_layers()
    legend = settings.get_gis_legend()
    search = settings.get_gis_search_geonames()
    toolbar = settings.get_gis_toolbar()
    wms_browser = settings.get_gis_widget_wms_browser()
    if wms_browser:
        config = gis.get_config()
        if config.wmsbrowser_url:
            # Fixed: removed redundant double assignment
            # (was: wms_browser = wms_browser = {...})
            wms_browser = {"name": config.wmsbrowser_name,
                           "url": config.wmsbrowser_url,
                           }
        else:
            wms_browser = None

    map_widget = gis.show_map(id = widget_id,
                              feature_resources = feature_resources,
                              catalogue_layers = catalogue_layers,
                              collapsed = True,
                              legend = legend,
                              toolbar = toolbar,
                              save = False,
                              search = search,
                              wms_browser = wms_browser,
                              callback = callback,
                              )
    return map_widget
Python
def represent(self, records):
    """
    Provide a Human-readable representation of Audit records
    - currently unused

    Args:
        records: a single record ID (int) or a list of record IDs

    Returns:
        list of strings, one multi-line description per record
    """

    table = self.table

    # Retrieve the records
    if isinstance(records, int):
        limit = 1
        query = (table.id == records)
    else:
        limit = len(records)
        query = (table.id.belongs(records))
    records = current.db(query).select(table.tablename,
                                       table.method,
                                       table.user_id,
                                       table.old_value,
                                       table.new_value,
                                       limitby = (0, limit)
                                       )

    # Convert to Human-readable form
    s3db = current.s3db
    output = []
    oappend = output.append
    for record in records:
        table = s3db[record.tablename]
        method = record.method

        # Fixed: diff is now initialized per-record, so an unknown
        # method can no longer re-use the previous record's diff
        diff = []
        dappend = diff.append

        if method == "create":
            new_value = record.new_value
            if not new_value:
                continue
            for v in new_value:
                fieldname, value = v.split(":", 1)
                represent = table[fieldname].represent
                if represent:
                    value = represent(value)
                label = table[fieldname].label or fieldname
                dappend("%s is %s" % (label, value))

        elif method == "update":
            old_values = record.old_value
            new_values = record.new_value
            # Fixed: was testing the (possibly unbound) name new_value
            # instead of new_values
            if not new_values:
                continue
            changed = {}
            for v in new_values:
                fieldname, new_value = v.split(":", 1)
                old_value = old_values.get(fieldname, None)
                if new_value != old_value:
                    ftype = table[fieldname].type
                    if ftype == "integer" or \
                       ftype.startswith("reference"):
                        # Compare numerically for int/reference fields
                        if new_value:
                            new_value = int(new_value)
                        if new_value == old_value:
                            continue
                    represent = table[fieldname].represent
                    if represent:
                        new_value = represent(new_value)
                    label = table[fieldname].label or fieldname
                    if old_value:
                        if represent:
                            old_value = represent(old_value)
                        changed[fieldname] = "%s changed from %s to %s" % \
                                             (label, old_value, new_value)
                    else:
                        changed[fieldname] = "%s changed to %s" % \
                                             (label, new_value)
            for fieldname in changed:
                dappend(changed[fieldname])

        elif method == "delete":
            old_value = record.old_value
            if not old_value:
                continue
            for v in old_value:
                fieldname, value = v.split(":", 1)
                represent = table[fieldname].represent
                if represent:
                    value = represent(value)
                label = table[fieldname].label or fieldname
                dappend("%s was %s" % (label, value))

        else:
            # Unknown method => nothing to represent
            continue

        oappend("\n".join(diff))

    return output
def represent(self, records):
    """
    Provide a Human-readable representation of Audit records
    - currently unused

    Args:
        records: a single record ID (int) or a list of record IDs

    Returns:
        list of strings, one multi-line description per record
    """

    table = self.table

    # Retrieve the records
    if isinstance(records, int):
        limit = 1
        query = (table.id == records)
    else:
        limit = len(records)
        query = (table.id.belongs(records))
    records = current.db(query).select(table.tablename,
                                       table.method,
                                       table.user_id,
                                       table.old_value,
                                       table.new_value,
                                       limitby = (0, limit)
                                       )

    # Convert to Human-readable form
    s3db = current.s3db
    output = []
    oappend = output.append
    for record in records:
        table = s3db[record.tablename]
        method = record.method

        # Fixed: diff is now initialized per-record, so an unknown
        # method can no longer re-use the previous record's diff
        diff = []
        dappend = diff.append

        if method == "create":
            new_value = record.new_value
            if not new_value:
                continue
            for v in new_value:
                fieldname, value = v.split(":", 1)
                represent = table[fieldname].represent
                if represent:
                    value = represent(value)
                label = table[fieldname].label or fieldname
                dappend("%s is %s" % (label, value))

        elif method == "update":
            old_values = record.old_value
            new_values = record.new_value
            # Fixed: was testing the (possibly unbound) name new_value
            # instead of new_values
            if not new_values:
                continue
            changed = {}
            for v in new_values:
                fieldname, new_value = v.split(":", 1)
                old_value = old_values.get(fieldname, None)
                if new_value != old_value:
                    ftype = table[fieldname].type
                    if ftype == "integer" or \
                       ftype.startswith("reference"):
                        # Compare numerically for int/reference fields
                        if new_value:
                            new_value = int(new_value)
                        if new_value == old_value:
                            continue
                    represent = table[fieldname].represent
                    if represent:
                        new_value = represent(new_value)
                    label = table[fieldname].label or fieldname
                    if old_value:
                        if represent:
                            old_value = represent(old_value)
                        changed[fieldname] = "%s changed from %s to %s" % \
                                             (label, old_value, new_value)
                    else:
                        changed[fieldname] = "%s changed to %s" % \
                                             (label, new_value)
            for fieldname in changed:
                dappend(changed[fieldname])

        elif method == "delete":
            old_value = record.old_value
            if not old_value:
                continue
            for v in old_value:
                fieldname, value = v.split(":", 1)
                represent = table[fieldname].represent
                if represent:
                    value = represent(value)
                label = table[fieldname].label or fieldname
                dappend("%s was %s" % (label, value))

        else:
            # Unknown method => nothing to represent
            continue

        oappend("\n".join(diff))

    return output
Python
def lookup_rows(self, key, values, fields=None): """ Lookup all rows referenced by values (in foreign key representations) Args: key: the key Field values: the values fields: the fields to retrieve """ if fields is None: fields = [] fields.append(key) if len(values) == 1: query = (key == values[0]) else: query = key.belongs(values) rows = current.db(query).select(*fields) self.queries += 1 return rows
def lookup_rows(self, key, values, fields=None): """ Lookup all rows referenced by values (in foreign key representations) Args: key: the key Field values: the values fields: the fields to retrieve """ if fields is None: fields = [] fields.append(key) if len(values) == 1: query = (key == values[0]) else: query = key.belongs(values) rows = current.db(query).select(*fields) self.queries += 1 return rows
Python
def represent_row(self, row): """ Represent the referenced row (in foreign key representations) Args: row: the row Returns: the representation of the Row, or None if there is an error in the Row """ labels = self.labels translated = False if self.slabels: # String Template or lazyT try: row_dict = row.as_dict() except AttributeError: # Row just a dict/Storage after all? (e.g. custom lookup) row_dict = row # Represent None as self.none none = self.none for k, v in list(row_dict.items()): if v is None: row_dict[k] = none v = labels % row_dict elif self.clabels: # External Renderer v = labels(row) else: # Default values = [row[f] for f in self.fields if row[f] not in (None, "")] if len(values) > 1: # Multiple values => concatenate with separator if self.translate: # Translate items individually before concatenating T = current.T values = [T(v) if not type(v) is lazyT else v for v in values] translated = True sep = self.field_sep v = sep.join(s3_str(value) for value in values) elif values: v = s3_str(values[0]) else: v = self.none if not translated and self.translate and not type(v) is lazyT: output = current.T(v) else: output = v return output
def represent_row(self, row): """ Represent the referenced row (in foreign key representations) Args: row: the row Returns: the representation of the Row, or None if there is an error in the Row """ labels = self.labels translated = False if self.slabels: # String Template or lazyT try: row_dict = row.as_dict() except AttributeError: # Row just a dict/Storage after all? (e.g. custom lookup) row_dict = row # Represent None as self.none none = self.none for k, v in list(row_dict.items()): if v is None: row_dict[k] = none v = labels % row_dict elif self.clabels: # External Renderer v = labels(row) else: # Default values = [row[f] for f in self.fields if row[f] not in (None, "")] if len(values) > 1: # Multiple values => concatenate with separator if self.translate: # Translate items individually before concatenating T = current.T values = [T(v) if not type(v) is lazyT else v for v in values] translated = True sep = self.field_sep v = sep.join(s3_str(value) for value in values) elif values: v = s3_str(values[0]) else: v = self.none if not translated and self.translate and not type(v) is lazyT: output = current.T(v) else: output = v return output
Python
def link(self, k, v, row=None): """ Represent a (key, value) as hypertext link. - Typically, k is a foreign key value, and v the representation of the referenced record, and the link shall open a read view of the referenced record. - In the base class, the linkto-parameter expects a URL (as string) with "[id]" as placeholder for the key. Args: k: the key v: the representation of the key row: the row with this key (unused in the base class) """ if self.linkto: k = s3_str(k) return A(v, _href=self.linkto.replace("[id]", k) \ .replace("%5Bid%5D", k)) else: return v
def link(self, k, v, row=None): """ Represent a (key, value) as hypertext link. - Typically, k is a foreign key value, and v the representation of the referenced record, and the link shall open a read view of the referenced record. - In the base class, the linkto-parameter expects a URL (as string) with "[id]" as placeholder for the key. Args: k: the key v: the representation of the key row: the row with this key (unused in the base class) """ if self.linkto: k = s3_str(k) return A(v, _href=self.linkto.replace("[id]", k) \ .replace("%5Bid%5D", k)) else: return v
Python
def multiple(self, values, rows=None, list_type=True, show_link=True): """ Represent multiple values as a comma-separated list. Args: values: list of values rows: the referenced rows (if values are foreign keys) show_link: render each representation as link """ self._setup() show_link = show_link and self.show_link # Get the values if rows and self.table: key = self.key values = [row[key] for row in rows] elif self.list_type and list_type: try: hasnone = None in values if hasnone: values = [i for i in values if i != None] values = list(set(chain.from_iterable(values))) if hasnone: values.append(None) except TypeError: raise ValueError("List of lists expected, got %s" % values) else: values = [values] if type(values) is not list else values # Lookup the representations if values: default = self.default items = self._lookup(values, rows=rows) if show_link: link = self.link rows = self.rows labels = [[link(k, s3_str(items[k]), row=rows.get(k)), ", "] if k in items else [default, ", "] for k in values] if labels: return TAG[""](list(chain.from_iterable(labels))[:-1]) else: return "" else: labels = [s3_str(items[k]) if k in items else default for k in values] if labels: return ", ".join(labels) return self.none
def multiple(self, values, rows=None, list_type=True, show_link=True): """ Represent multiple values as a comma-separated list. Args: values: list of values rows: the referenced rows (if values are foreign keys) show_link: render each representation as link """ self._setup() show_link = show_link and self.show_link # Get the values if rows and self.table: key = self.key values = [row[key] for row in rows] elif self.list_type and list_type: try: hasnone = None in values if hasnone: values = [i for i in values if i != None] values = list(set(chain.from_iterable(values))) if hasnone: values.append(None) except TypeError: raise ValueError("List of lists expected, got %s" % values) else: values = [values] if type(values) is not list else values # Lookup the representations if values: default = self.default items = self._lookup(values, rows=rows) if show_link: link = self.link rows = self.rows labels = [[link(k, s3_str(items[k]), row=rows.get(k)), ", "] if k in items else [default, ", "] for k in values] if labels: return TAG[""](list(chain.from_iterable(labels))[:-1]) else: return "" else: labels = [s3_str(items[k]) if k in items else default for k in values] if labels: return ", ".join(labels) return self.none
Python
def bulk(self, values, rows=None, list_type=True, show_link=True): """ Represent multiple values as dict {value: representation} Args: values: list of values rows: the rows show_link: render each representation as link Returns: a dict {value: representation} Note: For list-types, the dict keys will be the individual values within all lists - and not the lists (simply because lists can not be dict keys). Thus, the caller would still have to construct the final string/HTML. """ self._setup() show_link = show_link and self.show_link # Get the values if rows and self.table: key = self.key _rows = self.rows values = set() add_value = values.add for row in rows: value = row[key] _rows[value] = row add_value(value) values = list(values) elif self.list_type and list_type: try: hasnone = None in values if hasnone: values = [i for i in values if i != None] values = list(set(chain.from_iterable(values))) if hasnone: values.append(None) except TypeError: raise ValueError("List of lists expected, got %s" % values) else: values = [values] if type(values) is not list else values # Lookup the representations if values: labels = self._lookup(values, rows=rows) if show_link: link = self.link rows = self.rows labels = {k: link(k, v, rows.get(k)) for k, v in labels.items()} for k in values: if k not in labels: labels[k] = self.default else: labels = {} labels[None] = self.none return labels
def bulk(self, values, rows=None, list_type=True, show_link=True): """ Represent multiple values as dict {value: representation} Args: values: list of values rows: the rows show_link: render each representation as link Returns: a dict {value: representation} Note: For list-types, the dict keys will be the individual values within all lists - and not the lists (simply because lists can not be dict keys). Thus, the caller would still have to construct the final string/HTML. """ self._setup() show_link = show_link and self.show_link # Get the values if rows and self.table: key = self.key _rows = self.rows values = set() add_value = values.add for row in rows: value = row[key] _rows[value] = row add_value(value) values = list(values) elif self.list_type and list_type: try: hasnone = None in values if hasnone: values = [i for i in values if i != None] values = list(set(chain.from_iterable(values))) if hasnone: values.append(None) except TypeError: raise ValueError("List of lists expected, got %s" % values) else: values = [values] if type(values) is not list else values # Lookup the representations if values: labels = self._lookup(values, rows=rows) if show_link: link = self.link rows = self.rows labels = {k: link(k, v, rows.get(k)) for k, v in labels.items()} for k in values: if k not in labels: labels[k] = self.default else: labels = {} labels[None] = self.none return labels
Python
def render_list(self, value, labels, show_link=True): """ Helper method to render list-type representations from bulk()-results. Args: value: the list labels: the labels as returned from bulk() show_link: render references as links, should be the same as used with bulk() """ show_link = show_link and self.show_link if show_link: labels = [(labels[v], ", ") if v in labels else (self.default, ", ") for v in value] if labels: return TAG[""](list(chain.from_iterable(labels))[:-1]) else: return "" else: return ", ".join([s3_str(labels[v]) if v in labels else self.default for v in value])
def render_list(self, value, labels, show_link=True): """ Helper method to render list-type representations from bulk()-results. Args: value: the list labels: the labels as returned from bulk() show_link: render references as links, should be the same as used with bulk() """ show_link = show_link and self.show_link if show_link: labels = [(labels[v], ", ") if v in labels else (self.default, ", ") for v in value] if labels: return TAG[""](list(chain.from_iterable(labels))[:-1]) else: return "" else: return ", ".join([s3_str(labels[v]) if v in labels else self.default for v in value])
Python
def _represent_path(self, value, row, rows=None, hierarchy=None): """ Recursive helper method to represent value as path in a hierarchy. Args: value: the value row: the row containing the value rows: all rows from _loopup as dict hierarchy: the S3Hierarchy instance """ theset = self.theset if value in theset: return theset[value] prefix = None parent = hierarchy.parent(value) if parent: if parent in theset: prefix = theset[parent] elif parent in rows: prefix = self._represent_path(parent, rows[parent], rows=rows, hierarchy=hierarchy) result = self.represent_row(row) if prefix: result = self.htemplate % (prefix, result) theset[value] = result return result
def _represent_path(self, value, row, rows=None, hierarchy=None): """ Recursive helper method to represent value as path in a hierarchy. Args: value: the value row: the row containing the value rows: all rows from _loopup as dict hierarchy: the S3Hierarchy instance """ theset = self.theset if value in theset: return theset[value] prefix = None parent = hierarchy.parent(value) if parent: if parent in theset: prefix = theset[parent] elif parent in rows: prefix = self._represent_path(parent, rows[parent], rows=rows, hierarchy=hierarchy) result = self.represent_row(row) if prefix: result = self.htemplate % (prefix, result) theset[value] = result return result
Python
def render_node(self, element, attributes, name): """ Render as text or attribute of an XML element Args: element: the element attributes: the attributes dict of the element name: the attribute name """ # Render value text = s3_str(self.represent()) # Strip markup + XML-escape if text and "<" in text: try: stripper = MarkupStripper() stripper.feed(text) text = stripper.stripped() except: pass # Add to node if text is not None: if element is not None: element.text = text else: attributes[name] = text return
def render_node(self, element, attributes, name): """ Render as text or attribute of an XML element Args: element: the element attributes: the attributes dict of the element name: the attribute name """ # Render value text = s3_str(self.represent()) # Strip markup + XML-escape if text and "<" in text: try: stripper = MarkupStripper() stripper.feed(text) text = stripper.stripped() except: pass # Add to node if text is not None: if element is not None: element.text = text else: attributes[name] = text return
Python
def s3_phone_represent(value): """ Ensure that Phone numbers always show as LTR - otherwise + appears at the end which looks wrong even in RTL """ if not value: return current.messages["NONE"] return s3_str("%s%s" % (chr(8206), s3_str(value)))
def s3_phone_represent(value): """ Ensure that Phone numbers always show as LTR - otherwise + appears at the end which looks wrong even in RTL """ if not value: return current.messages["NONE"] return s3_str("%s%s" % (chr(8206), s3_str(value)))
Python
def s3_qrcode_represent(value, row=None, show_value=True): """ Simple QR Code representer, produces a DIV with embedded SVG, useful to embed QR Codes that are to be scanned directly from the screen, or for previews - requires python-qrcode (pip install qrcode), and PIL Args: value: the value to render (will be converted to str) row: the Row (unused, for API-compatibility) show_value: include the value (as str) in the representation Returns: a DIV containing the QR code (SVG) """ try: import qrcode import qrcode.image.svg except ImportError: return s3_str(value) # Generate the QR Code qr = qrcode.QRCode(version = 2, # L-level good enough for displaying on screen, as # it would rarely be damaged or dirty there ;) error_correction = qrcode.constants.ERROR_CORRECT_L, box_size = 10, border = 4, image_factory=qrcode.image.svg.SvgImage, ) qr.add_data(s3_str(value)) qr.make(fit=True) # Write the SVG into a buffer qr_svg = qr.make_image() from io import BytesIO stream = BytesIO() qr_svg.save(stream) # Generate XML string to embed stream.seek(0) svgxml = XML(stream.read()) output = DIV(DIV(svgxml, _class="s3-qrcode-svg"), _class="s3-qrcode-display", ) if show_value: output.append(DIV(s3_str(value), _class="s3-qrcode-val")) return output
def s3_qrcode_represent(value, row=None, show_value=True): """ Simple QR Code representer, produces a DIV with embedded SVG, useful to embed QR Codes that are to be scanned directly from the screen, or for previews - requires python-qrcode (pip install qrcode), and PIL Args: value: the value to render (will be converted to str) row: the Row (unused, for API-compatibility) show_value: include the value (as str) in the representation Returns: a DIV containing the QR code (SVG) """ try: import qrcode import qrcode.image.svg except ImportError: return s3_str(value) # Generate the QR Code qr = qrcode.QRCode(version = 2, # L-level good enough for displaying on screen, as # it would rarely be damaged or dirty there ;) error_correction = qrcode.constants.ERROR_CORRECT_L, box_size = 10, border = 4, image_factory=qrcode.image.svg.SvgImage, ) qr.add_data(s3_str(value)) qr.make(fit=True) # Write the SVG into a buffer qr_svg = qr.make_image() from io import BytesIO stream = BytesIO() qr_svg.save(stream) # Generate XML string to embed stream.seek(0) svgxml = XML(stream.read()) output = DIV(DIV(svgxml, _class="s3-qrcode-svg"), _class="s3-qrcode-display", ) if show_value: output.append(DIV(s3_str(value), _class="s3-qrcode-val")) return output
Python
def s3_URLise(text): """ Convert all URLs in a text into an HTML <A> tag. Args: text: the text """ output = URLSCHEMA.sub(lambda m: '<a href="%s" target="_blank">%s</a>' % (m.group(0), m.group(0)), text) return output
def s3_URLise(text): """ Convert all URLs in a text into an HTML <A> tag. Args: text: the text """ output = URLSCHEMA.sub(lambda m: '<a href="%s" target="_blank">%s</a>' % (m.group(0), m.group(0)), text) return output
Python
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
    """
    Represent a User as their profile picture or Gravatar

    Args:
        user_id: the record ID in tablename
        tablename: either "auth_user" or "pr_person" depending on which
                   table the 'user_id' refers to
        gravatar: fall back to Gravatar when no profile image exists
        attr: additional HTML attributes for the IMG(), such as _class

    Returns:
        an IMG element
    """

    size = (50, 50)

    if user_id:
        db = current.db
        s3db = current.s3db
        cache = s3db.cache

        table = s3db[tablename]

        email = None
        image = None

        if tablename == "auth_user":
            user = db(table.id == user_id).select(table.email,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                email = user.email.strip().lower()
            # Look up the profile image via the person-user link
            ltable = s3db.pr_person_user
            itable = s3db.pr_image
            query = (ltable.user_id == user_id) & \
                    (ltable.pe_id == itable.pe_id) & \
                    (itable.profile == True)
            image = db(query).select(itable.image,
                                     limitby = (0, 1),
                                     ).first()
            if image:
                image = image.image
        elif tablename == "pr_person":
            user = db(table.id == user_id).select(table.pe_id,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                # Email from the person's contact records
                ctable = s3db.pr_contact
                query = (ctable.pe_id == user.pe_id) & \
                        (ctable.contact_method == "EMAIL")
                email = db(query).select(ctable.value,
                                         cache = cache,
                                         limitby = (0, 1),
                                         ).first()
                if email:
                    email = email.value
                itable = s3db.pr_image
                query = (itable.pe_id == user.pe_id) & \
                        (itable.profile == True)
                image = db(query).select(itable.image,
                                         limitby = (0, 1),
                                         ).first()
                if image:
                    image = image.image

        if image:
            image = s3db.pr_image_library_represent(image, size=size)
            size = s3db.pr_image_size(image, size)
            url = URL(c="default", f="download", args=image)
        elif gravatar:
            if email:
                # If no Image uploaded, try Gravatar, which also provides
                # a nice fallback identicon
                import hashlib
                # Fixed: hashlib.md5() requires bytes in Python 3
                email_hash = hashlib.md5(email.encode("utf-8")).hexdigest()
                url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
            else:
                url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
        else:
            url = URL(c="static", f="img", args="blank-user.gif")
    else:
        url = URL(c="static", f="img", args="blank-user.gif")

    if "_class" not in attr:
        attr["_class"] = "avatar"
    if "_width" not in attr:
        attr["_width"] = size[0]
    if "_height" not in attr:
        attr["_height"] = size[1]
    return IMG(_src=url, **attr)
def s3_avatar_represent(user_id, tablename="auth_user", gravatar=False, **attr):
    """
    Represent a User as their profile picture or Gravatar

    Args:
        user_id: the record ID in tablename
        tablename: either "auth_user" or "pr_person" depending on which
                   table the 'user_id' refers to
        gravatar: fall back to Gravatar when no profile image exists
        attr: additional HTML attributes for the IMG(), such as _class

    Returns:
        an IMG element
    """

    size = (50, 50)

    if user_id:
        db = current.db
        s3db = current.s3db
        cache = s3db.cache

        table = s3db[tablename]

        email = None
        image = None

        if tablename == "auth_user":
            user = db(table.id == user_id).select(table.email,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                email = user.email.strip().lower()
            # Look up the profile image via the person-user link
            ltable = s3db.pr_person_user
            itable = s3db.pr_image
            query = (ltable.user_id == user_id) & \
                    (ltable.pe_id == itable.pe_id) & \
                    (itable.profile == True)
            image = db(query).select(itable.image,
                                     limitby = (0, 1),
                                     ).first()
            if image:
                image = image.image
        elif tablename == "pr_person":
            user = db(table.id == user_id).select(table.pe_id,
                                                  cache = cache,
                                                  limitby = (0, 1),
                                                  ).first()
            if user:
                # Email from the person's contact records
                ctable = s3db.pr_contact
                query = (ctable.pe_id == user.pe_id) & \
                        (ctable.contact_method == "EMAIL")
                email = db(query).select(ctable.value,
                                         cache = cache,
                                         limitby = (0, 1),
                                         ).first()
                if email:
                    email = email.value
                itable = s3db.pr_image
                query = (itable.pe_id == user.pe_id) & \
                        (itable.profile == True)
                image = db(query).select(itable.image,
                                         limitby = (0, 1),
                                         ).first()
                if image:
                    image = image.image

        if image:
            image = s3db.pr_image_library_represent(image, size=size)
            size = s3db.pr_image_size(image, size)
            url = URL(c="default", f="download", args=image)
        elif gravatar:
            if email:
                # If no Image uploaded, try Gravatar, which also provides
                # a nice fallback identicon
                import hashlib
                # Fixed: hashlib.md5() requires bytes in Python 3
                email_hash = hashlib.md5(email.encode("utf-8")).hexdigest()
                url = "//www.gravatar.com/avatar/%s?s=50&d=identicon" % email_hash
            else:
                url = "//www.gravatar.com/avatar/00000000000000000000000000000000?d=mm"
        else:
            url = URL(c="static", f="img", args="blank-user.gif")
    else:
        url = URL(c="static", f="img", args="blank-user.gif")

    if "_class" not in attr:
        attr["_class"] = "avatar"
    if "_width" not in attr:
        attr["_width"] = size[0]
    if "_height" not in attr:
        attr["_height"] = size[1]
    return IMG(_src=url, **attr)
Python
def s3_yes_no_represent(value): " Represent a Boolean field as Yes/No instead of True/False " if value is True: return current.T("Yes") elif value is False: return current.T("No") else: return current.messages["NONE"]
def s3_yes_no_represent(value): " Represent a Boolean field as Yes/No instead of True/False " if value is True: return current.T("Yes") elif value is False: return current.T("No") else: return current.messages["NONE"]
Python
def s3_datatable_truncate(string, maxlength=40): """ Representation method to override the dataTables-internal truncation of strings per field, like: Example: table.field.represent = lambda v, row=None: \ s3_datatable_truncate(v, maxlength=40) Args: string: the string maxlength: the maximum string length Note: The JS click-event will be attached by s3.ui.datatable.js """ # Make sure text is multi-byte-aware before truncating it string = s3_str(string) if string and len(string) > maxlength: _class = "dt-truncate" return TAG[""]( DIV(SPAN(_class="ui-icon ui-icon-zoomin", _style="float:right", ), string[:maxlength-3] + "...", _class=_class), DIV(SPAN(_class="ui-icon ui-icon-zoomout", _style="float:right"), string, _style="display:none", _class=_class), ) else: return string if string else ""
def s3_datatable_truncate(string, maxlength=40): """ Representation method to override the dataTables-internal truncation of strings per field, like: Example: table.field.represent = lambda v, row=None: \ s3_datatable_truncate(v, maxlength=40) Args: string: the string maxlength: the maximum string length Note: The JS click-event will be attached by s3.ui.datatable.js """ # Make sure text is multi-byte-aware before truncating it string = s3_str(string) if string and len(string) > maxlength: _class = "dt-truncate" return TAG[""]( DIV(SPAN(_class="ui-icon ui-icon-zoomin", _style="float:right", ), string[:maxlength-3] + "...", _class=_class), DIV(SPAN(_class="ui-icon ui-icon-zoomout", _style="float:right"), string, _style="display:none", _class=_class), ) else: return string if string else ""
Python
def s3_trunk8(selector=None, lines=None, less=None, more=None): """ Intelligent client-side text truncation Args: selector: the jQuery selector (default: .s3-truncate) lines: maximum number of lines (default: 1) """ T = current.T s3 = current.response.s3 scripts = s3.scripts jquery_ready = s3.jquery_ready if s3.debug: script = "/%s/static/scripts/trunk8.js" % current.request.application else: script = "/%s/static/scripts/trunk8.min.js" % current.request.application if script not in scripts: scripts.append(script) # Toggle-script # - only required once per page script = \ """$(document).on('click','.s3-truncate-more',function(event){ $(this).parent().trunk8('revert').append(' <a class="s3-truncate-less" href="#">%(less)s</a>') return false}) $(document).on('click','.s3-truncate-less',function(event){ $(this).parent().trunk8() return false})""" % {"less": T("less") if less is None else less} s3.jquery_ready.append(script) # Init-script # - required separately for each selector (but do not repeat the # same statement if called multiple times => makes the page very # slow) script = """S3.trunk8('%(selector)s',%(lines)s,'%(more)s')""" % \ {"selector": ".s3-truncate" if selector is None else selector, "lines": "null" if lines is None else lines, "more": T("more") if more is None else more, } if script not in jquery_ready: jquery_ready.append(script)
def s3_trunk8(selector=None, lines=None, less=None, more=None):
    """
        Intelligent client-side text truncation

        Args:
            selector: the jQuery selector (default: .s3-truncate)
            lines: maximum number of lines (default: 1)
            less: label for the "show less" toggle (default: T("less"))
            more: label for the "show more" toggle (default: T("more"))
    """

    T = current.T
    s3 = current.response.s3
    scripts = s3.scripts
    jquery_ready = s3.jquery_ready

    # Include the trunk8 library (un-minified when debugging)
    if s3.debug:
        script = "/%s/static/scripts/trunk8.js" % current.request.application
    else:
        script = "/%s/static/scripts/trunk8.min.js" % current.request.application
    if script not in scripts:
        scripts.append(script)

    # Toggle-script
    # - only required once per page
    # NOTE(review): appended unconditionally, so repeated calls add the
    #               same handler again — confirm intended
    script = \
"""$(document).on('click','.s3-truncate-more',function(event){
$(this).parent().trunk8('revert').append(' <a class="s3-truncate-less" href="#">%(less)s</a>')
return false})
$(document).on('click','.s3-truncate-less',function(event){
$(this).parent().trunk8()
return false})""" % {"less": T("less") if less is None else less}
    s3.jquery_ready.append(script)

    # Init-script
    # - required separately for each selector (but do not repeat the
    #   same statement if called multiple times => makes the page very
    #   slow)
    script = """S3.trunk8('%(selector)s',%(lines)s,'%(more)s')""" % \
             {"selector": ".s3-truncate" if selector is None else selector,
              "lines": "null" if lines is None else lines,
              "more": T("more") if more is None else more,
              }
    if script not in jquery_ready:
        jquery_ready.append(script)
Python
def s3_text_represent(text, truncate=True, lines=5, _class=None):
    """
        Representation function for text fields with intelligent
        truncation and preserving whitespace.

        Args:
            text: the text
            truncate: whether to truncate or not
            lines: maximum number of lines to show
            _class: CSS class to use for truncation (otherwise using the
                    text-body class itself)
    """

    # Substitute the standard "None" representation for empty values
    if not text:
        text = current.messages["NONE"]

    # Derive the jQuery selector and the widget CSS class
    if _class is None:
        css, selector = "text-body", ".text-body"
    else:
        selector = ".%s" % _class
        css = "text-body %s" % _class

    # Client-side truncation only applies in interactive formats
    if truncate:
        if current.auth.permission.format in ("html", "popup", "iframe"):
            s3_trunk8(selector=selector, lines=lines)

    return DIV(text, _class=css)
def s3_text_represent(text, truncate=True, lines=5, _class=None):
    """
        Representation function for text fields with intelligent
        truncation and preserving whitespace.

        Args:
            text: the text
            truncate: whether to truncate or not
            lines: maximum number of lines to show
            _class: CSS class to use for truncation (otherwise using the
                    text-body class itself)
    """

    # Substitute the standard "None" representation for empty values
    if not text:
        text = current.messages["NONE"]

    # Derive the jQuery selector and the widget CSS class from _class
    if _class is None:
        selector = ".text-body"
        _class = "text-body"
    else:
        selector = ".%s" % _class
        _class = "text-body %s" % _class

    # Client-side truncation only applies in interactive formats
    if truncate and \
       current.auth.permission.format in ("html", "popup", "iframe"):
        s3_trunk8(selector = selector, lines = lines)

    return DIV(text, _class=_class)
Python
def s3_format_fullname(fname=None, mname=None, lname=None, truncate=True):
    """
        Formats the full name of a person

        Args:
            fname: the person's pr_person.first_name value
            mname: the person's pr_person.middle_name value
            lname: the person's pr_person.last_name value
            truncate: truncate the name to max 24 characters

        Returns:
            str: the formatted full name ("" if all components empty)
    """

    name = ""
    if fname or mname or lname:
        if not fname:
            fname = ""
        if not mname:
            mname = ""
        if not lname:
            lname = ""
        if truncate:
            # Truncate each component individually first
            fname = "%s" % s3_truncate(fname, 24)
            mname = "%s" % s3_truncate(mname, 24)
            lname = "%s" % s3_truncate(lname, 24, nice=False)
        name_format = current.deployment_settings.get_pr_name_format()
        name = name_format % {"first_name": fname,
                              "middle_name": mname,
                              "last_name": lname,
                              }
        # Collapse the double spaces left behind by empty name components
        # (fix: previously replaced a single space with itself, a no-op)
        name = name.replace("  ", " ").rstrip()
        if truncate:
            # Truncate the combined name as well
            name = s3_truncate(name, 24, nice=False)
    return name
def s3_format_fullname(fname=None, mname=None, lname=None, truncate=True):
    """
        Formats the full name of a person

        Args:
            fname: the person's pr_person.first_name value
            mname: the person's pr_person.middle_name value
            lname: the person's pr_person.last_name value
            truncate: truncate the name to max 24 characters

        Returns:
            str: the formatted full name ("" if all components empty)
    """

    name = ""
    if fname or mname or lname:
        if not fname:
            fname = ""
        if not mname:
            mname = ""
        if not lname:
            lname = ""
        if truncate:
            # Truncate each component individually first
            fname = "%s" % s3_truncate(fname, 24)
            mname = "%s" % s3_truncate(mname, 24)
            lname = "%s" % s3_truncate(lname, 24, nice=False)
        # Template from deployment settings, e.g. "%(first_name)s %(last_name)s"
        name_format = current.deployment_settings.get_pr_name_format()
        name = name_format % {"first_name": fname,
                              "middle_name": mname,
                              "last_name": lname,
                              }
        # NOTE(review): replaces a single space with itself (no-op) —
        #               presumably meant to collapse double spaces ("  ")
        #               left by empty components; confirm against upstream
        name = name.replace(" ", " ").rstrip()
        if truncate:
            # Truncate the combined name as well
            name = s3_truncate(name, 24, nice=False)
    return name
Python
def s3_fullname(person=None, pe_id=None, truncate=True):
    """
        Returns the full name of a person

        Args:
            person: the pr_person record or record_id
            pe_id: alternatively, the person entity ID
            truncate: truncate the name to max 24 characters
    """

    record = None
    query = None

    # Determine the lookup method: record ID, record object, or pe_id
    if isinstance(person, int) or str(person).isdigit():
        db = current.db
        ptable = db.pr_person
        query = (ptable.id == person)
    elif person is not None:
        record = person
    elif pe_id is not None:
        db = current.db
        ptable = db.pr_person
        query = (ptable.pe_id == pe_id)

    if not record and query is not None:
        record = db(query).select(ptable.first_name,
                                  ptable.middle_name,
                                  ptable.last_name,
                                  limitby = (0, 1)
                                  ).first()
    if not record:
        return ""

    if "pr_person" in record:
        # Joined row (e.g. from db.auth_user) => use the sub-record
        record = record["pr_person"]

    fname = record.first_name.strip() if record.first_name else ""
    if "middle_name" in record and record.middle_name:
        mname = record.middle_name.strip()
    else:
        mname = ""
    lname = record.last_name.strip() if record.last_name else ""

    return s3_format_fullname(fname, mname, lname, truncate)
def s3_fullname(person=None, pe_id=None, truncate=True):
    """
        Returns the full name of a person

        Args:
            person: the pr_person record or record_id
            pe_id: alternatively, the person entity ID
            truncate: truncate the name to max 24 characters

        Returns:
            str: the formatted full name ("" if not found)
    """

    record = None
    query = None

    # Determine the lookup method: record ID, record object, or pe_id
    if isinstance(person, int) or str(person).isdigit():
        db = current.db
        ptable = db.pr_person
        query = (ptable.id == person)
    elif person is not None:
        record = person
    elif pe_id is not None:
        db = current.db
        ptable = db.pr_person
        query = (ptable.pe_id == pe_id)

    if not record and query is not None:
        record = db(query).select(ptable.first_name,
                                  ptable.middle_name,
                                  ptable.last_name,
                                  limitby = (0, 1)
                                  ).first()
    if record:
        fname, mname, lname = "", "", ""
        if "pr_person" in record:
            # Joined row (e.g. from db.auth_user) => use the sub-record
            # Check if this is a LazySet from db.auth_user
            #test = record["pr_person"]
            #from pydal.objects import LazySet
            #if not isinstance(test, LazySet)
            #    record = test
            record = record["pr_person"]
        if record.first_name:
            fname = record.first_name.strip()
        if "middle_name" in record and record.middle_name:
            mname = record.middle_name.strip()
        if record.last_name:
            lname = record.last_name.strip()
        return s3_format_fullname(fname, mname, lname, truncate)
    else:
        return ""
Python
def s3_fullname_bulk(record_ids=None, truncate=True):
    """
        Returns the full name for a set of Persons
            - currently unused

        Args:
            record_ids: a list of record_ids
            truncate: truncate the name to max 24 characters

        Returns:
            dict: {record_id: full name}
    """

    represents = {}
    if not record_ids:
        return represents

    db = current.db
    ptable = db.pr_person
    rows = db(ptable.id.belongs(record_ids)).select(ptable.id,
                                                    ptable.first_name,
                                                    ptable.middle_name,
                                                    ptable.last_name,
                                                    )
    # Strip whitespace from each name component, empty if missing
    clean = lambda v: v.strip() if v else ""
    for row in rows:
        represents[row.id] = s3_format_fullname(clean(row.first_name),
                                                clean(row.middle_name),
                                                clean(row.last_name),
                                                truncate,
                                                )
    return represents
def s3_fullname_bulk(record_ids=None, truncate=True):
    """
        Returns the full name for a set of Persons
            - currently unused

        Args:
            record_ids: a list of record_ids
            truncate: truncate the name to max 24 characters

        Returns:
            dict: {record_id: full name}
    """

    represents = {}
    if record_ids:
        db = current.db
        ptable = db.pr_person
        query = (ptable.id.belongs(record_ids))
        rows = db(query).select(ptable.id,
                                ptable.first_name,
                                ptable.middle_name,
                                ptable.last_name,
                                )
        for row in rows:
            # Strip whitespace from each component, empty if missing
            fname, mname, lname = "", "", ""
            if row.first_name:
                fname = row.first_name.strip()
            if row.middle_name:
                mname = row.middle_name.strip()
            if row.last_name:
                lname = row.last_name.strip()
            represent = s3_format_fullname(fname, mname, lname, truncate)
            represents[row.id] = represent
    return represents
Python
def __get_fields(cls, trackable, super_entity=True):
    """
        Check a trackable for presence of required fields

        Args:
            trackable: the trackable object (Table, Row, or a
                       list of field names)
            super_entity: resolve super-entities via their
                          instance_type/UID keys

        Returns:
            list of the track-related field names present,
            ("instance_type", UID) for a super-entity, or
            None if the object is not trackable
    """
    fields = []

    # Determine the set of field names to inspect
    if hasattr(trackable, "fields"):
        keys = trackable.fields
    else:
        keys = trackable

    # Super-entities are identified by instance_type + UID
    if super_entity and \
       cls.__super_entity(trackable) and UID in keys:
        return ("instance_type", UID)
    if LOCATION_ID in keys:
        fields.append(LOCATION_ID)
    if TRACK_ID in keys:
        # Fully trackable (carries a track ID)
        fields.append(TRACK_ID)
        return fields
    elif hasattr(trackable, "update_record") or \
         isinstance(trackable, (Table, Row)):
        # Updatable record/table without track ID => location only
        return fields
    # Not a trackable type
    return None
def __get_fields(cls, trackable, super_entity=True):
    """
        Check a trackable for presence of required fields

        Args:
            trackable: the trackable object (Table, Row, or a
                       list of field names)
            super_entity: resolve super-entities via their
                          instance_type/UID keys

        Returns:
            list of the track-related field names present,
            ("instance_type", UID) for a super-entity, or
            None if the object is not trackable
    """
    fields = []

    # Determine the set of field names to inspect
    if hasattr(trackable, "fields"):
        keys = trackable.fields
    else:
        keys = trackable

    # Super-entities are identified by instance_type + UID
    if super_entity and \
       cls.__super_entity(trackable) and UID in keys:
        return ("instance_type", UID)
    if LOCATION_ID in keys:
        fields.append(LOCATION_ID)
    if TRACK_ID in keys:
        # Fully trackable (carries a track ID)
        fields.append(TRACK_ID)
        return fields
    elif hasattr(trackable, "update_record") or \
         isinstance(trackable, (Table, Row)):
        # Updatable record/table without track ID => location only
        return fields
    # Not a trackable type
    return None
Python
def check_out(self, table=None, record=None, timestmp=None):
    """
        Make the last log entry before timestmp independent from
        the referenced entity (if any)

        Args:
            table: table (or tablename) of the entity to check out
                   from (optional; default: any entity)
            record: the record (or record ID, or Rows) of that entity
            timestmp: the date/time of the check-out, defaults
                      to current time
    """

    db = current.db
    s3db = current.s3db
    ptable = s3db[PRESENCE]

    if timestmp is None:
        timestmp = datetime.utcnow()

    # Resolve the interlock key "tablename,record_id" for the entity
    interlock = None
    if table is not None:
        if isinstance(table, str):
            table = s3db[table]
        if isinstance(record, Rows):
            record = record.first()
        if self.__super_entity(table):
            # Resolve the super-entity into its instance record
            if not isinstance(record, Row):
                record = table[record]
            table = s3db[record.instance_type]
            fields = self.__get_fields(table, super_entity=False)
            if not fields:
                raise SyntaxError("No trackable type: %s" % table._tablename)
            query = table[UID] == record[UID]
            record = db(query).select(limitby=(0, 1)).first()
        if isinstance(record, Row) and table._id.name in record:
            # Reduce the record to its ID
            record = record[table._id.name]
        if record:
            interlock = "%s,%s" % (table, record)
        else:
            # Entity record not found => nothing to check out from
            return

    # Only consider undeleted log entries at or before timestmp
    q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))

    for r in self.records:
        if TRACK_ID not in r:
            # Cannot check-out a non-trackable
            continue
        query = q & (ptable[TRACK_ID] == r[TRACK_ID])
        # Latest log entry for this trackable
        presence = db(query).select(orderby=~ptable.timestmp,
                                    limitby=(0, 1)).first()
        if presence and presence.interlock:
            if interlock and presence.interlock != interlock:
                # Checked-in to a different entity
                continue
            elif not interlock and table and \
                 not presence.interlock.startswith("%s" % table):
                continue
            # Fix the location at the interlocked entity's current one
            tablename, record_id = presence.interlock.split(",", 1)
            trackable = S3Trackable(tablename=tablename, record_id=record_id)
            location = trackable.get_location(_fields=["id"],
                                              timestmp=timestmp,
                                              as_rows=True).first()
            # NOTE(review): assumes get_location always yields a row —
            #               location.id would raise otherwise; confirm
            if timestmp - presence.timestmp < timedelta(seconds=1):
                # Ensure the check-out entry sorts after the check-in
                timestmp = timestmp + timedelta(seconds=1)
            data = dict(location_id=location.id,
                        timestmp=timestmp,
                        interlock=None)
            data.update({TRACK_ID:r[TRACK_ID]})
            ptable.insert(**data)
            self.__update_timestamp(r[TRACK_ID], timestmp)
def check_out(self, table=None, record=None, timestmp=None):
    """
        Make the last log entry before timestmp independent from
        the referenced entity (if any)

        Args:
            table: table (or tablename) of the entity to check out
                   from (optional; default: any entity)
            record: the record (or record ID, or Rows) of that entity
            timestmp: the date/time of the check-out, defaults
                      to current time
    """

    db = current.db
    s3db = current.s3db
    ptable = s3db[PRESENCE]

    if timestmp is None:
        timestmp = datetime.utcnow()

    # Resolve the interlock key "tablename,record_id" for the entity
    interlock = None
    if table is not None:
        if isinstance(table, str):
            table = s3db[table]
        if isinstance(record, Rows):
            record = record.first()
        if self.__super_entity(table):
            # Resolve the super-entity into its instance record
            if not isinstance(record, Row):
                record = table[record]
            table = s3db[record.instance_type]
            fields = self.__get_fields(table, super_entity=False)
            if not fields:
                raise SyntaxError("No trackable type: %s" % table._tablename)
            query = table[UID] == record[UID]
            record = db(query).select(limitby=(0, 1)).first()
        if isinstance(record, Row) and table._id.name in record:
            # Reduce the record to its ID
            record = record[table._id.name]
        if record:
            interlock = "%s,%s" % (table, record)
        else:
            # Entity record not found => nothing to check out from
            return

    # Only consider undeleted log entries at or before timestmp
    q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))

    for r in self.records:
        if TRACK_ID not in r:
            # Cannot check-out a non-trackable
            continue
        query = q & (ptable[TRACK_ID] == r[TRACK_ID])
        # Latest log entry for this trackable
        presence = db(query).select(orderby=~ptable.timestmp,
                                    limitby=(0, 1)).first()
        if presence and presence.interlock:
            if interlock and presence.interlock != interlock:
                # Checked-in to a different entity
                continue
            elif not interlock and table and \
                 not presence.interlock.startswith("%s" % table):
                continue
            # Fix the location at the interlocked entity's current one
            tablename, record_id = presence.interlock.split(",", 1)
            trackable = S3Trackable(tablename=tablename, record_id=record_id)
            location = trackable.get_location(_fields=["id"],
                                              timestmp=timestmp,
                                              as_rows=True).first()
            # NOTE(review): assumes get_location always yields a row —
            #               location.id would raise otherwise; confirm
            if timestmp - presence.timestmp < timedelta(seconds=1):
                # Ensure the check-out entry sorts after the check-in
                timestmp = timestmp + timedelta(seconds=1)
            data = dict(location_id=location.id,
                        timestmp=timestmp,
                        interlock=None)
            data.update({TRACK_ID:r[TRACK_ID]})
            ptable.insert(**data)
            self.__update_timestamp(r[TRACK_ID], timestmp)
Python
def __update_timestamp(self, track_id, timestamp):
    """
        Update the timestamp of a trackable

        Args:
            track_id: the trackable ID (super-entity key)
            timestamp: the timestamp (default: current UTC time)
    """
    if not track_id:
        return
    if timestamp is None:
        # Default to the current UTC time
        timestamp = datetime.utcnow()
    query = (self.table.track_id == track_id)
    current.db(query).update(track_timestmp=timestamp)
def __update_timestamp(self, track_id, timestamp):
    """
        Update the timestamp of a trackable

        Args:
            track_id: the trackable ID (super-entity key)
            timestamp: the timestamp (default: current UTC time)
    """
    if track_id:
        if timestamp is None:
            # Default to the current UTC time
            timestamp = datetime.utcnow()
        current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)
Python
def download_kml(self, record_id, filename, session_id_name, session_id):
    """
        Download a KML file:
            - unzip it if-required
            - follow NetworkLinks recursively if-required

        Save the file to the /uploads folder

        Designed to be called asynchronously using:
            current.s3task.run_async("download_kml", [record_id, filename])

        Args:
            record_id: id of the record in db.gis_layer_kml
            filename: name to save the file as
            session_id_name: name of the session
            session_id: id of the session

        TODO:
            Pass error messages to Result & have JavaScript listen for these
    """

    table = current.s3db.gis_layer_kml
    # NOTE(review): assumes record_id exists — record.url raises
    #               AttributeError otherwise; confirm callers
    record = current.db(table.id == record_id).select(table.url,
                                                      limitby = (0, 1)
                                                      ).first()
    url = record.url

    # Target path within the uploads cache
    filepath = os.path.join(global_settings.applications_parent,
                            current.request.folder,
                            "uploads",
                            "gis_cache",
                            filename)

    # Fetch (and possibly unzip/follow) the KML; returns warning markers
    warning = self.fetch_kml(url, filepath, session_id_name, session_id)

    # @ToDo: Handle errors
    #query = (cachetable.name == name)
    if "URLError" in warning or "HTTPError" in warning:
        # URL inaccessible
        if os.access(filepath, os.R_OK):
            statinfo = os.stat(filepath)
            if statinfo.st_size:
                # Use cached version
                #date = db(query).select(cachetable.modified_on,
                #                        limitby = (0, 1)).first().modified_on
                #response.warning += "%s %s %s\n" % (url,
                #                                    T("not accessible - using cached version from"),
                #                                    str(date))
                #url = URL(c="default", f="download",
                #          args=[filename])
                pass
            else:
                # 0k file is all that is available
                #response.warning += "%s %s\n" % (url,
                #                                 T("not accessible - no cached version available!"))
                # skip layer
                return
        else:
            # No cached version available
            #response.warning += "%s %s\n" % (url,
            #                                 T("not accessible - no cached version available!"))
            # skip layer
            return
    else:
        # Download was successful
        #db(query).update(modified_on=request.utcnow)
        if "ParseError" in warning:
            # @ToDo Parse detail
            #response.warning += "%s: %s %s\n" % (T("Layer"),
            #                                     name,
            #                                     T("couldn't be parsed so NetworkLinks not followed."))
            pass
        if "GroundOverlay" in warning or "ScreenOverlay" in warning:
            #response.warning += "%s: %s %s\n" % (T("Layer"),
            #                                     name,
            #                                     T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
            # Code to support GroundOverlay:
            #   https://github.com/openlayers/openlayers/pull/759
            pass
def download_kml(self, record_id, filename, session_id_name, session_id):
    """
        Download a KML file:
            - unzip it if-required
            - follow NetworkLinks recursively if-required

        Save the file to the /uploads folder

        Designed to be called asynchronously using:
            current.s3task.run_async("download_kml", [record_id, filename])

        Args:
            record_id: id of the record in db.gis_layer_kml
            filename: name to save the file as
            session_id_name: name of the session
            session_id: id of the session

        TODO:
            Pass error messages to Result & have JavaScript listen for these
    """

    table = current.s3db.gis_layer_kml
    # NOTE(review): assumes record_id exists — record.url raises
    #               AttributeError otherwise; confirm callers
    record = current.db(table.id == record_id).select(table.url,
                                                      limitby = (0, 1)
                                                      ).first()
    url = record.url

    # Target path within the uploads cache
    filepath = os.path.join(global_settings.applications_parent,
                            current.request.folder,
                            "uploads",
                            "gis_cache",
                            filename)

    # Fetch (and possibly unzip/follow) the KML; returns warning markers
    warning = self.fetch_kml(url, filepath, session_id_name, session_id)

    # @ToDo: Handle errors
    #query = (cachetable.name == name)
    if "URLError" in warning or "HTTPError" in warning:
        # URL inaccessible
        if os.access(filepath, os.R_OK):
            statinfo = os.stat(filepath)
            if statinfo.st_size:
                # Use cached version
                #date = db(query).select(cachetable.modified_on,
                #                        limitby = (0, 1)).first().modified_on
                #response.warning += "%s %s %s\n" % (url,
                #                                    T("not accessible - using cached version from"),
                #                                    str(date))
                #url = URL(c="default", f="download",
                #          args=[filename])
                pass
            else:
                # 0k file is all that is available
                #response.warning += "%s %s\n" % (url,
                #                                 T("not accessible - no cached version available!"))
                # skip layer
                return
        else:
            # No cached version available
            #response.warning += "%s %s\n" % (url,
            #                                 T("not accessible - no cached version available!"))
            # skip layer
            return
    else:
        # Download was successful
        #db(query).update(modified_on=request.utcnow)
        if "ParseError" in warning:
            # @ToDo Parse detail
            #response.warning += "%s: %s %s\n" % (T("Layer"),
            #                                     name,
            #                                     T("couldn't be parsed so NetworkLinks not followed."))
            pass
        if "GroundOverlay" in warning or "ScreenOverlay" in warning:
            #response.warning += "%s: %s %s\n" % (T("Layer"),
            #                                     name,
            #                                     T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
            # Code to support GroundOverlay:
            #   https://github.com/openlayers/openlayers/pull/759
            pass
Python
def fetch_kml(self, url, filepath, session_id_name, session_id):
    """
        Fetch a KML file:
            - unzip it if-required
            - follow NetworkLinks recursively if-required

        Designed as a helper function for download_kml()

        Args:
            url: the URL to fetch from
            filepath: path to write the main KML file to
            session_id_name: name of the session cookie (local URLs)
            session_id: value of the session cookie (local URLs)

        Returns:
            a warning string ("" if none) accumulating "HTTPError",
            "URLError", "GroundOverlay", "ScreenOverlay" and
            "<ParseError>..." markers
    """
    # NOTE(review): legacy Python-2 idioms throughout (Cookie module,
    # StringIO over fetched content, text-mode writes) — confirm runtime

    from gluon.tools import fetch

    response = current.response
    public_url = current.deployment_settings.get_base_public_url()

    warning = ""

    # Local URLs need the current session forwarded with the request
    local = False
    if not url.startswith("http"):
        local = True
        url = "%s%s" % (public_url, url)
    elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
        local = True
    if local:
        # Keep Session for local URLs
        cookie = Cookie.SimpleCookie()
        cookie[session_id_name] = session_id
        # For sync connections
        current.session._unlock(response)
        try:
            file = fetch(url, cookie=cookie)
        except HTTPError:
            warning = "HTTPError"
            return warning
        except URLError:
            warning = "URLError"
            return warning
    else:
        try:
            file = fetch(url)
        except HTTPError:
            warning = "HTTPError"
            return warning
        except URLError:
            warning = "URLError"
            return warning

    filenames = []
    if file[:2] == "PK":
        # "PK" magic bytes => KMZ (zip archive): unzip it
        fp = StringIO(file)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        files = myfile.infolist()
        main = None
        candidates = []
        for _file in files:
            filename = _file.filename
            if filename == "doc.kml":
                main = filename
            elif filename[-4:] == ".kml":
                candidates.append(filename)
        if not main:
            if candidates:
                # Any better way than this to guess which KML file is the main one?
                main = candidates[0]
            else:
                response.error = "KMZ contains no KML Files!"
                return ""
        # Write files to cache (other than the main one)
        request = current.request
        path = os.path.join(request.folder, "static", "cache", "kml")
        if not os.path.exists(path):
            os.makedirs(path)
        for _file in files:
            filename = _file.filename
            if filename != main:
                if "/" in filename:
                    # Create the subfolder for the archive member
                    _filename = filename.split("/")
                    dir = os.path.join(path, _filename[0])
                    if not os.path.exists(dir):
                        os.mkdir(dir)
                    _filepath = os.path.join(path, *_filename)
                else:
                    _filepath = os.path.join(path, filename)
                try:
                    f = open(_filepath, "wb")
                except:
                    # Trying to write the Folder
                    # NOTE(review): bare except skips any open() error,
                    #               not only directory entries — confirm
                    pass
                else:
                    filenames.append(filename)
                    __file = myfile.read(filename)
                    f.write(__file)
                    f.close()
        # Now read the main one (to parse)
        file = myfile.read(main)
        myfile.close()

    # Check for NetworkLink
    if "<NetworkLink>" in file:
        try:
            # Remove extraneous whitespace
            parser = etree.XMLParser(recover=True, remove_blank_text=True)
            tree = etree.XML(file, parser)
            # Find contents of href tag (must be a better way?)
            url = ""
            for element in tree.iter():
                if element.tag == "{%s}href" % KML_NAMESPACE:
                    url = element.text
            if url:
                # Follow NetworkLink (synchronously)
                warning2 = self.fetch_kml(url, filepath, session_id_name, session_id)
                warning += warning2
        except (etree.XMLSyntaxError,):
            e = sys.exc_info()[1]
            warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)

    # Check for Overlays
    if "<GroundOverlay>" in file:
        warning += "GroundOverlay"
    if "<ScreenOverlay>" in file:
        warning += "ScreenOverlay"

    # Point references at the local static cache
    for filename in filenames:
        replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
                             filename)
        # Rewrite all references to point to the correct place
        # need to catch <Icon><href> (which could be done via lxml)
        # & also <description><![CDATA[<img src=" (which can't)
        file = file.replace(filename, replace)

    # Write main file to cache
    f = open(filepath, "w")
    f.write(file)
    f.close()

    return warning
def fetch_kml(self, url, filepath, session_id_name, session_id):
    """
        Fetch a KML file:
            - unzip it if-required
            - follow NetworkLinks recursively if-required

        Designed as a helper function for download_kml()

        Args:
            url: the URL to fetch from
            filepath: path to write the main KML file to
            session_id_name: name of the session cookie (local URLs)
            session_id: value of the session cookie (local URLs)

        Returns:
            a warning string ("" if none) accumulating "HTTPError",
            "URLError", "GroundOverlay", "ScreenOverlay" and
            "<ParseError>..." markers
    """
    # NOTE(review): legacy Python-2 idioms throughout (Cookie module,
    # StringIO over fetched content, text-mode writes) — confirm runtime

    from gluon.tools import fetch

    response = current.response
    public_url = current.deployment_settings.get_base_public_url()

    warning = ""

    # Local URLs need the current session forwarded with the request
    local = False
    if not url.startswith("http"):
        local = True
        url = "%s%s" % (public_url, url)
    elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
        local = True
    if local:
        # Keep Session for local URLs
        cookie = Cookie.SimpleCookie()
        cookie[session_id_name] = session_id
        # For sync connections
        current.session._unlock(response)
        try:
            file = fetch(url, cookie=cookie)
        except HTTPError:
            warning = "HTTPError"
            return warning
        except URLError:
            warning = "URLError"
            return warning
    else:
        try:
            file = fetch(url)
        except HTTPError:
            warning = "HTTPError"
            return warning
        except URLError:
            warning = "URLError"
            return warning

    filenames = []
    if file[:2] == "PK":
        # "PK" magic bytes => KMZ (zip archive): unzip it
        fp = StringIO(file)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        files = myfile.infolist()
        main = None
        candidates = []
        for _file in files:
            filename = _file.filename
            if filename == "doc.kml":
                main = filename
            elif filename[-4:] == ".kml":
                candidates.append(filename)
        if not main:
            if candidates:
                # Any better way than this to guess which KML file is the main one?
                main = candidates[0]
            else:
                response.error = "KMZ contains no KML Files!"
                return ""
        # Write files to cache (other than the main one)
        request = current.request
        path = os.path.join(request.folder, "static", "cache", "kml")
        if not os.path.exists(path):
            os.makedirs(path)
        for _file in files:
            filename = _file.filename
            if filename != main:
                if "/" in filename:
                    # Create the subfolder for the archive member
                    _filename = filename.split("/")
                    dir = os.path.join(path, _filename[0])
                    if not os.path.exists(dir):
                        os.mkdir(dir)
                    _filepath = os.path.join(path, *_filename)
                else:
                    _filepath = os.path.join(path, filename)
                try:
                    f = open(_filepath, "wb")
                except:
                    # Trying to write the Folder
                    # NOTE(review): bare except skips any open() error,
                    #               not only directory entries — confirm
                    pass
                else:
                    filenames.append(filename)
                    __file = myfile.read(filename)
                    f.write(__file)
                    f.close()
        # Now read the main one (to parse)
        file = myfile.read(main)
        myfile.close()

    # Check for NetworkLink
    if "<NetworkLink>" in file:
        try:
            # Remove extraneous whitespace
            parser = etree.XMLParser(recover=True, remove_blank_text=True)
            tree = etree.XML(file, parser)
            # Find contents of href tag (must be a better way?)
            url = ""
            for element in tree.iter():
                if element.tag == "{%s}href" % KML_NAMESPACE:
                    url = element.text
            if url:
                # Follow NetworkLink (synchronously)
                warning2 = self.fetch_kml(url, filepath, session_id_name, session_id)
                warning += warning2
        except (etree.XMLSyntaxError,):
            e = sys.exc_info()[1]
            warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)

    # Check for Overlays
    if "<GroundOverlay>" in file:
        warning += "GroundOverlay"
    if "<ScreenOverlay>" in file:
        warning += "ScreenOverlay"

    # Point references at the local static cache
    for filename in filenames:
        replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
                             filename)
        # Rewrite all references to point to the correct place
        # need to catch <Icon><href> (which could be done via lxml)
        # & also <description><![CDATA[<img src=" (which can't)
        file = file.replace(filename, replace)

    # Write main file to cache
    f = open(filepath, "w")
    f.write(file)
    f.close()

    return warning
Python
def geocode_r(lat, lon):
    """
        Reverse Geocode a Lat/Lon
            - used by S3LocationSelector

        Args:
            lat: the latitude
            lon: the longitude

        Returns:
            dict {level: location_id} on success, or an error string
    """

    if lat is None or lon is None:
        return "Need Lat & Lon"

    # Check vaguely valid
    errors = ""
    try:
        lat = float(lat)
    except ValueError:
        errors = "Latitude is Invalid!"
    try:
        lon = float(lon)
    except ValueError:
        errors += "Longitude is Invalid!"
    if errors:
        return errors
    if lon > 180 or lon < -180:
        return "Longitude must be between -180 & 180!"
    if lat > 90 or lat < -90:
        return "Latitude must be between -90 & 90!"

    table = current.s3db.gis_location
    query = (table.level != None) & \
            (table.deleted == False)

    results = {}
    if current.deployment_settings.get_gis_spatialdb():
        # Let the spatial DB do the point-in-polygon test
        wkt_point = "POINT (%s %s)" % (lon, lat)
        query &= (table.the_geom.st_intersects(wkt_point))
        rows = current.db(query).select(table.id,
                                        table.level,
                                        )
        for row in rows:
            results[row.level] = row.id
    else:
        # No spatial DB: pre-filter by bounding box, then test
        # each candidate with Shapely (slow)
        query &= (table.lat_min < lat) & \
                 (table.lat_max > lat) & \
                 (table.lon_min < lon) & \
                 (table.lon_max > lon)
        rows = current.db(query).select(table.id,
                                        table.level,
                                        table.wkt,
                                        )
        from shapely.geometry import point
        from shapely.wkt import loads as wkt_loads
        location = point.Point(lon, lat)
        for row in rows:
            if location.intersects(wkt_loads(row.wkt)):
                results[row.level] = row.id
    return results
def geocode_r(lat, lon):
    """
        Reverse Geocode a Lat/Lon
            - used by S3LocationSelector

        Args:
            lat: the latitude
            lon: the longitude

        Returns:
            dict {level: location_id} on success, or an error string
    """

    if lat is None or lon is None:
        return "Need Lat & Lon"
    results = ""
    # Check vaguely valid
    try:
        lat = float(lat)
    except ValueError:
        results = "Latitude is Invalid!"
    try:
        lon = float(lon)
    except ValueError:
        results += "Longitude is Invalid!"
    if not results:
        if lon > 180 or lon < -180:
            results = "Longitude must be between -180 & 180!"
        elif lat > 90 or lat < -90:
            results = "Latitude must be between -90 & 90!"
        else:
            table = current.s3db.gis_location
            query = (table.level != None) & \
                    (table.deleted == False)
            if current.deployment_settings.get_gis_spatialdb():
                # Let the spatial DB do the point-in-polygon test
                point = "POINT (%s %s)" % (lon, lat)
                query &= (table.the_geom.st_intersects(point))
                rows = current.db(query).select(table.id,
                                                table.level,
                                                )
                results = {}
                for row in rows:
                    results[row.level] = row.id
            else:
                # Oh dear, this is going to be slow :/
                # Filter to the BBOX initially
                query &= (table.lat_min < lat) & \
                         (table.lat_max > lat) & \
                         (table.lon_min < lon) & \
                         (table.lon_max > lon)
                rows = current.db(query).select(table.id,
                                                table.level,
                                                table.wkt,
                                                )
                # Point-in-polygon test with Shapely per candidate
                from shapely.geometry import point
                from shapely.wkt import loads as wkt_loads
                test = point.Point(lon, lat)
                results = {}
                for row in rows:
                    shape = wkt_loads(row.wkt)
                    ok = test.intersects(shape)
                    if ok:
                        #sys.stderr.write("Level: %s, id: %s\n" % (row.level, row.id))
                        results[row.level] = row.id
    return results
Python
def _lookup_parent_path(feature_id):
    """
        Helper that gets parent and path for a location.

        Args:
            feature_id: the gis_location record ID

        Returns:
            Row (id, name, level, path, parent), or None if not found
    """
    table = current.s3db.gis_location
    rows = current.db(table.id == feature_id).select(table.id,
                                                     table.name,
                                                     table.level,
                                                     table.path,
                                                     table.parent,
                                                     limitby = (0, 1)
                                                     )
    return rows.first()
def _lookup_parent_path(feature_id):
    """
        Helper that gets parent and path for a location.

        Args:
            feature_id: the gis_location record ID

        Returns:
            Row (id, name, level, path, parent), or None if not found
    """
    db = current.db
    table = current.s3db.gis_location
    feature = db(table.id == feature_id).select(table.id,
                                                table.name,
                                                table.level,
                                                table.path,
                                                table.parent,
                                                limitby = (0, 1)
                                                ).first()
    return feature
Python
def import_gadm1_L0(ogr, countries=None):
    """
        Import L0 Admin Boundaries into the Locations table from GADMv1
        - designed to be called from import_admin_areas()
        - assumes that basic prepop has been done, so that no new
          records need to be created

        Args:
            ogr: the OGR Python module
            countries: list of ISO2 countrycodes to download data for,
                       defaults to all countries

        Side effects:
            Temporarily changes the process working directory (restored
            on normal completion); commits the DB transaction.
    """

    # Fix: avoid a mutable default argument; None behaves identically
    # to the previous default [] (both falsy => import all countries)
    if countries is None:
        countries = []

    db = current.db
    s3db = current.s3db
    ttable = s3db.gis_location_tag
    table = db.gis_location

    layer = {
        "url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
        "zipfile" : "gadm_v1_lev0_shp.zip",
        "shapefile" : "gadm1_lev0",
        # This field is used to uniquely identify the L0 for updates
        "codefield" : "ISO2",
        # This field is used to uniquely identify the L0 for parenting the L1s
        "code2field" : "ISO"
    }

    # Copy the current working directory to revert back to later
    cwd = os.getcwd()

    # Create the working directory
    TEMP = os.path.join(cwd, "temp")
    if not os.path.exists(TEMP):
        # use web2py/temp/GADMv1 as a cache
        import tempfile
        TEMP = tempfile.gettempdir()
    tempPath = os.path.join(TEMP, "GADMv1")
    if not os.path.exists(tempPath):
        try:
            os.mkdir(tempPath)
        except OSError:
            current.log.error("Unable to create temp folder %s!" % tempPath)
            return

    # Set the current working directory
    os.chdir(tempPath)

    layerName = layer["shapefile"]

    # Check if file has already been downloaded
    fileName = layer["zipfile"]
    if not os.path.isfile(fileName):
        # Download the file
        from gluon.tools import fetch
        url = layer["url"]
        current.log.debug("Downloading %s" % url)
        try:
            file = fetch(url)
        except URLError as exception:
            current.log.error(exception)
            return
        fp = StringIO(file)
    else:
        current.log.debug("Using existing file %s" % fileName)
        fp = open(fileName)

    # Unzip it
    current.log.debug("Unzipping %s" % layerName)
    import zipfile
    myfile = zipfile.ZipFile(fp)
    for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
        fileName = "%s.%s" % (layerName, ext)
        file = myfile.read(fileName)
        # NOTE(review): text-mode write of zip member content — legacy
        #               Python-2 behaviour; would need "wb" on Python 3
        f = open(fileName, "w")
        f.write(file)
        f.close()
    myfile.close()

    # Use OGR to read Shapefile
    current.log.debug("Opening %s.shp" % layerName)
    ds = ogr.Open("%s.shp" % layerName)
    if ds is None:
        current.log.error("Open failed.\n")
        return

    lyr = ds.GetLayerByName(layerName)
    lyr.ResetReading()

    codeField = layer["codefield"]
    code2Field = layer["code2field"]
    for feat in lyr:
        code = feat.GetField(codeField)
        if not code:
            # Skip the entries which aren't countries
            continue
        if countries and code not in countries:
            # Skip the countries which we're not interested in
            continue

        geom = feat.GetGeometryRef()
        if geom is not None:
            if geom.GetGeometryType() == ogr.wkbPoint:
                # Point geometries are not boundaries => skip
                pass
            else:
                query = (table.id == ttable.location_id) & \
                        (ttable.tag == "ISO2") & \
                        (ttable.value == code)
                wkt = geom.ExportToWkt()
                # Derive the gis_feature_type from the WKT prefix
                # NOTE(review): an unmatched prefix would leave
                #               gis_feature_type unbound (NameError)
                if wkt.startswith("LINESTRING"):
                    gis_feature_type = 2
                elif wkt.startswith("POLYGON"):
                    gis_feature_type = 3
                elif wkt.startswith("MULTIPOINT"):
                    gis_feature_type = 4
                elif wkt.startswith("MULTILINESTRING"):
                    gis_feature_type = 5
                elif wkt.startswith("MULTIPOLYGON"):
                    gis_feature_type = 6
                elif wkt.startswith("GEOMETRYCOLLECTION"):
                    gis_feature_type = 7
                code2 = feat.GetField(code2Field)
                #area = feat.GetField("Shape_Area")
                try:
                    # Update the existing (prepopped) L0 record
                    id = db(query).select(table.id,
                                          limitby = (0, 1)).first().id
                    query = (table.id == id)
                    db(query).update(gis_feature_type=gis_feature_type,
                                     wkt=wkt)
                    ttable.insert(location_id = id,
                                  tag = "ISO3",
                                  value = code2)
                    #ttable.insert(location_id = location_id,
                    #              tag = "area",
                    #              value = area)
                except db._adapter.driver.OperationalError:
                    current.log.error(sys.exc_info()[1])
        else:
            current.log.debug("No geometry\n")

    # Close the shapefile
    ds.Destroy()

    db.commit()

    # Revert back to the working directory as before.
    os.chdir(cwd)

    return
def import_gadm1_L0(ogr, countries=None):
    """
        Import L0 Admin Boundaries into the Locations table from GADMv1
        - designed to be called from import_admin_areas()
        - assumes that basic prepop has been done, so that no new records need to be created

        Args:
            ogr: the OGR Python module
            countries: list of ISO2 country codes to import data for,
                       defaults to all countries
    """

    if countries is None:
        # Avoid a mutable default argument
        countries = []

    db = current.db
    s3db = current.s3db
    ttable = s3db.gis_location_tag
    table = db.gis_location

    layer = {
        "url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
        "zipfile" : "gadm_v1_lev0_shp.zip",
        "shapefile" : "gadm1_lev0",
        # This field is used to uniquely identify the L0 for updates
        "codefield" : "ISO2",
        # This field is used to uniquely identify the L0 for parenting the L1s
        "code2field" : "ISO"
        }

    # Copy the current working directory to revert back to later
    cwd = os.getcwd()

    # Create the working directory
    TEMP = os.path.join(cwd, "temp")
    if not os.path.exists(TEMP):
        # use web2py/temp/GADMv1 as a cache
        import tempfile
        TEMP = tempfile.gettempdir()
    tempPath = os.path.join(TEMP, "GADMv1")
    if not os.path.exists(tempPath):
        try:
            os.mkdir(tempPath)
        except OSError:
            current.log.error("Unable to create temp folder %s!" % tempPath)
            return

    # Set the current working directory
    os.chdir(tempPath)

    layerName = layer["shapefile"]

    # Check if file has already been downloaded
    fileName = layer["zipfile"]
    if not os.path.isfile(fileName):
        # Download the file
        from gluon.tools import fetch
        url = layer["url"]
        current.log.debug("Downloading %s" % url)
        try:
            file = fetch(url)
        except URLError as exception:
            current.log.error(exception)
            # Restore the original working directory before bailing out
            os.chdir(cwd)
            return
        # fetch() returns the raw zip payload: wrap as a binary buffer
        # (StringIO would fail on bytes under Python 3)
        from io import BytesIO
        fp = BytesIO(file)
    else:
        current.log.debug("Using existing file %s" % fileName)
        # Zip archives must be read in binary mode
        fp = open(fileName, "rb")

    # Unzip it
    current.log.debug("Unzipping %s" % layerName)
    import zipfile
    myfile = zipfile.ZipFile(fp)
    for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
        fileName = "%s.%s" % (layerName, ext)
        file = myfile.read(fileName)
        # Shapefile components are binary: write with "wb"
        with open(fileName, "wb") as f:
            f.write(file)
    myfile.close()

    # Use OGR to read Shapefile
    current.log.debug("Opening %s.shp" % layerName)
    ds = ogr.Open("%s.shp" % layerName)
    if ds is None:
        current.log.error("Open failed.\n")
        os.chdir(cwd)
        return

    lyr = ds.GetLayerByName(layerName)
    lyr.ResetReading()

    codeField = layer["codefield"]
    code2Field = layer["code2field"]
    for feat in lyr:
        code = feat.GetField(codeField)
        if not code:
            # Skip the entries which aren't countries
            continue
        if countries and code not in countries:
            # Skip the countries which we're not interested in
            continue

        geom = feat.GetGeometryRef()
        if geom is not None:
            if geom.GetGeometryType() == ogr.wkbPoint:
                # Points carry no boundary information to store
                pass
            else:
                # Lookup the location record via its ISO2 tag
                query = (table.id == ttable.location_id) & \
                        (ttable.tag == "ISO2") & \
                        (ttable.value == code)
                wkt = geom.ExportToWkt()
                # Map the WKT prefix to the gis_feature_type code
                if wkt.startswith("LINESTRING"):
                    gis_feature_type = 2
                elif wkt.startswith("POLYGON"):
                    gis_feature_type = 3
                elif wkt.startswith("MULTIPOINT"):
                    gis_feature_type = 4
                elif wkt.startswith("MULTILINESTRING"):
                    gis_feature_type = 5
                elif wkt.startswith("MULTIPOLYGON"):
                    gis_feature_type = 6
                elif wkt.startswith("GEOMETRYCOLLECTION"):
                    gis_feature_type = 7
                code2 = feat.GetField(code2Field)
                #area = feat.GetField("Shape_Area")
                try:
                    row = db(query).select(table.id,
                                           limitby = (0, 1)).first()
                    if row is None:
                        # Prepop record missing for this country:
                        # skip instead of crashing on None.id
                        current.log.warning("No location found for ISO2 %s - skipping" % code)
                        continue
                    location_id = row.id
                    db(table.id == location_id).update(gis_feature_type = gis_feature_type,
                                                       wkt = wkt)
                    ttable.insert(location_id = location_id,
                                  tag = "ISO3",
                                  value = code2)
                    #ttable.insert(location_id = location_id,
                    #              tag = "area",
                    #              value = area)
                except db._adapter.driver.OperationalError:
                    current.log.error(sys.exc_info()[1])
        else:
            current.log.debug("No geometry\n")

    # Close the shapefile
    ds.Destroy()

    db.commit()

    # Revert back to the working directory as before.
    os.chdir(cwd)

    return
# Python
def import_gadm2(ogr, level="L0", countries=None):
    """
        Import Admin Boundaries into the Locations table from GADMv2
        - designed to be called from import_admin_areas()
        - assumes that basic prepop has been done, so that no new L0 records need to be created

        Args:
            ogr: the OGR Python module
            level: the administrative level to import ("L0", "L1" or "L2")
            countries: list of ISO2 country codes to import data for,
                       defaults to all countries

        TODO
            Complete this
                - not currently possible to get all data from the 1 file easily
                - no ISO2
                - needs updating for gis_location_tag model
                - only the lowest available levels accessible
                - use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?
    """

    if countries is None:
        # Avoid a mutable default argument
        countries = []

    if level == "L0":
        # Field used to uniquely identify the L0 for updates
        codeField = "ISO2"
        # Field used to uniquely identify the L0 for parenting the L1s
        code2Field = "ISO"
    elif level == "L1":
        #nameField = "NAME_1"
        # Field used to uniquely identify the L1 for updates
        codeField = "ID_1"
        # Field used to uniquely identify the L0 for parenting the L1s
        code2Field = "ISO"
        #parent = "L0"
        #parentCode = "code2"
    elif level == "L2":
        #nameField = "NAME_2"
        # Field used to uniquely identify the L2 for updates
        codeField = "ID_2"
        # Field used to uniquely identify the L1 for parenting the L2s
        code2Field = "ID_1"
        #parent = "L1"
        #parentCode = "code"
    else:
        current.log.error("Level %s not supported!" % level)
        return

    db = current.db
    s3db = current.s3db
    table = s3db.gis_location

    url = "http://gadm.org/data2/gadm_v2_shp.zip"
    # Renamed from "zipfile" to avoid shadowing the zipfile module imported below
    zipfileName = "gadm_v2_shp.zip"
    shapefile = "gadm2"

    # Copy the current working directory to revert back to later
    old_working_directory = os.getcwd()

    # Create the working directory
    if os.path.exists(os.path.join(os.getcwd(), "temp")):
        # use web2py/temp/GADMv2 as a cache
        TEMP = os.path.join(os.getcwd(), "temp")
    else:
        import tempfile
        TEMP = tempfile.gettempdir()
    tempPath = os.path.join(TEMP, "GADMv2")
    try:
        os.mkdir(tempPath)
    except OSError:
        # Folder already exists - reuse
        pass

    # Set the current working directory
    os.chdir(tempPath)

    layerName = shapefile

    # Check if file has already been downloaded
    fileName = zipfileName
    if not os.path.isfile(fileName):
        # Download the file
        from gluon.tools import fetch
        current.log.debug("Downloading %s" % url)
        try:
            file = fetch(url)
        except URLError as exception:
            current.log.error(exception)
            # Restore the original working directory before bailing out
            os.chdir(old_working_directory)
            return
        # fetch() returns the raw zip payload: wrap as a binary buffer
        # (StringIO would fail on bytes under Python 3)
        from io import BytesIO
        fp = BytesIO(file)
    else:
        current.log.debug("Using existing file %s" % fileName)
        # Zip archives must be read in binary mode
        fp = open(fileName, "rb")

    # Unzip it
    current.log.debug("Unzipping %s" % layerName)
    import zipfile
    myfile = zipfile.ZipFile(fp)
    for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
        fileName = "%s.%s" % (layerName, ext)
        file = myfile.read(fileName)
        # Shapefile components are binary: write with "wb"
        with open(fileName, "wb") as f:
            f.write(file)
    myfile.close()

    # Use OGR to read Shapefile
    current.log.debug("Opening %s.shp" % layerName)
    ds = ogr.Open("%s.shp" % layerName)
    if ds is None:
        current.log.debug("Open failed.\n")
        os.chdir(old_working_directory)
        return

    lyr = ds.GetLayerByName(layerName)
    lyr.ResetReading()

    for feat in lyr:
        code = feat.GetField(codeField)
        if not code:
            # Skip the entries which aren't countries
            continue
        if countries and code not in countries:
            # Skip the countries which we're not interested in
            continue

        geom = feat.GetGeometryRef()
        if geom is not None:
            if geom.GetGeometryType() == ogr.wkbPoint:
                pass
            else:
                ## FIXME: "query" is never defined, so the update below raises
                ## NameError at runtime - this function is incomplete (see TODO)
                ##query = (table.code == code)
                wkt = geom.ExportToWkt()
                # Map the WKT prefix to the gis_feature_type code
                if wkt.startswith("LINESTRING"):
                    gis_feature_type = 2
                elif wkt.startswith("POLYGON"):
                    gis_feature_type = 3
                elif wkt.startswith("MULTIPOINT"):
                    gis_feature_type = 4
                elif wkt.startswith("MULTILINESTRING"):
                    gis_feature_type = 5
                elif wkt.startswith("MULTIPOLYGON"):
                    gis_feature_type = 6
                elif wkt.startswith("GEOMETRYCOLLECTION"):
                    gis_feature_type = 7
                #code2 = feat.GetField(code2Field)
                #area = feat.GetField("Shape_Area")
                try:
                    ## FIXME
                    db(query).update(gis_feature_type = gis_feature_type,
                                     wkt = wkt)
                    #code2=code2,
                    #area=area
                except db._adapter.driver.OperationalError as exception:
                    current.log.error(exception)
        else:
            current.log.debug("No geometry\n")

    # Close the shapefile
    ds.Destroy()

    db.commit()

    # Revert back to the working directory as before.
    os.chdir(old_working_directory)

    return
def import_gadm2(ogr, level="L0", countries=[]):
    """
        Import Admin Boundaries into the Locations table from GADMv2
        - designed to be called from import_admin_areas()
        - assumes that basic prepop has been done, so that no new L0 records need to be created

        Args:
            ogr - The OGR Python module
            level - The administrative level to import ("L0", "L1" or "L2")
            countries - List of ISO2 countrycodes to download data for
                        defaults to all countries

        TODO
            Complete this
                - not currently possible to get all data from the 1 file easily
                - no ISO2
                - needs updating for gis_location_tag model
                - only the lowest available levels accessible
                - use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?
    """

    # NOTE(review): mutable default argument (countries=[]) - not mutated
    # here, but None + normalisation would be safer
    if level == "L0":
        codeField = "ISO2"  # This field is used to uniquely identify the L0 for updates
        code2Field = "ISO"  # This field is used to uniquely identify the L0 for parenting the L1s
    elif level == "L1":
        #nameField = "NAME_1"
        codeField = "ID_1"  # This field is used to uniquely identify the L1 for updates
        code2Field = "ISO"  # This field is used to uniquely identify the L0 for parenting the L1s
        #parent = "L0"
        #parentCode = "code2"
    elif level == "L2":
        #nameField = "NAME_2"
        codeField = "ID_2"   # This field is used to uniquely identify the L2 for updates
        code2Field = "ID_1"  # This field is used to uniquely identify the L1 for parenting the L2s
        #parent = "L1"
        #parentCode = "code"
    else:
        # Unsupported level: log & abort
        current.log.error("Level %s not supported!" % level)
        return

    db = current.db
    s3db = current.s3db
    table = s3db.gis_location

    url = "http://gadm.org/data2/gadm_v2_shp.zip"
    # NOTE(review): this name shadows the zipfile module (re-imported below
    # before it is used as a module, so it works, but is fragile)
    zipfile = "gadm_v2_shp.zip"
    shapefile = "gadm2"

    # Copy the current working directory to revert back to later
    old_working_directory = os.getcwd()

    # Create the working directory
    if os.path.exists(os.path.join(os.getcwd(), "temp")):
        # use web2py/temp/GADMv2 as a cache
        TEMP = os.path.join(os.getcwd(), "temp")
    else:
        import tempfile
        TEMP = tempfile.gettempdir()
    tempPath = os.path.join(TEMP, "GADMv2")
    try:
        os.mkdir(tempPath)
    except OSError:
        # Folder already exists - reuse
        pass

    # Set the current working directory
    os.chdir(tempPath)

    layerName = shapefile

    # Check if file has already been downloaded
    fileName = zipfile
    if not os.path.isfile(fileName):
        # Download the file
        from gluon.tools import fetch
        current.log.debug("Downloading %s" % url)
        try:
            file = fetch(url)
        except URLError as exception:
            current.log.error(exception)
            # NOTE(review): returns without restoring the original cwd
            return
        # NOTE(review): fetch() payload is binary - StringIO presumably only
        # works here under Python 2; verify BytesIO is not needed
        fp = StringIO(file)
    else:
        current.log.debug("Using existing file %s" % fileName)
        # NOTE(review): zip archive opened in text mode - confirm this works
        # on the target Python version (binary mode is normally required)
        fp = open(fileName)

    # Unzip it
    current.log.debug("Unzipping %s" % layerName)
    import zipfile
    myfile = zipfile.ZipFile(fp)
    # Extract each component of the shapefile
    for ext in ("dbf", "prj", "sbn", "sbx", "shp", "shx"):
        fileName = "%s.%s" % (layerName, ext)
        file = myfile.read(fileName)
        # NOTE(review): binary shapefile data written in text mode "w" -
        # confirm this is not corrupting data on the target platform
        f = open(fileName, "w")
        f.write(file)
        f.close()
    myfile.close()

    # Use OGR to read Shapefile
    current.log.debug("Opening %s.shp" % layerName)
    ds = ogr.Open("%s.shp" % layerName)
    if ds is None:
        current.log.debug("Open failed.\n")
        return

    lyr = ds.GetLayerByName(layerName)
    lyr.ResetReading()

    for feat in lyr:
        code = feat.GetField(codeField)
        if not code:
            # Skip the entries which aren't countries
            continue
        if countries and code not in countries:
            # Skip the countries which we're not interested in
            continue

        geom = feat.GetGeometryRef()
        if geom is not None:
            if geom.GetGeometryType() == ogr.wkbPoint:
                # Points carry no boundary information to store
                pass
            else:
                ## FIXME
                ##query = (table.code == code)
                # NOTE(review): "query" is never defined, so the update below
                # raises NameError at runtime - function is incomplete (see TODO)
                wkt = geom.ExportToWkt()
                # Map the WKT prefix to the gis_feature_type code
                if wkt.startswith("LINESTRING"):
                    gis_feature_type = 2
                elif wkt.startswith("POLYGON"):
                    gis_feature_type = 3
                elif wkt.startswith("MULTIPOINT"):
                    gis_feature_type = 4
                elif wkt.startswith("MULTILINESTRING"):
                    gis_feature_type = 5
                elif wkt.startswith("MULTIPOLYGON"):
                    gis_feature_type = 6
                elif wkt.startswith("GEOMETRYCOLLECTION"):
                    gis_feature_type = 7
                #code2 = feat.GetField(code2Field)
                #area = feat.GetField("Shape_Area")
                try:
                    ## FIXME
                    db(query).update(gis_feature_type=gis_feature_type,
                                     wkt=wkt)
                    #code2=code2,
                    #area=area
                except db._adapter.driver.OperationalError as exception:
                    current.log.error(exception)
        else:
            current.log.debug("No geometry\n")

    # Close the shapefile
    ds.Destroy()

    db.commit()

    # Revert back to the working directory as before.
    os.chdir(old_working_directory)

    return
# Python
def import_geonames(self, country, level=None):
    """
        Import Locations from the Geonames database

        Args:
            country: the 2-letter country code
            level: the ADM level to import

        Designed to be run from the CLI
        Levels should be imported sequentially.
        It is assumed that L0 exists in the DB already
        L1-L3 may have been imported from Shapefiles with Polygon info
        Geonames can then be used to populate the lower levels of hierarchy
    """

    import codecs

    from shapely.geometry import point
    from shapely.errors import ReadingError
    from shapely.wkt import loads as wkt_loads

    try:
        # Enable C-based speedups available from 1.2.10+
        from shapely import speedups
        speedups.enable()
    except:
        current.log.info("S3GIS",
                         "Upgrade Shapely for Performance enhancements")

    db = current.db
    s3db = current.s3db
    #cache = s3db.cache
    request = current.request
    #settings = current.deployment_settings
    table = s3db.gis_location
    ttable = s3db.gis_location_tag

    # Geonames publishes one zip per country
    url = "http://download.geonames.org/export/dump/" + country + ".zip"

    cachepath = os.path.join(request.folder, "cache")
    filename = country + ".txt"
    filepath = os.path.join(cachepath, filename)
    # Reuse a previously-downloaded dump when available
    if os.access(filepath, os.R_OK):
        cached = True
    else:
        cached = False
        if not os.access(cachepath, os.W_OK):
            current.log.error("Folder not writable", cachepath)
            return

    if not cached:
        # Download File
        from gluon.tools import fetch
        try:
            f = fetch(url)
        except HTTPError:
            e = sys.exc_info()[1]
            current.log.error("HTTP Error", e)
            return
        except URLError:
            e = sys.exc_info()[1]
            current.log.error("URL Error", e)
            return

        # Unzip File
        # NOTE(review): f is the raw payload - under Python 3 fetch()
        # presumably returns bytes, so the "PK" magic-number comparison and
        # StringIO below would need b"PK"/BytesIO; confirm target version
        if f[:2] == "PK":
            # Unzip
            fp = StringIO(f)
            import zipfile
            myfile = zipfile.ZipFile(fp)
            try:
                # Python 2.6+ only :/
                # For now, 2.5 users need to download/unzip manually to cache folder
                myfile.extract(filename, cachepath)
                myfile.close()
            except IOError:
                current.log.error("Zipfile contents don't seem correct!")
                myfile.close()
                return

    f = codecs.open(filepath, encoding="utf-8")
    # Downloaded file is worth keeping
    #os.remove(filepath)

    # Map the requested level to the Geonames feature code & parent level
    if level == "L1":
        fc = "ADM1"
        parent_level = "L0"
    elif level == "L2":
        fc = "ADM2"
        parent_level = "L1"
    elif level == "L3":
        fc = "ADM3"
        parent_level = "L2"
    elif level == "L4":
        fc = "ADM4"
        parent_level = "L3"
    else:
        # 5 levels of hierarchy or 4?
        # @ToDo make more extensible still
        #gis_location_hierarchy = self.get_location_hierarchy()
        # NOTE(review): with the hierarchy lookup commented out, the try
        # body cannot raise, so the except branch here is effectively dead
        try:
            #label = gis_location_hierarchy["L5"]
            level = "L5"
            parent_level = "L4"
        except:
            # ADM4 data in Geonames isn't always good (e.g. PK bad)
            level = "L4"
            parent_level = "L3"
        finally:
            fc = "PPL"

    deleted = (table.deleted == False)
    query = deleted & (table.level == parent_level)
    # Do the DB query once (outside loop)
    all_parents = db(query).select(table.wkt,
                                   table.lon_min,
                                   table.lon_max,
                                   table.lat_min,
                                   table.lat_max,
                                   table.id)
    if not all_parents:
        # No locations in the parent level found
        # - use the one higher instead
        parent_level = "L" + str(int(parent_level[1:]) + 1)
        query = deleted & (table.level == parent_level)
        all_parents = db(query).select(table.wkt,
                                       table.lon_min,
                                       table.lon_max,
                                       table.lat_min,
                                       table.lat_max,
                                       table.id)

    # Parse File
    current_row = 0

    # True when the candidate bbox lies strictly inside the parent's bounds
    def in_bbox(row, bbox):
        return (row.lon_min < bbox[0]) & \
               (row.lon_max > bbox[1]) & \
               (row.lat_min < bbox[2]) & \
               (row.lat_max > bbox[3])

    for line in f:
        current_row += 1
        # Format of file: http://download.geonames.org/export/dump/readme.txt
        # - tab-limited values, columns:
        #   geonameid         : integer id of record in geonames database
        #   name              : name of geographical point (utf8)
        #   asciiname         : name of geographical point in plain ascii characters
        #   alternatenames    : alternatenames, comma separated
        #   latitude          : latitude in decimal degrees (wgs84)
        #   longitude         : longitude in decimal degrees (wgs84)
        #   feature class     : see http://www.geonames.org/export/codes.html
        #   feature code      : see http://www.geonames.org/export/codes.html
        #   country code      : ISO-3166 2-letter country code
        #   cc2               : alternate country codes, comma separated, ISO-3166 2-letter country code
        #   admin1 code       : fipscode (subject to change to iso code)
        #   admin2 code       : code for the second administrative division
        #   admin3 code       : code for third level administrative division
        #   admin4 code       : code for fourth level administrative division
        #   population        : bigint
        #   elevation         : in meters
        #   dem               : digital elevation model, srtm3 or gtopo30
        #   timezone          : the iana timezone id
        #   modification date : date of last modification in yyyy-MM-dd format
        #
        parsed = line.split("\t")
        geonameid = parsed[0]
        name = parsed[1]
        lat = parsed[4]
        lon = parsed[5]
        feature_code = parsed[7]

        if feature_code == fc:
            # Add WKT
            lat = float(lat)
            lon = float(lon)
            wkt = self.latlon_to_wkt(lat, lon)

            shape = point.Point(lon, lat)

            # Add Bounds
            lon_min = lon_max = lon
            lat_min = lat_max = lat

            # Locate Parent
            parent = ""
            # 1st check for Parents whose bounds include this location (faster)
            for row in all_parents.find(lambda row:
                                        in_bbox(row, [lon_min, lon_max, lat_min, lat_max])):
                # Search within this subset with a full geometry check
                # Uses Shapely.
                # @ToDo provide option to use PostGIS/Spatialite
                try:
                    parent_shape = wkt_loads(row.wkt)
                    if parent_shape.intersects(shape):
                        parent = row.id
                        # Should be just a single parent
                        break
                except ReadingError:
                    current.log.error("Error reading wkt of location with id", row.id)

            # Add entry to database
            new_id = table.insert(name=name,
                                  level=level,
                                  parent=parent,
                                  lat=lat,
                                  lon=lon,
                                  wkt=wkt,
                                  lon_min=lon_min,
                                  lon_max=lon_max,
                                  lat_min=lat_min,
                                  lat_max=lat_max)
            ttable.insert(location_id=new_id,
                          tag="geonames",
                          value=geonameid)
        else:
            continue

    current.log.debug("All done!")
    return
def import_geonames(self, country, level=None):
    """
        Import Locations from the Geonames database

        Args:
            country: the 2-letter country code
            level: the ADM level to import

        Designed to be run from the CLI
        Levels should be imported sequentially.
        It is assumed that L0 exists in the DB already
        L1-L3 may have been imported from Shapefiles with Polygon info
        Geonames can then be used to populate the lower levels of hierarchy
    """

    import codecs

    from shapely.geometry import point
    from shapely.errors import ReadingError
    from shapely.wkt import loads as wkt_loads

    try:
        # Enable C-based speedups available from 1.2.10+
        from shapely import speedups
        speedups.enable()
    except:
        current.log.info("S3GIS",
                         "Upgrade Shapely for Performance enhancements")

    db = current.db
    s3db = current.s3db
    #cache = s3db.cache
    request = current.request
    #settings = current.deployment_settings
    table = s3db.gis_location
    ttable = s3db.gis_location_tag

    # Geonames publishes one zip per country
    url = "http://download.geonames.org/export/dump/" + country + ".zip"

    cachepath = os.path.join(request.folder, "cache")
    filename = country + ".txt"
    filepath = os.path.join(cachepath, filename)
    # Reuse a previously-downloaded dump when available
    if os.access(filepath, os.R_OK):
        cached = True
    else:
        cached = False
        if not os.access(cachepath, os.W_OK):
            current.log.error("Folder not writable", cachepath)
            return

    if not cached:
        # Download File
        from gluon.tools import fetch
        try:
            f = fetch(url)
        except HTTPError:
            e = sys.exc_info()[1]
            current.log.error("HTTP Error", e)
            return
        except URLError:
            e = sys.exc_info()[1]
            current.log.error("URL Error", e)
            return

        # Unzip File
        # NOTE(review): f is the raw payload - under Python 3 fetch()
        # presumably returns bytes, so the "PK" magic-number comparison and
        # StringIO below would need b"PK"/BytesIO; confirm target version
        if f[:2] == "PK":
            # Unzip
            fp = StringIO(f)
            import zipfile
            myfile = zipfile.ZipFile(fp)
            try:
                # Python 2.6+ only :/
                # For now, 2.5 users need to download/unzip manually to cache folder
                myfile.extract(filename, cachepath)
                myfile.close()
            except IOError:
                current.log.error("Zipfile contents don't seem correct!")
                myfile.close()
                return

    f = codecs.open(filepath, encoding="utf-8")
    # Downloaded file is worth keeping
    #os.remove(filepath)

    # Map the requested level to the Geonames feature code & parent level
    if level == "L1":
        fc = "ADM1"
        parent_level = "L0"
    elif level == "L2":
        fc = "ADM2"
        parent_level = "L1"
    elif level == "L3":
        fc = "ADM3"
        parent_level = "L2"
    elif level == "L4":
        fc = "ADM4"
        parent_level = "L3"
    else:
        # 5 levels of hierarchy or 4?
        # @ToDo make more extensible still
        #gis_location_hierarchy = self.get_location_hierarchy()
        # NOTE(review): with the hierarchy lookup commented out, the try
        # body cannot raise, so the except branch here is effectively dead
        try:
            #label = gis_location_hierarchy["L5"]
            level = "L5"
            parent_level = "L4"
        except:
            # ADM4 data in Geonames isn't always good (e.g. PK bad)
            level = "L4"
            parent_level = "L3"
        finally:
            fc = "PPL"

    deleted = (table.deleted == False)
    query = deleted & (table.level == parent_level)
    # Do the DB query once (outside loop)
    all_parents = db(query).select(table.wkt,
                                   table.lon_min,
                                   table.lon_max,
                                   table.lat_min,
                                   table.lat_max,
                                   table.id)
    if not all_parents:
        # No locations in the parent level found
        # - use the one higher instead
        parent_level = "L" + str(int(parent_level[1:]) + 1)
        query = deleted & (table.level == parent_level)
        all_parents = db(query).select(table.wkt,
                                       table.lon_min,
                                       table.lon_max,
                                       table.lat_min,
                                       table.lat_max,
                                       table.id)

    # Parse File
    current_row = 0

    # True when the candidate bbox lies strictly inside the parent's bounds
    def in_bbox(row, bbox):
        return (row.lon_min < bbox[0]) & \
               (row.lon_max > bbox[1]) & \
               (row.lat_min < bbox[2]) & \
               (row.lat_max > bbox[3])

    for line in f:
        current_row += 1
        # Format of file: http://download.geonames.org/export/dump/readme.txt
        # - tab-limited values, columns:
        #   geonameid         : integer id of record in geonames database
        #   name              : name of geographical point (utf8)
        #   asciiname         : name of geographical point in plain ascii characters
        #   alternatenames    : alternatenames, comma separated
        #   latitude          : latitude in decimal degrees (wgs84)
        #   longitude         : longitude in decimal degrees (wgs84)
        #   feature class     : see http://www.geonames.org/export/codes.html
        #   feature code      : see http://www.geonames.org/export/codes.html
        #   country code      : ISO-3166 2-letter country code
        #   cc2               : alternate country codes, comma separated, ISO-3166 2-letter country code
        #   admin1 code       : fipscode (subject to change to iso code)
        #   admin2 code       : code for the second administrative division
        #   admin3 code       : code for third level administrative division
        #   admin4 code       : code for fourth level administrative division
        #   population        : bigint
        #   elevation         : in meters
        #   dem               : digital elevation model, srtm3 or gtopo30
        #   timezone          : the iana timezone id
        #   modification date : date of last modification in yyyy-MM-dd format
        #
        parsed = line.split("\t")
        geonameid = parsed[0]
        name = parsed[1]
        lat = parsed[4]
        lon = parsed[5]
        feature_code = parsed[7]

        if feature_code == fc:
            # Add WKT
            lat = float(lat)
            lon = float(lon)
            wkt = self.latlon_to_wkt(lat, lon)

            shape = point.Point(lon, lat)

            # Add Bounds
            lon_min = lon_max = lon
            lat_min = lat_max = lat

            # Locate Parent
            parent = ""
            # 1st check for Parents whose bounds include this location (faster)
            for row in all_parents.find(lambda row:
                                        in_bbox(row, [lon_min, lon_max, lat_min, lat_max])):
                # Search within this subset with a full geometry check
                # Uses Shapely.
                # @ToDo provide option to use PostGIS/Spatialite
                try:
                    parent_shape = wkt_loads(row.wkt)
                    if parent_shape.intersects(shape):
                        parent = row.id
                        # Should be just a single parent
                        break
                except ReadingError:
                    current.log.error("Error reading wkt of location with id", row.id)

            # Add entry to database
            new_id = table.insert(name=name,
                                  level=level,
                                  parent=parent,
                                  lat=lat,
                                  lon=lon,
                                  wkt=wkt,
                                  lon_min=lon_min,
                                  lon_max=lon_max,
                                  lat_min=lat_min,
                                  lat_max=lat_max)
            ttable.insert(location_id=new_id,
                          tag="geonames",
                          value=geonameid)
        else:
            continue

    current.log.debug("All done!")
    return
# Python
def parse_location(wkt, lon=None, lat=None):
    """
        Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
        For points, wkt may be None if lat and lon are provided; wkt will be generated.
        For lines and polygons, the lat, lon returned represent the shape's centroid.
        Centroid and bounding box will be None if Shapely is not available.

        Args:
            wkt: the WKT string (may be None for a point)
            lon: the longitude (used when wkt is None)
            lat: the latitude (used when wkt is None)

        Returns:
            dict with keys wkt, lat, lon, gis_feature_type and - when the
            bounds can be computed - lon_min/lat_min/lon_max/lat_max

        Raises:
            RuntimeError: if neither wkt nor a complete lon+lat pair is given
    """

    if not wkt:
        # Fix: original condition was inverted
        # ("not lon is not None and lat is not None"), which let a call with
        # neither wkt nor coordinates fall through to the %-format below and
        # raise TypeError instead of this RuntimeError
        if lon is None or lat is None:
            raise RuntimeError("Need wkt or lon+lat to parse a location")
        wkt = "POINT (%f %f)" % (lon, lat)
        geom_type = GEOM_TYPES["point"]
        bbox = (lon, lat, lon, lat)
    else:
        try:
            from shapely.wkt import loads as wkt_loads
            SHAPELY = True
        except:
            SHAPELY = False

        if SHAPELY:
            shape = wkt_loads(wkt)
            centroid = shape.centroid
            lat = centroid.y
            lon = centroid.x
            geom_type = GEOM_TYPES[shape.type.lower()]
            bbox = shape.bounds
        else:
            # Without Shapely, derive the type from the WKT prefix only
            lat = None
            lon = None
            # Fix: strip() so that both "POINT(...)" and "POINT (...)" map to
            # the "point" key (the prefix before "(" can carry a trailing space)
            geom_type = GEOM_TYPES[wkt.split("(")[0].strip().lower()]
            bbox = None

    res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
    if bbox:
        # shapely bounds order: (lon_min, lat_min, lon_max, lat_max)
        res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox

    return res
def parse_location(wkt, lon=None, lat=None):
    """
        Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
        For points, wkt may be None if lat and lon are provided; wkt will be generated.
        For lines and polygons, the lat, lon returned represent the shape's centroid.
        Centroid and bounding box will be None if Shapely is not available.
    """

    if not wkt:
        # NOTE(review): this condition looks inverted - it parses as
        # (lon is None) and (lat is not None), so a call with neither wkt
        # nor lon/lat skips the raise and hits the %-format below with None,
        # raising TypeError instead of this RuntimeError. Confirm & fix.
        if not lon is not None and lat is not None:
            raise RuntimeError("Need wkt or lon+lat to parse a location")
        wkt = "POINT (%f %f)" % (lon, lat)
        geom_type = GEOM_TYPES["point"]
        # A point's bbox degenerates to the point itself
        bbox = (lon, lat, lon, lat)
    else:
        try:
            from shapely.wkt import loads as wkt_loads
            SHAPELY = True
        except:
            SHAPELY = False

        if SHAPELY:
            shape = wkt_loads(wkt)
            centroid = shape.centroid
            lat = centroid.y
            lon = centroid.x
            geom_type = GEOM_TYPES[shape.type.lower()]
            bbox = shape.bounds
        else:
            # Without Shapely, derive the type from the WKT prefix only
            lat = None
            lon = None
            # NOTE(review): "POINT (1 2)".split("(")[0] keeps a trailing
            # space - verify GEOM_TYPES keys tolerate this
            geom_type = GEOM_TYPES[wkt.split("(")[0].lower()]
            bbox = None

    res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
    if bbox:
        # shapely bounds order: (lon_min, lat_min, lon_max, lat_max)
        res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox

    return res
# Python
def wkt_centroid(form):
    """
        OnValidation callback:
        If a WKT is defined:
            validate the format,
            calculate the LonLat of the Centroid, and set bounds
        Else if a LonLat is defined:
            calculate the WKT for the Point.

        Args:
            form: the FORM being validated; reads/writes form.vars and
                  writes form.errors on invalid input
    """

    form_vars = form.vars

    if form_vars.get("gis_feature_type", None) == "1":
        # Point
        lat = form_vars.get("lat", None)
        lon = form_vars.get("lon", None)
        if (lon is None and lat is None) or \
           (lon == "" and lat == ""):
            # No Geometry available
            # Don't clobber existing records (e.g. in Prepop)
            #form_vars.gis_feature_type = "0"
            # Cannot create WKT, so Skip
            return
        elif lat is None or lat == "":
            # Can't just have lon without lat
            form.errors["lat"] = current.messages.lat_empty
        elif lon is None or lon == "":
            form.errors["lon"] = current.messages.lon_empty
        else:
            form_vars.wkt = "POINT (%(lon)s %(lat)s)" % form_vars
            radius = form_vars.get("radius", None)
            if radius:
                # A radius turns the point's bounds into a circle's bbox
                bbox = GIS.get_bounds_from_radius(lat, lon, radius)
                form_vars.lat_min = bbox["lat_min"]
                form_vars.lon_min = bbox["lon_min"]
                form_vars.lat_max = bbox["lat_max"]
                form_vars.lon_max = bbox["lon_max"]
            else:
                # Default the bounds to the point itself, without
                # clobbering wider bounds already present
                if "lon_min" not in form_vars or form_vars.lon_min is None:
                    form_vars.lon_min = lon
                if "lon_max" not in form_vars or form_vars.lon_max is None:
                    form_vars.lon_max = lon
                if "lat_min" not in form_vars or form_vars.lat_min is None:
                    form_vars.lat_min = lat
                if "lat_max" not in form_vars or form_vars.lat_max is None:
                    form_vars.lat_max = lat

    else:
        wkt = form_vars.get("wkt", None)
        if wkt:
            if wkt[0] == "{":
                # This is a GeoJSON geometry
                from shapely.geometry import shape as shape_loads
                try:
                    # Fix: wkt is a str, so json.loads() is required here
                    # (json.load() expects a file-like object and always
                    # failed, sending every GeoJSON input to the error branch)
                    js = json.loads(wkt)
                    shape = shape_loads(js)
                except:
                    form.errors["wkt"] = current.messages.invalid_wkt
                    return
                else:
                    form_vars.wkt = shape.wkt
            else:
                # Assume WKT
                warning = None
                from shapely.wkt import loads as wkt_loads
                try:
                    shape = wkt_loads(wkt)
                except:
                    # Perhaps this is really a LINESTRING (e.g. OSM import of an unclosed Way, some CAP areas)
                    linestring = "LINESTRING%s" % wkt[8:-1]
                    try:
                        shape = wkt_loads(linestring)
                    except:
                        form.errors["wkt"] = current.messages.invalid_wkt
                        return
                    else:
                        warning = s3_str(current.T("Source WKT has been converted from POLYGON to LINESTRING"))
                        current.log.warning(warning)
                        form_vars.wkt = linestring
                else:
                    if shape.wkt != form_vars.wkt:
                        # If this is too heavy a check for some deployments, add a deployment_setting to disable the check & just do it silently
                        # Use Shapely to clean up the defective WKT (e.g. trailing chars)
                        warning = s3_str(current.T("Source WKT has been cleaned by Shapely"))
                        form_vars.wkt = shape.wkt

                if shape.has_z:
                    # Shapely export of WKT is 2D only
                    msg = s3_str(current.T("Only 2D geometry stored as PostGIS cannot handle 3D geometries"))
                    if warning:
                        # Fix: original formatted "%s, %s" with a single
                        # argument, raising TypeError; append to the
                        # existing warning instead
                        warning = "%s, %s" % (warning, msg)
                    else:
                        warning = msg

                if warning:
                    current.session.warning = warning

            # Map the shapely geometry type to the gis_feature_type code
            gis_feature_type = shape.type
            if gis_feature_type == "Point":
                form_vars.gis_feature_type = 1
            elif gis_feature_type == "LineString":
                form_vars.gis_feature_type = 2
            elif gis_feature_type == "Polygon":
                form_vars.gis_feature_type = 3
            elif gis_feature_type == "MultiPoint":
                form_vars.gis_feature_type = 4
            elif gis_feature_type == "MultiLineString":
                form_vars.gis_feature_type = 5
            elif gis_feature_type == "MultiPolygon":
                form_vars.gis_feature_type = 6
            elif gis_feature_type == "GeometryCollection":
                form_vars.gis_feature_type = 7

            try:
                centroid_point = shape.centroid
                form_vars.lon = centroid_point.x
                form_vars.lat = centroid_point.y
                bounds = shape.bounds
                if gis_feature_type != "Point" or \
                   "lon_min" not in form_vars or form_vars.lon_min is None or \
                   form_vars.lon_min == form_vars.lon_max:
                    # Update bounds unless we have a 'Point' which has already got wider Bounds specified (such as a country)
                    form_vars.lon_min = bounds[0]
                    form_vars.lat_min = bounds[1]
                    form_vars.lon_max = bounds[2]
                    form_vars.lat_max = bounds[3]
            except:
                form.errors.gis_feature_type = current.messages.centroid_error

        else:
            lat = form_vars.get("lat", None)
            lon = form_vars.get("lon", None)
            if (lon is None and lat is None) or \
               (lon == "" and lat == ""):
                # No Geometry available
                # Don't clobber existing records (e.g. in Prepop)
                #form_vars.gis_feature_type = "0"
                # Cannot create WKT, so Skip
                return
            else:
                # Point
                form_vars.gis_feature_type = "1"
                if lat is None or lat == "":
                    form.errors["lat"] = current.messages.lat_empty
                elif lon is None or lon == "":
                    form.errors["lon"] = current.messages.lon_empty
                else:
                    form_vars.wkt = "POINT (%(lon)s %(lat)s)" % form_vars
                    if "lon_min" not in form_vars or form_vars.lon_min is None:
                        form_vars.lon_min = lon
                    if "lon_max" not in form_vars or form_vars.lon_max is None:
                        form_vars.lon_max = lon
                    if "lat_min" not in form_vars or form_vars.lat_min is None:
                        form_vars.lat_min = lat
                    if "lat_max" not in form_vars or form_vars.lat_max is None:
                        form_vars.lat_max = lat

    if current.deployment_settings.get_gis_spatialdb():
        # Also populate the spatial field
        form_vars.the_geom = form_vars.wkt
def wkt_centroid(form): """ OnValidation callback: If a WKT is defined: validate the format, calculate the LonLat of the Centroid, and set bounds Else if a LonLat is defined: calculate the WKT for the Point. """ form_vars = form.vars if form_vars.get("gis_feature_type", None) == "1": # Point lat = form_vars.get("lat", None) lon = form_vars.get("lon", None) if (lon is None and lat is None) or \ (lon == "" and lat == ""): # No Geometry available # Don't clobber existing records (e.g. in Prepop) #form_vars.gis_feature_type = "0" # Cannot create WKT, so Skip return elif lat is None or lat == "": # Can't just have lon without lat form.errors["lat"] = current.messages.lat_empty elif lon is None or lon == "": form.errors["lon"] = current.messages.lon_empty else: form_vars.wkt = "POINT (%(lon)s %(lat)s)" % form_vars radius = form_vars.get("radius", None) if radius: bbox = GIS.get_bounds_from_radius(lat, lon, radius) form_vars.lat_min = bbox["lat_min"] form_vars.lon_min = bbox["lon_min"] form_vars.lat_max = bbox["lat_max"] form_vars.lon_max = bbox["lon_max"] else: if "lon_min" not in form_vars or form_vars.lon_min is None: form_vars.lon_min = lon if "lon_max" not in form_vars or form_vars.lon_max is None: form_vars.lon_max = lon if "lat_min" not in form_vars or form_vars.lat_min is None: form_vars.lat_min = lat if "lat_max" not in form_vars or form_vars.lat_max is None: form_vars.lat_max = lat else: wkt = form_vars.get("wkt", None) if wkt: if wkt[0] == "{": # This is a GeoJSON geometry from shapely.geometry import shape as shape_loads try: js = json.load(wkt) shape = shape_loads(js) except: form.errors["wkt"] = current.messages.invalid_wkt return else: form_vars.wkt = shape.wkt else: # Assume WKT warning = None from shapely.wkt import loads as wkt_loads try: shape = wkt_loads(wkt) except: # Perhaps this is really a LINESTRING (e.g. 
OSM import of an unclosed Way, some CAP areas) linestring = "LINESTRING%s" % wkt[8:-1] try: shape = wkt_loads(linestring) except: form.errors["wkt"] = current.messages.invalid_wkt return else: warning = s3_str(current.T("Source WKT has been converted from POLYGON to LINESTRING")) current.log.warning(warning) form_vars.wkt = linestring else: if shape.wkt != form_vars.wkt: # If this is too heavy a check for some deployments, add a deployment_setting to disable the check & just do it silently # Use Shapely to clean up the defective WKT (e.g. trailing chars) warning = s3_str(current.T("Source WKT has been cleaned by Shapely")) form_vars.wkt = shape.wkt if shape.has_z: # Shapely export of WKT is 2D only if warning: warning = "%s, %s" % s3_str(current.T("Only 2D geometry stored as PostGIS cannot handle 3D geometries")) else: warning = s3_str(current.T("Only 2D geometry stored as PostGIS cannot handle 3D geometries")) if warning: current.session.warning = warning gis_feature_type = shape.type if gis_feature_type == "Point": form_vars.gis_feature_type = 1 elif gis_feature_type == "LineString": form_vars.gis_feature_type = 2 elif gis_feature_type == "Polygon": form_vars.gis_feature_type = 3 elif gis_feature_type == "MultiPoint": form_vars.gis_feature_type = 4 elif gis_feature_type == "MultiLineString": form_vars.gis_feature_type = 5 elif gis_feature_type == "MultiPolygon": form_vars.gis_feature_type = 6 elif gis_feature_type == "GeometryCollection": form_vars.gis_feature_type = 7 try: centroid_point = shape.centroid form_vars.lon = centroid_point.x form_vars.lat = centroid_point.y bounds = shape.bounds if gis_feature_type != "Point" or \ "lon_min" not in form_vars or form_vars.lon_min is None or \ form_vars.lon_min == form_vars.lon_max: # Update bounds unless we have a 'Point' which has already got wider Bounds specified (such as a country) form_vars.lon_min = bounds[0] form_vars.lat_min = bounds[1] form_vars.lon_max = bounds[2] form_vars.lat_max = bounds[3] except: 
form.errors.gis_feature_type = current.messages.centroid_error else: lat = form_vars.get("lat", None) lon = form_vars.get("lon", None) if (lon is None and lat is None) or \ (lon == "" and lat == ""): # No Geometry available # Don't clobber existing records (e.g. in Prepop) #form_vars.gis_feature_type = "0" # Cannot create WKT, so Skip return else: # Point form_vars.gis_feature_type = "1" if lat is None or lat == "": form.errors["lat"] = current.messages.lat_empty elif lon is None or lon == "": form.errors["lon"] = current.messages.lon_empty else: form_vars.wkt = "POINT (%(lon)s %(lat)s)" % form_vars if "lon_min" not in form_vars or form_vars.lon_min is None: form_vars.lon_min = lon if "lon_max" not in form_vars or form_vars.lon_max is None: form_vars.lon_max = lon if "lat_min" not in form_vars or form_vars.lat_min is None: form_vars.lat_min = lat if "lat_max" not in form_vars or form_vars.lat_max is None: form_vars.lat_max = lat if current.deployment_settings.get_gis_spatialdb(): # Also populate the spatial field form_vars.the_geom = form_vars.wkt
Python
def testGetRoles(self): """ Test role lookup for a user """ auth = current.auth UUID = "TESTAUTOCREATEDROLE" role_id = auth.s3_create_role(UUID, uid=UUID) assertTrue = self.assertTrue assertFalse = self.assertFalse try: auth.s3_impersonate("[email protected]") user_id = auth.user.id auth.s3_assign_role(user_id, role_id, for_pe=None) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertFalse(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=None) auth.s3_assign_role(user_id, role_id, for_pe=0) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertFalse(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=0) auth.s3_assign_role(user_id, role_id, for_pe=1) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertTrue(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=1) finally: auth.s3_delete_role(UUID) auth.s3_impersonate(None)
def testGetRoles(self): """ Test role lookup for a user """ auth = current.auth UUID = "TESTAUTOCREATEDROLE" role_id = auth.s3_create_role(UUID, uid=UUID) assertTrue = self.assertTrue assertFalse = self.assertFalse try: auth.s3_impersonate("[email protected]") user_id = auth.user.id auth.s3_assign_role(user_id, role_id, for_pe=None) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertFalse(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=None) auth.s3_assign_role(user_id, role_id, for_pe=0) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertFalse(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=0) auth.s3_assign_role(user_id, role_id, for_pe=1) roles = auth.s3_get_roles(user_id) assertTrue(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=None) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=0) assertFalse(role_id in roles) roles = auth.s3_get_roles(user_id, for_pe=1) assertTrue(role_id in roles) auth.s3_remove_role(user_id, role_id, for_pe=1) finally: auth.s3_delete_role(UUID) auth.s3_impersonate(None)
Python
def testOwnershipOrganisationOwnedRecord(self): """ Test group-ownership for an entity-owned record """ auth = current.auth s3_impersonate = auth.s3_impersonate is_owner = auth.permission.is_owner assertTrue = self.assertTrue assertFalse = self.assertFalse auth.s3_clear_session_ownership() table = self.table record_id = self.record_id # Assume we have at least one org org = current.s3db.pr_get_pe_id("org_organisation", 1) role = self.role_id # Make test role owner of the record and add to org's realm user_id = auth.s3_get_user_id("[email protected]") current.db(table.id == record_id).update(owned_by_user=user_id, owned_by_group=role, realm_entity=org) # Admin owns all records s3_impersonate("[email protected]") assertTrue(is_owner(table, record_id)) # Normal user does not own the record s3_impersonate("[email protected]") user_id = auth.user.id assertFalse(is_owner(table, record_id)) # ...unless they have the role for this org auth.s3_assign_role(user_id, role, for_pe=org) assertTrue(is_owner(table, record_id)) auth.s3_remove_role(user_id, role, for_pe=[]) assertFalse(is_owner(table, record_id)) # ....or have the role without limitation (any org) auth.s3_assign_role(user_id, role, for_pe=0) assertTrue(is_owner(table, record_id)) auth.s3_remove_role(user_id, role, for_pe=[]) assertFalse(is_owner(table, record_id)) # Unauthenticated does not own this record s3_impersonate(None) assertFalse(is_owner(table, record_id)) # ...unless the session owns the record auth.s3_make_session_owner(table, record_id) assertTrue(is_owner(table, record_id))
def testOwnershipOrganisationOwnedRecord(self): """ Test group-ownership for an entity-owned record """ auth = current.auth s3_impersonate = auth.s3_impersonate is_owner = auth.permission.is_owner assertTrue = self.assertTrue assertFalse = self.assertFalse auth.s3_clear_session_ownership() table = self.table record_id = self.record_id # Assume we have at least one org org = current.s3db.pr_get_pe_id("org_organisation", 1) role = self.role_id # Make test role owner of the record and add to org's realm user_id = auth.s3_get_user_id("[email protected]") current.db(table.id == record_id).update(owned_by_user=user_id, owned_by_group=role, realm_entity=org) # Admin owns all records s3_impersonate("[email protected]") assertTrue(is_owner(table, record_id)) # Normal user does not own the record s3_impersonate("[email protected]") user_id = auth.user.id assertFalse(is_owner(table, record_id)) # ...unless they have the role for this org auth.s3_assign_role(user_id, role, for_pe=org) assertTrue(is_owner(table, record_id)) auth.s3_remove_role(user_id, role, for_pe=[]) assertFalse(is_owner(table, record_id)) # ....or have the role without limitation (any org) auth.s3_assign_role(user_id, role, for_pe=0) assertTrue(is_owner(table, record_id)) auth.s3_remove_role(user_id, role, for_pe=[]) assertFalse(is_owner(table, record_id)) # Unauthenticated does not own this record s3_impersonate(None) assertFalse(is_owner(table, record_id)) # ...unless the session owns the record auth.s3_make_session_owner(table, record_id) assertTrue(is_owner(table, record_id))
Python
def testOwnershipOverride(self): """ Test override of owners in is_owner """ auth = current.auth is_owner = auth.permission.is_owner assertTrue = self.assertTrue assertFalse = self.assertFalse auth.s3_clear_session_ownership() table = self.table record_id = self.record_id org = current.s3db.pr_get_pe_id("org_organisation", 1) role = self.role_id user_id = auth.s3_get_user_id("[email protected]") current.db(table.id == record_id).update(realm_entity=org, owned_by_group=role, owned_by_user=user_id) # Normal user does not own the record auth.s3_impersonate("[email protected]") assertFalse(auth.permission.is_owner(table, record_id)) # ...unless we override the record's owner stamp owners_override = (None, None, None) assertTrue(is_owner(table, record_id, owners=owners_override))
def testOwnershipOverride(self): """ Test override of owners in is_owner """ auth = current.auth is_owner = auth.permission.is_owner assertTrue = self.assertTrue assertFalse = self.assertFalse auth.s3_clear_session_ownership() table = self.table record_id = self.record_id org = current.s3db.pr_get_pe_id("org_organisation", 1) role = self.role_id user_id = auth.s3_get_user_id("[email protected]") current.db(table.id == record_id).update(realm_entity=org, owned_by_group=role, owned_by_user=user_id) # Normal user does not own the record auth.s3_impersonate("[email protected]") assertFalse(auth.permission.is_owner(table, record_id)) # ...unless we override the record's owner stamp owners_override = (None, None, None) assertTrue(is_owner(table, record_id, owners=owners_override))
Python
def testTableSpecificRealmEntity(self): """ Test table-specific realm_entity hook """ s3db = current.s3db auth = current.auth otable = s3db.org_organisation record = otable[self.org_id] tname = "org_organisation" s3db.configure(tname, realm_entity = self.realm_entity) auth.s3_set_record_owner(otable, record, force_update=True) self.assertEqual(self.owned_record, (tname, record.id))
def testTableSpecificRealmEntity(self): """ Test table-specific realm_entity hook """ s3db = current.s3db auth = current.auth otable = s3db.org_organisation record = otable[self.org_id] tname = "org_organisation" s3db.configure(tname, realm_entity = self.realm_entity) auth.s3_set_record_owner(otable, record, force_update=True) self.assertEqual(self.owned_record, (tname, record.id))
Python
def testSetRealmEntityWithQueryAndOverride(self): """ Test that realm entity can be overridden by call """ s3db = current.s3db auth = current.auth settings = current.deployment_settings otable = s3db.org_organisation settings.auth.realm_entity = self.realm_entity assertEqual = self.assertEqual query = (otable.id == self.org_id) auth.set_realm_entity(otable, query, entity=4, force_update=True) assertEqual(self.owned_record, None) record = otable[self.org_id] assertEqual(record.realm_entity, 4)
def testSetRealmEntityWithQueryAndOverride(self): """ Test that realm entity can be overridden by call """ s3db = current.s3db auth = current.auth settings = current.deployment_settings otable = s3db.org_organisation settings.auth.realm_entity = self.realm_entity assertEqual = self.assertEqual query = (otable.id == self.org_id) auth.set_realm_entity(otable, query, entity=4, force_update=True) assertEqual(self.owned_record, None) record = otable[self.org_id] assertEqual(record.realm_entity, 4)
Python
def testSetRealmEntityWithQueryAndOverrideNone(self): """ Test that realm entity can be set to None """ s3db = current.s3db auth = current.auth settings = current.deployment_settings otable = s3db.org_organisation settings.auth.realm_entity = self.realm_entity assertEqual = self.assertEqual query = (otable.id == self.org_id) auth.set_realm_entity(otable, query, entity=None, force_update=True) assertEqual(self.owned_record, None) record = otable[self.org_id] assertEqual(record.realm_entity, None)
def testSetRealmEntityWithQueryAndOverrideNone(self): """ Test that realm entity can be set to None """ s3db = current.s3db auth = current.auth settings = current.deployment_settings otable = s3db.org_organisation settings.auth.realm_entity = self.realm_entity assertEqual = self.assertEqual query = (otable.id == self.org_id) auth.set_realm_entity(otable, query, entity=None, force_update=True) assertEqual(self.owned_record, None) record = otable[self.org_id] assertEqual(record.realm_entity, None)
Python
def testUpdateSharedFields(self): """ Test that realm entity gets set in super-entity """ s3db = current.s3db auth = current.auth ftable = s3db.org_office stable = s3db.org_site assertEqual = self.assertEqual row = ftable[self.office_id] row.update_record(realm_entity=row["pe_id"]) site_id = row["site_id"] auth.update_shared_fields(ftable, self.office_id, realm_entity=None) site = stable[site_id] assertEqual(site["realm_entity"], None) auth.update_shared_fields(ftable, self.office_id, realm_entity=row["realm_entity"]) site = stable[site_id] assertEqual(site["realm_entity"], row["realm_entity"])
def testUpdateSharedFields(self): """ Test that realm entity gets set in super-entity """ s3db = current.s3db auth = current.auth ftable = s3db.org_office stable = s3db.org_site assertEqual = self.assertEqual row = ftable[self.office_id] row.update_record(realm_entity=row["pe_id"]) site_id = row["site_id"] auth.update_shared_fields(ftable, self.office_id, realm_entity=None) site = stable[site_id] assertEqual(site["realm_entity"], None) auth.update_shared_fields(ftable, self.office_id, realm_entity=row["realm_entity"]) site = stable[site_id] assertEqual(site["realm_entity"], row["realm_entity"])
Python
def defaults(): """ Returns safe defaults in case the model has been deactivated. """ return {"cr_shelter_id": S3ReusableField.dummy("shelter_id"), }
def defaults(): """ Returns safe defaults in case the model has been deactivated. """ return {"cr_shelter_id": S3ReusableField.dummy("shelter_id"), }
Python
def shelter_onaccept(form): """ Onaccept of shelter - update PE hierarchy - update available capacity and create status entry - record org_site_event """ s3db = current.s3db shelter_id = get_form_record_id(form) # Update PE hierarchy s3db.org_update_affiliations("cr_shelter", form.vars) # Update population, available capacity and create status entry Shelter(shelter_id).update_population() if not current.response.s3.bulk: # Track site events stable = s3db.cr_shelter shelter = current.db(stable.id == shelter_id).select(stable.site_id, stable.status, stable.obsolete, limitby = (0, 1) ).first() record = form.record if record: # Update form obsolete = shelter.obsolete if obsolete != record.obsolete: s3db.org_site_event.insert(site_id = shelter.site_id, event = 4, # Obsolete Change comment = obsolete, ) status = shelter.status if status != record.status: s3db.org_site_event.insert(site_id = shelter.site_id, event = 1, # Status Change status = status, ) else: # Create form s3db.org_site_event.insert(site_id = shelter.site_id, event = 1, # Status Change status = shelter.status, )
def shelter_onaccept(form): """ Onaccept of shelter - update PE hierarchy - update available capacity and create status entry - record org_site_event """ s3db = current.s3db shelter_id = get_form_record_id(form) # Update PE hierarchy s3db.org_update_affiliations("cr_shelter", form.vars) # Update population, available capacity and create status entry Shelter(shelter_id).update_population() if not current.response.s3.bulk: # Track site events stable = s3db.cr_shelter shelter = current.db(stable.id == shelter_id).select(stable.site_id, stable.status, stable.obsolete, limitby = (0, 1) ).first() record = form.record if record: # Update form obsolete = shelter.obsolete if obsolete != record.obsolete: s3db.org_site_event.insert(site_id = shelter.site_id, event = 4, # Obsolete Change comment = obsolete, ) status = shelter.status if status != record.status: s3db.org_site_event.insert(site_id = shelter.site_id, event = 1, # Status Change status = status, ) else: # Create form s3db.org_site_event.insert(site_id = shelter.site_id, event = 1, # Status Change status = shelter.status, )
Python
def shelter_population_onaccept(form): """ Onaccept of shelter population: - updates the total population (if separate per age group) - updates shelter population totals """ record_id = get_form_record_id(form) if not record_id: return by_age_group = current.deployment_settings.get_cr_shelter_population_by_age_group() table = current.s3db.cr_shelter_population query = (table.id == record_id) & \ (table.deleted == False) fields = [table.id, table.shelter_id] if by_age_group: fields += [table.population_adults, table.population_children] row = current.db(query).select(*fields, limitby = (0, 1)).first() if not row: return if by_age_group: a = row.population_adults c = row.population_children population = (a if a else 0) + (c if c else 0) row.update_record(population = population) shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_population()
def shelter_population_onaccept(form): """ Onaccept of shelter population: - updates the total population (if separate per age group) - updates shelter population totals """ record_id = get_form_record_id(form) if not record_id: return by_age_group = current.deployment_settings.get_cr_shelter_population_by_age_group() table = current.s3db.cr_shelter_population query = (table.id == record_id) & \ (table.deleted == False) fields = [table.id, table.shelter_id] if by_age_group: fields += [table.population_adults, table.population_children] row = current.db(query).select(*fields, limitby = (0, 1)).first() if not row: return if by_age_group: a = row.population_adults c = row.population_children population = (a if a else 0) + (c if c else 0) row.update_record(population = population) shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_population()
Python
def shelter_population_ondelete(row): """ Ondelete of shelter population: - updates shelter population totals """ shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_population()
def shelter_population_ondelete(row): """ Ondelete of shelter population: - updates shelter population totals """ shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_population()
Python
def defaults(): """ Returns safe defaults in case the model has been deactivated. """ dummy = S3ReusableField.dummy return {"cr_shelter_unit_id": dummy("shelter_unit_id"), }
def defaults(): """ Returns safe defaults in case the model has been deactivated. """ dummy = S3ReusableField.dummy return {"cr_shelter_unit_id": dummy("shelter_unit_id"), }
Python
def shelter_unit_onaccept(form): """ Onaccept of shelter unit: - updates population and available capacity of unit - updates shelter population - updates shelter capacity """ record_id = get_form_record_id(form) if not record_id: return HousingUnit(record_id).update_population() table = current.s3db.cr_shelter_unit query = (table.id == record_id) & \ (table.deleted == False) unit = current.db(query).select(table.shelter_id, limitby = (0, 1), ).first() shelter_id = unit.shelter_id if unit else None if shelter_id: shelter = Shelter(shelter_id) if not current.deployment_settings.get_cr_shelter_registration(): shelter.update_population(update_status=False) shelter.update_capacity()
def shelter_unit_onaccept(form): """ Onaccept of shelter unit: - updates population and available capacity of unit - updates shelter population - updates shelter capacity """ record_id = get_form_record_id(form) if not record_id: return HousingUnit(record_id).update_population() table = current.s3db.cr_shelter_unit query = (table.id == record_id) & \ (table.deleted == False) unit = current.db(query).select(table.shelter_id, limitby = (0, 1), ).first() shelter_id = unit.shelter_id if unit else None if shelter_id: shelter = Shelter(shelter_id) if not current.deployment_settings.get_cr_shelter_registration(): shelter.update_population(update_status=False) shelter.update_capacity()
Python
def shelter_unit_ondelete(row): """ Ondelete of shelter unit: - updates shelter population - updates shelter capacity """ shelter_id = row.shelter_id if shelter_id: shelter = Shelter(shelter_id) if not current.deployment_settings.get_cr_shelter_registration(): shelter.update_population(update_status=False) shelter.update_capacity()
def shelter_unit_ondelete(row): """ Ondelete of shelter unit: - updates shelter population - updates shelter capacity """ shelter_id = row.shelter_id if shelter_id: shelter = Shelter(shelter_id) if not current.deployment_settings.get_cr_shelter_registration(): shelter.update_population(update_status=False) shelter.update_capacity()
Python
def shelter_inspection_task_ondelete_cascade(row, tablename=None): """ Ondelete-cascade method for inspection task links: - closes the linked task if there are no other unresolved flags linked to it """ db = current.db s3db = current.s3db # Get the task_id ltable = s3db.cr_shelter_inspection_task query = (ltable.id == row.id) link = db(query).select(ltable.id, ltable.task_id, limitby = (0, 1), ).first() task_id = link.task_id # Are there any other unresolved flags linked to the same task? ftable = s3db.cr_shelter_inspection_flag ttable = s3db.project_task query = (ltable.task_id == task_id) & \ (ltable.id != link.id) & \ (ltable.deleted != True) & \ (ftable.id == ltable.inspection_flag_id) & \ ((ftable.resolved == False) | (ftable.resolved == None)) other = db(query).select(ltable.id, limitby = (0, 1) ).first() if not other: # Set task to completed status closed = current.deployment_settings \ .get_cr_shelter_inspection_task_completed_status() db(ttable.id == task_id).update(status = closed) # Remove task_id (to allow deletion of the link) link.update_record(task_id = None)
def shelter_inspection_task_ondelete_cascade(row, tablename=None): """ Ondelete-cascade method for inspection task links: - closes the linked task if there are no other unresolved flags linked to it """ db = current.db s3db = current.s3db # Get the task_id ltable = s3db.cr_shelter_inspection_task query = (ltable.id == row.id) link = db(query).select(ltable.id, ltable.task_id, limitby = (0, 1), ).first() task_id = link.task_id # Are there any other unresolved flags linked to the same task? ftable = s3db.cr_shelter_inspection_flag ttable = s3db.project_task query = (ltable.task_id == task_id) & \ (ltable.id != link.id) & \ (ltable.deleted != True) & \ (ftable.id == ltable.inspection_flag_id) & \ ((ftable.resolved == False) | (ftable.resolved == None)) other = db(query).select(ltable.id, limitby = (0, 1) ).first() if not other: # Set task to completed status closed = current.deployment_settings \ .get_cr_shelter_inspection_task_completed_status() db(ttable.id == task_id).update(status = closed) # Remove task_id (to allow deletion of the link) link.update_record(task_id = None)
Python
def shelter_registration_onvalidation(form): """ Checks if the housing unit belongs to the requested shelter """ db = current.db s3db = current.s3db record_id = get_form_record_id(form) if hasattr(form, "record") and form.record: record = form.record else: record = None table = s3db.cr_shelter_registration form_vars = form.vars lookup = [] def get_field_value(fn): if fn in form_vars: # Modified by form => use form.vars value = form_vars[fn] elif record_id: # Existing record => use form.record or lookup if record and fn in record: value = record[fn] else: lookup.append(table[fn]) value = None else: # New record => use table default value = table[fn].default return value shelter_id = get_field_value("shelter_id") shelter_unit_id = get_field_value("shelter_unit_id") if record_id and lookup: # Lookup from record row = db(table.id == record_id).select(*lookup, limitby=(0, 1)).first() if row: if "shelter_id" in row: shelter_id = row.shelter_id if "shelter_unit_id" in row: shelter_unit_id = row.shelter_unit_id if shelter_id and shelter_unit_id: # Verify that they match utable = s3db.cr_shelter_unit row = db(utable.id == shelter_unit_id).select(utable.shelter_id, limitby = (0, 1), ).first() if row and row.shelter_id != shelter_id: msg = current.T("You have to select a housing unit belonging to the shelter") form.errors.shelter_unit_id = msg elif not shelter_id and not shelter_unit_id: msg = current.T("Shelter or housing unit required") form.errors.shelter_id = \ form.errors.shelter_unit_id = msg
def shelter_registration_onvalidation(form): """ Checks if the housing unit belongs to the requested shelter """ db = current.db s3db = current.s3db record_id = get_form_record_id(form) if hasattr(form, "record") and form.record: record = form.record else: record = None table = s3db.cr_shelter_registration form_vars = form.vars lookup = [] def get_field_value(fn): if fn in form_vars: # Modified by form => use form.vars value = form_vars[fn] elif record_id: # Existing record => use form.record or lookup if record and fn in record: value = record[fn] else: lookup.append(table[fn]) value = None else: # New record => use table default value = table[fn].default return value shelter_id = get_field_value("shelter_id") shelter_unit_id = get_field_value("shelter_unit_id") if record_id and lookup: # Lookup from record row = db(table.id == record_id).select(*lookup, limitby=(0, 1)).first() if row: if "shelter_id" in row: shelter_id = row.shelter_id if "shelter_unit_id" in row: shelter_unit_id = row.shelter_unit_id if shelter_id and shelter_unit_id: # Verify that they match utable = s3db.cr_shelter_unit row = db(utable.id == shelter_unit_id).select(utable.shelter_id, limitby = (0, 1), ).first() if row and row.shelter_id != shelter_id: msg = current.T("You have to select a housing unit belonging to the shelter") form.errors.shelter_unit_id = msg elif not shelter_id and not shelter_unit_id: msg = current.T("Shelter or housing unit required") form.errors.shelter_id = \ form.errors.shelter_unit_id = msg
Python
def shelter_registration_onaccept(cls, form): """ Onaccept of shelter registration: - updates registration history - updates shelter / housing unit census """ record_id = get_form_record_id(form) # Get the registration db = current.db s3db = current.s3db # Get the current status table = s3db.cr_shelter_registration query = (table.id == record_id) & \ (table.deleted != True) registration = db(query).select(table.id, table.shelter_id, table.shelter_unit_id, table.last_shelter_id, table.last_shelter_unit_id, table.registration_status, table.check_in_date, table.check_out_date, table.modified_on, table.person_id, limitby = (0, 1), ).first() if not registration: return person_id = registration.person_id shelter_id = registration.shelter_id unit_id = registration.shelter_unit_id last_unit_id = registration.last_shelter_unit_id last_shelter_id = registration.last_shelter_id update = {} # Add shelter ID if missing if unit_id and not shelter_id: utable = s3db.cr_shelter_unit unit = db(utable.id == unit_id).select(utable.shelter_id, limitby = (0, 1), ).first() if unit: shelter_id = update["shelter_id"] = unit.shelter_id # Get the last registration history entry htable = s3db.cr_shelter_registration_history query = (htable.person_id == person_id) & \ (htable.shelter_id == shelter_id) & \ (htable.deleted != True) row = db(query).select(htable.status, htable.date, orderby = ~htable.created_on, limitby = (0, 1) ).first() if row: previous_status = row.status previous_date = row.date else: previous_status = None previous_date = None # Get the effective date field current_status = registration.registration_status if current_status == 2: effective_date_field = "check_in_date" elif current_status == 3: effective_date_field = "check_out_date" else: effective_date_field = None # Get effective date if effective_date_field: if effective_date_field in form.vars: effective_date = registration[effective_date_field] else: effective_date = None if not effective_date or \ previous_date and 
effective_date < previous_date: effective_date = current.request.utcnow update[effective_date_field] = effective_date else: effective_date = registration.modified_on if current_status != previous_status: # Insert new history entry htable.insert(previous_status = previous_status, status = current_status, date = effective_date, person_id = person_id, shelter_id = shelter_id, ) # Update last_seen_on if current.deployment_settings.has_module("dvr"): s3db.dvr_update_last_seen(person_id) # Update registration update["last_shelter_id"] = shelter_id update["last_shelter_unit_id"] = unit_id registration.update_record(**update) # Update housing unit census if last_unit_id and last_unit_id != unit_id: HousingUnit(last_unit_id).update_population() if unit_id: HousingUnit(unit_id).update_population() # Update shelter census if last_shelter_id and last_shelter_id != shelter_id: Shelter(last_shelter_id).update_population() if shelter_id: Shelter(shelter_id).update_population() # Warn user if shelter / housing unit is full cr_warn_if_full(shelter_id, unit_id)
def shelter_registration_onaccept(cls, form): """ Onaccept of shelter registration: - updates registration history - updates shelter / housing unit census """ record_id = get_form_record_id(form) # Get the registration db = current.db s3db = current.s3db # Get the current status table = s3db.cr_shelter_registration query = (table.id == record_id) & \ (table.deleted != True) registration = db(query).select(table.id, table.shelter_id, table.shelter_unit_id, table.last_shelter_id, table.last_shelter_unit_id, table.registration_status, table.check_in_date, table.check_out_date, table.modified_on, table.person_id, limitby = (0, 1), ).first() if not registration: return person_id = registration.person_id shelter_id = registration.shelter_id unit_id = registration.shelter_unit_id last_unit_id = registration.last_shelter_unit_id last_shelter_id = registration.last_shelter_id update = {} # Add shelter ID if missing if unit_id and not shelter_id: utable = s3db.cr_shelter_unit unit = db(utable.id == unit_id).select(utable.shelter_id, limitby = (0, 1), ).first() if unit: shelter_id = update["shelter_id"] = unit.shelter_id # Get the last registration history entry htable = s3db.cr_shelter_registration_history query = (htable.person_id == person_id) & \ (htable.shelter_id == shelter_id) & \ (htable.deleted != True) row = db(query).select(htable.status, htable.date, orderby = ~htable.created_on, limitby = (0, 1) ).first() if row: previous_status = row.status previous_date = row.date else: previous_status = None previous_date = None # Get the effective date field current_status = registration.registration_status if current_status == 2: effective_date_field = "check_in_date" elif current_status == 3: effective_date_field = "check_out_date" else: effective_date_field = None # Get effective date if effective_date_field: if effective_date_field in form.vars: effective_date = registration[effective_date_field] else: effective_date = None if not effective_date or \ previous_date and 
effective_date < previous_date: effective_date = current.request.utcnow update[effective_date_field] = effective_date else: effective_date = registration.modified_on if current_status != previous_status: # Insert new history entry htable.insert(previous_status = previous_status, status = current_status, date = effective_date, person_id = person_id, shelter_id = shelter_id, ) # Update last_seen_on if current.deployment_settings.has_module("dvr"): s3db.dvr_update_last_seen(person_id) # Update registration update["last_shelter_id"] = shelter_id update["last_shelter_unit_id"] = unit_id registration.update_record(**update) # Update housing unit census if last_unit_id and last_unit_id != unit_id: HousingUnit(last_unit_id).update_population() if unit_id: HousingUnit(unit_id).update_population() # Update shelter census if last_shelter_id and last_shelter_id != shelter_id: Shelter(last_shelter_id).update_population() if shelter_id: Shelter(shelter_id).update_population() # Warn user if shelter / housing unit is full cr_warn_if_full(shelter_id, unit_id)
Python
def shelter_registration_ondelete(row): """ Ondelete of shelter registration: - updates census of housing unit and shelter """ unit_id = row.shelter_unit_id if unit_id: HousingUnit(unit_id).update_population() Shelter(row.shelter_id).update_population()
def shelter_registration_ondelete(row): """ Ondelete of shelter registration: - updates census of housing unit and shelter """ unit_id = row.shelter_unit_id if unit_id: HousingUnit(unit_id).update_population() Shelter(row.shelter_id).update_population()
Python
def shelter_allocation_onaccept(cls, form): """ Onaccept if shelter allocation: - updates available shelter capacity """ record_id = get_form_record_id(form) if not record_id: return table = current.s3db.cr_shelter_allocation query = (table.id == record_id) & \ (table.deleted == False) row = current.db(query).select(table.shelter_id, limitby = (0, 1), ).first() shelter_id = row.shelter_id if row else None if shelter_id: Shelter(row.shelter_id).update_available_capacity() cr_warn_if_full(shelter_id, None)
def shelter_allocation_onaccept(cls, form): """ Onaccept if shelter allocation: - updates available shelter capacity """ record_id = get_form_record_id(form) if not record_id: return table = current.s3db.cr_shelter_allocation query = (table.id == record_id) & \ (table.deleted == False) row = current.db(query).select(table.shelter_id, limitby = (0, 1), ).first() shelter_id = row.shelter_id if row else None if shelter_id: Shelter(row.shelter_id).update_available_capacity() cr_warn_if_full(shelter_id, None)
Python
def shelter_allocation_ondelete(row): """ Ondelete of shelter allocation: - updates available shelter capacity """ shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_available_capacity()
def shelter_allocation_ondelete(row): """ Ondelete of shelter allocation: - updates available shelter capacity """ shelter_id = row.shelter_id if shelter_id: Shelter(shelter_id).update_available_capacity()
Python
def update_status(self, date=None): """ Updates the status record of the shelter; creates one if none exists for the date yet Args: date: the date of the status record (default: today) """ db = current.db s3db = current.s3db shelter_id = self.shelter_id track_fields = ("status", "capacity", "blocked_capacity", "population", "population_adults", "population_children", ) stable = s3db.cr_shelter fields = [stable.id] + [stable[fn] for fn in track_fields] query = (stable.id == shelter_id) & \ (stable.deleted == False) shelter = db(query).select(*fields, limitby = (0, 1)).first() if not shelter: return status = {fn: shelter[fn] for fn in track_fields} if not date: date = current.request.utcnow.date() status["shelter_id"] = shelter_id status["date"] = date rtable = s3db.cr_shelter_status query = (rtable.shelter_id == shelter_id) & \ (rtable.date == date) & \ (rtable.deleted == False) report = db(query).select(rtable.id, limitby = (0, 1)).first() if report: report.update_record(**status) status["id"] = report.id s3db.onaccept(rtable, status, method="update") else: status_id = status["id"] = rtable.insert(**status) s3db.update_super(rtable, status) current.auth.s3_set_record_owner(rtable, status_id) s3db.onaccept(rtable, status, method="create")
def update_status(self, date=None): """ Updates the status record of the shelter; creates one if none exists for the date yet Args: date: the date of the status record (default: today) """ db = current.db s3db = current.s3db shelter_id = self.shelter_id track_fields = ("status", "capacity", "blocked_capacity", "population", "population_adults", "population_children", ) stable = s3db.cr_shelter fields = [stable.id] + [stable[fn] for fn in track_fields] query = (stable.id == shelter_id) & \ (stable.deleted == False) shelter = db(query).select(*fields, limitby = (0, 1)).first() if not shelter: return status = {fn: shelter[fn] for fn in track_fields} if not date: date = current.request.utcnow.date() status["shelter_id"] = shelter_id status["date"] = date rtable = s3db.cr_shelter_status query = (rtable.shelter_id == shelter_id) & \ (rtable.date == date) & \ (rtable.deleted == False) report = db(query).select(rtable.id, limitby = (0, 1)).first() if report: report.update_record(**status) status["id"] = report.id s3db.onaccept(rtable, status, method="update") else: status_id = status["id"] = rtable.insert(**status) s3db.update_super(rtable, status) current.auth.s3_set_record_owner(rtable, status_id) s3db.onaccept(rtable, status, method="create")
Python
def update_capacity(self, update_status=True): """ Updates the total capacity of the shelter Args: update_status: also update available capacity and status record """ db = current.db s3db = current.s3db shelter_id = self.shelter_id if self.manage_units: utable = s3db.cr_shelter_unit query = (utable.shelter_id == shelter_id) & \ (utable.status == 1) & \ (utable.deleted != True) total_capacity = utable.capacity.sum() total_blocked_capacity = utable.blocked_capacity.sum() row = db(query).select(total_capacity, total_blocked_capacity, ).first() if row: capacity = row[total_capacity] blocked_capacity = row[total_blocked_capacity] else: capacity = blocked_capacity = 0 stable = s3db.cr_shelter db(stable.id == shelter_id).update(capacity = capacity, blocked_capacity = blocked_capacity, ) self.update_available_capacity() self.update_status() else: # Capacity directly editable pass
def update_capacity(self, update_status=True): """ Updates the total capacity of the shelter Args: update_status: also update available capacity and status record """ db = current.db s3db = current.s3db shelter_id = self.shelter_id if self.manage_units: utable = s3db.cr_shelter_unit query = (utable.shelter_id == shelter_id) & \ (utable.status == 1) & \ (utable.deleted != True) total_capacity = utable.capacity.sum() total_blocked_capacity = utable.blocked_capacity.sum() row = db(query).select(total_capacity, total_blocked_capacity, ).first() if row: capacity = row[total_capacity] blocked_capacity = row[total_blocked_capacity] else: capacity = blocked_capacity = 0 stable = s3db.cr_shelter db(stable.id == shelter_id).update(capacity = capacity, blocked_capacity = blocked_capacity, ) self.update_available_capacity() self.update_status() else: # Capacity directly editable pass
Python
def update_population(self, update_status=True): """ Updates the population totals for this shelter Args: update_status: also update available capacity and status record """ db = current.db s3db = current.s3db table = s3db.cr_shelter shelter_id = self.shelter_id update = {} if self.manage_registrations: # Get current population from registration count rtable = s3db.cr_shelter_registration query = (rtable.shelter_id == shelter_id) & \ (rtable.deleted == False) if self.check_out_is_final: query &= (rtable.registration_status != 3) cnt = rtable.id.count() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.manage_units: # Update from subtotals per housing unit utable = s3db.cr_shelter_unit query = (utable.shelter_id == shelter_id) & \ (utable.deleted == False) if self.population_by_age_group: cnt_a = utable.population_adults.sum() cnt_c = utable.population_children.sum() row = db(query).select(cnt_a, cnt_c).first() if row: a, c = row[cnt_a], row[cnt_c] else: a = c = 0 update = {"population": a + c, "population_adults": a, "population_children": c, } else: cnt = utable.population.sum() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.population_by_type: # Update from subtotals per population type rtable = s3db.cr_shelter_population query = (rtable.shelter_id == shelter_id) & \ (rtable.deleted == False) if self.population_by_age_group: cnt_a = rtable.population_adults.sum() cnt_c = rtable.population_children.sum() row = db(query).select(cnt_a, cnt_c).first() if row: a, c = row[cnt_a], row[cnt_c] else: a = c = 0 update = {"population": (a if a else 0) + (c if c else 0), "population_adults": a, "population_children": c, } else: cnt = rtable.population.sum() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.population_by_age_group: # Update total from subtotal per age group shelter = db(table.id == shelter_id).select(table.population_adults, 
table.population_children, limitby = (0, 1), ).first() a = shelter.population_adults c = shelter.population_children update["population"] = (a if a else 0) + (c if c else 0) else: # Total population directly editable pass if update: db(table.id == shelter_id).update(**update) if update_status: self.update_available_capacity() self.update_status()
def update_population(self, update_status=True): """ Updates the population totals for this shelter Args: update_status: also update available capacity and status record """ db = current.db s3db = current.s3db table = s3db.cr_shelter shelter_id = self.shelter_id update = {} if self.manage_registrations: # Get current population from registration count rtable = s3db.cr_shelter_registration query = (rtable.shelter_id == shelter_id) & \ (rtable.deleted == False) if self.check_out_is_final: query &= (rtable.registration_status != 3) cnt = rtable.id.count() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.manage_units: # Update from subtotals per housing unit utable = s3db.cr_shelter_unit query = (utable.shelter_id == shelter_id) & \ (utable.deleted == False) if self.population_by_age_group: cnt_a = utable.population_adults.sum() cnt_c = utable.population_children.sum() row = db(query).select(cnt_a, cnt_c).first() if row: a, c = row[cnt_a], row[cnt_c] else: a = c = 0 update = {"population": a + c, "population_adults": a, "population_children": c, } else: cnt = utable.population.sum() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.population_by_type: # Update from subtotals per population type rtable = s3db.cr_shelter_population query = (rtable.shelter_id == shelter_id) & \ (rtable.deleted == False) if self.population_by_age_group: cnt_a = rtable.population_adults.sum() cnt_c = rtable.population_children.sum() row = db(query).select(cnt_a, cnt_c).first() if row: a, c = row[cnt_a], row[cnt_c] else: a = c = 0 update = {"population": (a if a else 0) + (c if c else 0), "population_adults": a, "population_children": c, } else: cnt = rtable.population.sum() row = db(query).select(cnt).first() update["population"] = row[cnt] if row else 0 elif self.population_by_age_group: # Update total from subtotal per age group shelter = db(table.id == shelter_id).select(table.population_adults, 
table.population_children, limitby = (0, 1), ).first() a = shelter.population_adults c = shelter.population_children update["population"] = (a if a else 0) + (c if c else 0) else: # Total population directly editable pass if update: db(table.id == shelter_id).update(**update) if update_status: self.update_available_capacity() self.update_status()
Python
def update_available_capacity(self): """ Updates the available capacity of the shelter """ shelter_id = self.shelter_id db = current.db s3db = current.s3db table = s3db.cr_shelter query = (table.id == shelter_id) shelter = db(query).select(table.id, table.capacity, table.population, table.blocked_capacity, table.available_capacity, limitby = (0, 1), ).first() if not shelter: return update = {} # Compute available capacity capacity = shelter.capacity if capacity is None: capacity = update["capacity"] = 0 if current.deployment_settings.get_cr_shelter_blocked_capacity(): blocked_capacity = shelter.blocked_capacity if blocked_capacity is None: blocked_capacity = update["blocked_capacity"] = 0 capacity -= blocked_capacity population = shelter.population if population is None: population = update["population"] = 0 available_capacity = max(capacity - population, 0) if self.manage_allocations: # Look up allocation total atable = s3db.cr_shelter_allocation query = (atable.shelter_id == shelter_id) & \ (atable.status.belongs((1, 2, 3, 4))) & \ (atable.deleted == False) cnt = atable.group_size_day.sum() row = db(query).select(cnt).first() allocated_capacity = row[cnt] if row else 0 # Subtract allocation total from available capacity available_capacity = max(available_capacity - allocated_capacity, 0) if available_capacity != shelter.available_capacity: update["available_capacity"] = available_capacity if update: shelter.update_record(**update)
def update_available_capacity(self): """ Updates the available capacity of the shelter """ shelter_id = self.shelter_id db = current.db s3db = current.s3db table = s3db.cr_shelter query = (table.id == shelter_id) shelter = db(query).select(table.id, table.capacity, table.population, table.blocked_capacity, table.available_capacity, limitby = (0, 1), ).first() if not shelter: return update = {} # Compute available capacity capacity = shelter.capacity if capacity is None: capacity = update["capacity"] = 0 if current.deployment_settings.get_cr_shelter_blocked_capacity(): blocked_capacity = shelter.blocked_capacity if blocked_capacity is None: blocked_capacity = update["blocked_capacity"] = 0 capacity -= blocked_capacity population = shelter.population if population is None: population = update["population"] = 0 available_capacity = max(capacity - population, 0) if self.manage_allocations: # Look up allocation total atable = s3db.cr_shelter_allocation query = (atable.shelter_id == shelter_id) & \ (atable.status.belongs((1, 2, 3, 4))) & \ (atable.deleted == False) cnt = atable.group_size_day.sum() row = db(query).select(cnt).first() allocated_capacity = row[cnt] if row else 0 # Subtract allocation total from available capacity available_capacity = max(available_capacity - allocated_capacity, 0) if available_capacity != shelter.available_capacity: update["available_capacity"] = available_capacity if update: shelter.update_record(**update)
Python
def update_population(self): """ Updates total population and available capacity of this unit """ unit_id = self.unit_id db = current.db s3db = current.s3db # Lookup shelter unit table = s3db.cr_shelter_unit query = (table.id == unit_id) unit = db(query).select(table.id, table.capacity, table.population, table.population_adults, table.population_children, table.blocked_capacity, table.available_capacity, limitby = (0, 1), ).first() if not unit: return if self.manage_registrations: # Get current population from registration count rtable = s3db.cr_shelter_registration query = (rtable.shelter_unit_id == unit_id) & \ (rtable.deleted == False) if self.check_out_is_final: query &= (rtable.registration_status != 3) cnt = rtable.id.count() row = db(query).select(cnt).first() population = row[cnt] if row else 0 else: if self.population_by_age_group: a = unit.population_adults c = unit.population_children population = (a if a else 0) + (c if c else 0) else: population = unit.population if population is None: population = 0 update = {} # Compute available capacity capacity = unit.capacity if capacity is None: capacity = update["capacity"] = 0 if current.deployment_settings.get_cr_shelter_blocked_capacity(): blocked_capacity = unit.blocked_capacity if blocked_capacity is None: blocked_capacity = update["blocked_capacity"] = 0 capacity -= blocked_capacity available_capacity = max(capacity - population, 0) # Update unit if required if population != unit.population: update["population"] = population if available_capacity != unit.available_capacity: update["available_capacity"] = available_capacity if update: unit.update_record(**update)
def update_population(self): """ Updates total population and available capacity of this unit """ unit_id = self.unit_id db = current.db s3db = current.s3db # Lookup shelter unit table = s3db.cr_shelter_unit query = (table.id == unit_id) unit = db(query).select(table.id, table.capacity, table.population, table.population_adults, table.population_children, table.blocked_capacity, table.available_capacity, limitby = (0, 1), ).first() if not unit: return if self.manage_registrations: # Get current population from registration count rtable = s3db.cr_shelter_registration query = (rtable.shelter_unit_id == unit_id) & \ (rtable.deleted == False) if self.check_out_is_final: query &= (rtable.registration_status != 3) cnt = rtable.id.count() row = db(query).select(cnt).first() population = row[cnt] if row else 0 else: if self.population_by_age_group: a = unit.population_adults c = unit.population_children population = (a if a else 0) + (c if c else 0) else: population = unit.population if population is None: population = 0 update = {} # Compute available capacity capacity = unit.capacity if capacity is None: capacity = update["capacity"] = 0 if current.deployment_settings.get_cr_shelter_blocked_capacity(): blocked_capacity = unit.blocked_capacity if blocked_capacity is None: blocked_capacity = update["blocked_capacity"] = 0 capacity -= blocked_capacity available_capacity = max(capacity - population, 0) # Update unit if required if population != unit.population: update["population"] = population if available_capacity != unit.available_capacity: update["available_capacity"] = available_capacity if update: unit.update_record(**update)
Python
def cr_resolve_shelter_flags(task_id): """ If a task is set to an inactive status, then marks all linked shelter inspection flags as resolved Args: task_id: the task record ID """ db = current.db s3db = current.s3db active_statuses = current.deployment_settings \ .get_cr_shelter_inspection_task_active_statuses() # Get the task ttable = s3db.project_task query = (ttable.id == task_id) task = db(query).select(ttable.id, ttable.status, limitby = (0, 1), ).first() if task and task.status not in active_statuses: # Mark all shelter inspection flags as resolved ltable = s3db.cr_shelter_inspection_task ftable = s3db.cr_shelter_inspection_flag query = (ltable.task_id == task.id) & \ (ftable.id == ltable.inspection_flag_id) & \ ((ftable.resolved == False) | (ftable.resolved == None)) rows = db(query).select(ftable.id) ids = set(row.id for row in rows) db(ftable.id.belongs(ids)).update(resolved=True)
def cr_resolve_shelter_flags(task_id): """ If a task is set to an inactive status, then marks all linked shelter inspection flags as resolved Args: task_id: the task record ID """ db = current.db s3db = current.s3db active_statuses = current.deployment_settings \ .get_cr_shelter_inspection_task_active_statuses() # Get the task ttable = s3db.project_task query = (ttable.id == task_id) task = db(query).select(ttable.id, ttable.status, limitby = (0, 1), ).first() if task and task.status not in active_statuses: # Mark all shelter inspection flags as resolved ltable = s3db.cr_shelter_inspection_task ftable = s3db.cr_shelter_inspection_flag query = (ltable.task_id == task.id) & \ (ftable.id == ltable.inspection_flag_id) & \ ((ftable.resolved == False) | (ftable.resolved == None)) rows = db(query).select(ftable.id) ids = set(row.id for row in rows) db(ftable.id.belongs(ids)).update(resolved=True)
Python
def cr_warn_if_full(shelter_id, unit_id): """ Generates a response.warning if housing unit / shelter is at or over capacity Args: shelter_id: the shelter ID unit_id: the housing unit ID """ if current.auth.permission.format != "html": return s3db = current.s3db if unit_id: table = s3db.cr_shelter_unit query = (table.id == unit_id) elif shelter_id: table = s3db.cr_shelter query = (table.id == shelter_id) else: return row = current.db(query).select(table.available_capacity, limitby = (0, 1), ).first() available_capacity = row.available_capacity if row else None full = available_capacity is None or available_capacity <= 0 warning = None if full: T = current.T if unit_id: warning = T("Warning: this housing unit is full") else: warning = T("Warning: this shelter is full") response = current.response response_warning = response.warning if response_warning: response.warning = "%s - %s" % (response_warning, warning) else: response.warning = warning
def cr_warn_if_full(shelter_id, unit_id): """ Generates a response.warning if housing unit / shelter is at or over capacity Args: shelter_id: the shelter ID unit_id: the housing unit ID """ if current.auth.permission.format != "html": return s3db = current.s3db if unit_id: table = s3db.cr_shelter_unit query = (table.id == unit_id) elif shelter_id: table = s3db.cr_shelter query = (table.id == shelter_id) else: return row = current.db(query).select(table.available_capacity, limitby = (0, 1), ).first() available_capacity = row.available_capacity if row else None full = available_capacity is None or available_capacity <= 0 warning = None if full: T = current.T if unit_id: warning = T("Warning: this housing unit is full") else: warning = T("Warning: this shelter is full") response = current.response response_warning = response.warning if response_warning: response.warning = "%s - %s" % (response_warning, warning) else: response.warning = warning
Python
def apply_method(self, r, **attr): """ Applies the method (controller entry point). Args: r: the CRUDRequest attr: controller arguments """ try: person_id = int(r.get_vars["person_id"]) except (AttributeError, ValueError, TypeError): r.error(400, current.messages.BAD_REQUEST) self.settings = current.response.s3.crud sqlform = self.resource.get_config("crud_form") self.sqlform = sqlform if sqlform else S3SQLDefaultForm() self.data = None # Create or Update? table = current.s3db.cr_shelter_registration query = (table.deleted == False) & \ (table.person_id == person_id) exists = current.db(query).select(table.id, limitby=(0, 1)).first() if exists: # Update form r.method = "update" # Ensure correct View template is used self.record_id = exists.id output = self.update(r, **attr) else: # Create form r.method = "create" # Ensure correct View template is used self.data = {"person_id": person_id} output = self.create(r, **attr) return output
def apply_method(self, r, **attr): """ Applies the method (controller entry point). Args: r: the CRUDRequest attr: controller arguments """ try: person_id = int(r.get_vars["person_id"]) except (AttributeError, ValueError, TypeError): r.error(400, current.messages.BAD_REQUEST) self.settings = current.response.s3.crud sqlform = self.resource.get_config("crud_form") self.sqlform = sqlform if sqlform else S3SQLDefaultForm() self.data = None # Create or Update? table = current.s3db.cr_shelter_registration query = (table.deleted == False) & \ (table.person_id == person_id) exists = current.db(query).select(table.id, limitby=(0, 1)).first() if exists: # Update form r.method = "update" # Ensure correct View template is used self.record_id = exists.id output = self.update(r, **attr) else: # Create form r.method = "create" # Ensure correct View template is used self.data = {"person_id": person_id} output = self.create(r, **attr) return output
Python
def link(self, k, v, row=None): """ Links inspection flag representations to the inspection record Args: k: the inspection flag ID v: the representation row: the row from lookup_rows """ if row: inspection_id = row.cr_shelter_inspection.id if inspection_id: return A(v, _href=URL(c="cr", f="shelter_inspection", args=[inspection_id], ), ) return v
def link(self, k, v, row=None): """ Links inspection flag representations to the inspection record Args: k: the inspection flag ID v: the representation row: the row from lookup_rows """ if row: inspection_id = row.cr_shelter_inspection.id if inspection_id: return A(v, _href=URL(c="cr", f="shelter_inspection", args=[inspection_id], ), ) return v
Python
def permitted(): """ Checks if the user is permitted to use this method """ # @todo: implement return True
def permitted(): """ Checks if the user is permitted to use this method """ # @todo: implement return True
Python
def inspection_ajax(r, **attr): """ Ajax-registration of shelter inspection Args: r: the CRUDRequest instance attr: controller parameters """ T = current.T db = current.db s3db = current.s3db # Load JSON data from request body s = r.body s.seek(0) try: data = json.load(s) except (ValueError, TypeError): r.error(400, current.ERROR.BAD_REQUEST) shelter_unit_id = data.get("u") if shelter_unit_id: # Register shelter inspection error = False # Read comments comments = data.get("c") # Find inspection record update = False itable = s3db.cr_shelter_inspection query = (itable.shelter_unit_id == shelter_unit_id) & \ (itable.date == current.request.utcnow.date()) & \ (itable.deleted != True) row = db(query).select(itable.id, limitby = (0, 1), ).first() if row: # Update this inspection update = True inspection_id = row.id row.update_record(comments = comments) else: # Create a new inspection inspection_id = itable.insert(shelter_unit_id = shelter_unit_id, comments = comments, ) if inspection_id: # Currently selected flags flag_ids = data.get("f") if update: # Remove all flags linked to the current inspection # which are not in the current selection query = (FS("inspection_id") == inspection_id) if flag_ids: query &= ~(FS("flag_id").belongs(flag_ids)) fresource = s3db.resource("cr_shelter_inspection_flag", filter = query, ) fresource.delete(cascade=True) if flag_ids: # Determine which flags have been newly selected ftable = s3db.cr_shelter_inspection_flag if update: query = (ftable.inspection_id == inspection_id) & \ (ftable.deleted == False) rows = db(query).select(ftable.flag_id) new = set(flag_ids) - set(row.flag_id for row in rows) else: new = set(flag_ids) # Create links to newly selected flags ftable = s3db.cr_shelter_inspection_flag data = {"inspection_id": inspection_id, } for flag_id in new: data["flag_id"] = flag_id success = ftable.insert(**data) if not success: error = True break else: # Call onaccept to auto-create tasks record = Storage(data) record["id"] = 
success s3db.onaccept(ftable, record) else: error = True if error: db.rollback() output = {"a": s3_str(T("Error registering shelter inspection")), } else: output = {"m": s3_str(T("Registration successful")), } else: # Error - no shelter unit selected output = {"a": s3_str(T("No shelter unit selected")), } return json.dumps(output)
def inspection_ajax(r, **attr): """ Ajax-registration of shelter inspection Args: r: the CRUDRequest instance attr: controller parameters """ T = current.T db = current.db s3db = current.s3db # Load JSON data from request body s = r.body s.seek(0) try: data = json.load(s) except (ValueError, TypeError): r.error(400, current.ERROR.BAD_REQUEST) shelter_unit_id = data.get("u") if shelter_unit_id: # Register shelter inspection error = False # Read comments comments = data.get("c") # Find inspection record update = False itable = s3db.cr_shelter_inspection query = (itable.shelter_unit_id == shelter_unit_id) & \ (itable.date == current.request.utcnow.date()) & \ (itable.deleted != True) row = db(query).select(itable.id, limitby = (0, 1), ).first() if row: # Update this inspection update = True inspection_id = row.id row.update_record(comments = comments) else: # Create a new inspection inspection_id = itable.insert(shelter_unit_id = shelter_unit_id, comments = comments, ) if inspection_id: # Currently selected flags flag_ids = data.get("f") if update: # Remove all flags linked to the current inspection # which are not in the current selection query = (FS("inspection_id") == inspection_id) if flag_ids: query &= ~(FS("flag_id").belongs(flag_ids)) fresource = s3db.resource("cr_shelter_inspection_flag", filter = query, ) fresource.delete(cascade=True) if flag_ids: # Determine which flags have been newly selected ftable = s3db.cr_shelter_inspection_flag if update: query = (ftable.inspection_id == inspection_id) & \ (ftable.deleted == False) rows = db(query).select(ftable.flag_id) new = set(flag_ids) - set(row.flag_id for row in rows) else: new = set(flag_ids) # Create links to newly selected flags ftable = s3db.cr_shelter_inspection_flag data = {"inspection_id": inspection_id, } for flag_id in new: data["flag_id"] = flag_id success = ftable.insert(**data) if not success: error = True break else: # Call onaccept to auto-create tasks record = Storage(data) record["id"] = 
success s3db.onaccept(ftable, record) else: error = True if error: db.rollback() output = {"a": s3_str(T("Error registering shelter inspection")), } else: output = {"m": s3_str(T("Registration successful")), } else: # Error - no shelter unit selected output = {"a": s3_str(T("No shelter unit selected")), } return json.dumps(output)
Python
def available_capacity_represent(value, row=None): """ Color-coded representation of available shelter capacities """ if value is None: return "-" if value == 0: css = "shelter-full" elif value < 4: css = "shelter-low" else: css = "shelter-available" return SPAN(value, _class=css)
def available_capacity_represent(value, row=None):
    """
        Color-coded representation of available shelter capacities

        Args:
            value: the number of available places (int or None)
            row: the Row (unused, for representation API compatibility)

        Returns:
            SPAN with a CSS class indicating the capacity level,
            or "-" if the value is None
    """

    if value is None:
        return "-"

    # Use <= rather than == so that negative values (overcrowded
    # shelter) also render as "full" instead of merely "low"
    if value <= 0:
        css = "shelter-full"
    elif value < 4:
        css = "shelter-low"
    else:
        css = "shelter-available"

    return SPAN(value, _class=css)
Python
def filter_query(self,
                 query,
                 join = None,
                 left = None,
                 getids = False,
                 limitby = None,
                 orderby = None,
                 ):
    """
        Execute a query to determine the number/record IDs of all
        matching rows

        Args:
            query: the filter query
            join: the inner joins for the query
            left: the left joins for the query
            getids: extract the IDs of matching records
            limitby: tuple of indices (start, end) to extract only
                     a limited set of IDs
            orderby: ORDERBY expression for the query

        Returns:
            tuple of (TotalNumberOfRecords, RecordIDs);
            RecordIDs is None when getids is False
    """

    db = current.db
    table = self.table

    # Temporarily deactivate virtual fields
    # (they are irrelevant for counting/ID extraction and can be costly)
    vf = table.virtualfields
    osetattr(table, "virtualfields", [])

    if getids and limitby:
        # Large result sets expected on average (settings.base.bigtable)
        # => effort almost independent of result size, much faster
        # for large and very large filter results
        start = limitby[0]
        limit = limitby[1] - start

        # Don't penalize the smallest filter results (=effective filtering)
        if limit:
            maxids = max(limit, 200)
            limitby_ = (start, start + maxids)
        else:
            limitby_ = None

        # Extract record IDs
        # NOTE: groupby on the record ID de-duplicates rows that result
        # from the joins (acts like DISTINCT)
        field = table._id
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                limitby = limitby_,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        # Only return up to "limit" IDs, even if more were fetched
        results = rows[:limit] if limit else rows
        ids = [row[pkey] for row in results]

        totalids = len(rows)
        if limit and totalids >= maxids or start != 0 and not totalids:
            # The probe was truncated (or the page is beyond the result),
            # so the total is unknown => count all matching records
            cnt = table._id.count(distinct=True)
            row = db(query).select(cnt,
                                   join = join,
                                   left = left,
                                   cacheable = True,
                                   ).first()
            totalrows = row[cnt]
        else:
            # We already know how many there are
            totalrows = start + totalids

    elif getids:
        # Extract all matching IDs, then count them in Python
        # => effort proportional to result size, slightly faster
        # than counting separately for small filter results
        field = table._id
        rows = db(query).select(field,
                                join=join,
                                left=left,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        ids = [row[pkey] for row in rows]
        totalrows = len(ids)

    else:
        # Only count, do not extract any IDs (constant effort)
        field = table._id.count(distinct=True)
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                cacheable = True,
                                )
        ids = None
        totalrows = rows.first()[field]

    # Restore the virtual fields
    osetattr(table, "virtualfields", vf)

    return totalrows, ids
def filter_query(self,
                 query,
                 join = None,
                 left = None,
                 getids = False,
                 limitby = None,
                 orderby = None,
                 ):
    """
        Execute a query to determine the number/record IDs of all
        matching rows

        Args:
            query: the filter query
            join: the inner joins for the query
            left: the left joins for the query
            getids: extract the IDs of matching records
            limitby: tuple of indices (start, end) to extract only
                     a limited set of IDs
            orderby: ORDERBY expression for the query

        Returns:
            tuple of (TotalNumberOfRecords, RecordIDs);
            RecordIDs is None when getids is False
    """

    db = current.db
    table = self.table

    # Temporarily deactivate virtual fields
    # (they are irrelevant for counting/ID extraction and can be costly)
    vf = table.virtualfields
    osetattr(table, "virtualfields", [])

    if getids and limitby:
        # Large result sets expected on average (settings.base.bigtable)
        # => effort almost independent of result size, much faster
        # for large and very large filter results
        start = limitby[0]
        limit = limitby[1] - start

        # Don't penalize the smallest filter results (=effective filtering)
        if limit:
            maxids = max(limit, 200)
            limitby_ = (start, start + maxids)
        else:
            limitby_ = None

        # Extract record IDs
        # NOTE: groupby on the record ID de-duplicates rows that result
        # from the joins (acts like DISTINCT)
        field = table._id
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                limitby = limitby_,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        # Only return up to "limit" IDs, even if more were fetched
        results = rows[:limit] if limit else rows
        ids = [row[pkey] for row in results]

        totalids = len(rows)
        if limit and totalids >= maxids or start != 0 and not totalids:
            # The probe was truncated (or the page is beyond the result),
            # so the total is unknown => count all matching records
            cnt = table._id.count(distinct=True)
            row = db(query).select(cnt,
                                   join = join,
                                   left = left,
                                   cacheable = True,
                                   ).first()
            totalrows = row[cnt]
        else:
            # We already know how many there are
            totalrows = start + totalids

    elif getids:
        # Extract all matching IDs, then count them in Python
        # => effort proportional to result size, slightly faster
        # than counting separately for small filter results
        field = table._id
        rows = db(query).select(field,
                                join=join,
                                left=left,
                                orderby = orderby,
                                groupby = field,
                                cacheable = True,
                                )
        pkey = str(field)
        ids = [row[pkey] for row in rows]
        totalrows = len(ids)

    else:
        # Only count, do not extract any IDs (constant effort)
        field = table._id.count(distinct=True)
        rows = db(query).select(field,
                                join = join,
                                left = left,
                                cacheable = True,
                                )
        ids = None
        totalrows = rows.first()[field]

    # Restore the virtual fields
    osetattr(table, "virtualfields", vf)

    return totalrows, ids
Python
def extract(self,
            rows,
            pkey,
            columns,
            join = True,
            records = None,
            represent = False
            ):
    """
        Extract the data from rows and store them in self.field_data

        Args:
            rows: the rows
            pkey: the primary key
            columns: the columns to extract
            join: the rows are the result of a join query
            records: the records dict to merge the data into
            represent: collect unique values per field and estimate
                       representation efforts for list:types

        Returns:
            the records dict {record_id: {column: {value: None, ...}}}

        NOTE(review): assumes rows are sorted by the primary key,
        otherwise itertools.groupby would split the same record into
        multiple groups - confirm against caller
    """

    field_data = self.field_data
    effort = self.effort

    if records is None:
        records = {}

    # Build an accessor for a "table.field" column key;
    # for join results, values are nested one level deeper (row.table.field)
    def get(key):
        t, f = key.split(".", 1)
        if join:
            def getter(row):
                return ogetattr(ogetattr(row, t), f)
        else:
            def getter(row):
                return ogetattr(row, f)
        return getter

    getkey = get(pkey)
    getval = [get(c) for c in columns]

    from itertools import groupby
    # Group the rows by record ID (multiple rows per record can occur
    # with joins), then merge each group's values per column
    for k, g in groupby(rows, key=getkey):
        group = list(g)
        record = records.get(k, {})
        for idx, col in enumerate(columns):
            # Per-column metadata; values/records dicts are shared state
            # that is updated in-place ("joined" is unused here)
            fvalues, frecords, joined, list_type, virtual, json_type = field_data[col]
            # dict used as an ordered set of distinct values for this record
            values = record.get(col, {})
            lazy = False
            for row in group:
                try:
                    value = getval[idx](row)
                except AttributeError:
                    current.log.warning("Warning CRUDResource.extract: column %s not in row" % col)
                    value = None
                if lazy or callable(value):
                    # Lazy virtual field
                    # (once detected, call it for every row in the group)
                    value = value()
                    lazy = True
                if virtual and not list_type and type(value) is list:
                    # Virtual field that returns a list
                    list_type = True
                if list_type and value is not None:
                    if represent and value:
                        # Rough effort estimate for representing this list
                        effort[col] += 30 + len(value)
                    for v in value:
                        if v not in values:
                            values[v] = None
                        if represent and v not in fvalues:
                            fvalues[v] = None
                elif json_type:
                    # Returns unhashable types
                    # => serialize so the value can be used as a dict key
                    value = json.dumps(value)
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
                else:
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
            record[col] = values
            # Remember the first record seen per column (for represent)
            if k not in frecords:
                frecords[k] = record[col]
        records[k] = record

    return records
def extract(self,
            rows,
            pkey,
            columns,
            join = True,
            records = None,
            represent = False
            ):
    """
        Extract the data from rows and store them in self.field_data

        Args:
            rows: the rows
            pkey: the primary key
            columns: the columns to extract
            join: the rows are the result of a join query
            records: the records dict to merge the data into
            represent: collect unique values per field and estimate
                       representation efforts for list:types

        Returns:
            the records dict {record_id: {column: {value: None, ...}}}

        NOTE(review): assumes rows are sorted by the primary key,
        otherwise itertools.groupby would split the same record into
        multiple groups - confirm against caller
    """

    field_data = self.field_data
    effort = self.effort

    if records is None:
        records = {}

    # Build an accessor for a "table.field" column key;
    # for join results, values are nested one level deeper (row.table.field)
    def get(key):
        t, f = key.split(".", 1)
        if join:
            def getter(row):
                return ogetattr(ogetattr(row, t), f)
        else:
            def getter(row):
                return ogetattr(row, f)
        return getter

    getkey = get(pkey)
    getval = [get(c) for c in columns]

    from itertools import groupby
    # Group the rows by record ID (multiple rows per record can occur
    # with joins), then merge each group's values per column
    for k, g in groupby(rows, key=getkey):
        group = list(g)
        record = records.get(k, {})
        for idx, col in enumerate(columns):
            # Per-column metadata; values/records dicts are shared state
            # that is updated in-place ("joined" is unused here)
            fvalues, frecords, joined, list_type, virtual, json_type = field_data[col]
            # dict used as an ordered set of distinct values for this record
            values = record.get(col, {})
            lazy = False
            for row in group:
                try:
                    value = getval[idx](row)
                except AttributeError:
                    current.log.warning("Warning CRUDResource.extract: column %s not in row" % col)
                    value = None
                if lazy or callable(value):
                    # Lazy virtual field
                    # (once detected, call it for every row in the group)
                    value = value()
                    lazy = True
                if virtual and not list_type and type(value) is list:
                    # Virtual field that returns a list
                    list_type = True
                if list_type and value is not None:
                    if represent and value:
                        # Rough effort estimate for representing this list
                        effort[col] += 30 + len(value)
                    for v in value:
                        if v not in values:
                            values[v] = None
                        if represent and v not in fvalues:
                            fvalues[v] = None
                elif json_type:
                    # Returns unhashable types
                    # => serialize so the value can be used as a dict key
                    value = json.dumps(value)
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
                else:
                    if value not in values:
                        values[value] = None
                    if represent and value not in fvalues:
                        fvalues[value] = None
            record[col] = values
            # Remember the first record seen per column (for represent)
            if k not in frecords:
                frecords[k] = record[col]
        records[k] = record

    return records
Python
def widget(self, resource, values):
    """
        Render this widget as HTML helper object(s)

        Args:
            resource: the resource
            values: the search values from the URL query

        Returns:
            a DIV containing the date/datetime input element(s)

        Raises:
            SyntaxError: if rendered without resource and no _id given
    """

    css_base = self.css_base

    attr = self.attr
    opts_get = self.opts.get

    # CSS class and element ID
    css = attr.get("class")
    _class = "%s %s" % (css, css_base) if css else css_base
    _id = attr["_id"]

    if not resource and not _id:
        raise SyntaxError("%s: _id parameter required when rendered without resource." % \
                          self.__class__.__name__)

    # Picker options
    clear_text = opts_get("clear_text", None)
    hide_time = opts_get("hide_time", False)

    # Selectable Range
    if self._auto_range():
        minimum, maximum = self._options(resource)
    else:
        minimum = maximum = None

    # Generate the input elements
    filter_widget = DIV(_id=_id, _class=_class)
    append = filter_widget.append

    # Classes and labels for the individual date/time inputs
    T = current.T
    input_class = "%s-%s" % (css_base, "input")
    input_labels = self.input_labels

    get_variable = self._variable

    # Single field: render start+end inputs for that field;
    # two fields ("start|end" selector): one input per field
    fields = self.field
    if type(fields) is not list:
        fields = [fields]
        selector = self.selector
    else:
        selectors = self.selector.split("|")
        # "start" flags the first iteration of the two-field case
        start = True

    for field in fields:

        # Determine the field type
        if resource:
            rfield = S3ResourceField(resource, field)
            field = rfield.field
        else:
            rfield = field = None
        if not field:
            if rfield:
                # Virtual field
                tname, fname = rfield.tname, rfield.fname
            else:
                # Filter form without resource
                tname, fname = "notable", "datetime"
            ftype = opts_get("fieldtype", "datetime")
            # S3CalendarWidget requires a Field
            field = Field(fname, ftype, requires = IS_UTC_DATE())
            field.tablename = field._tablename = tname
        else:
            ftype = rfield.ftype

        if len(fields) == 1:
            operators = self.operator
        elif start:
            # First of two fields => interval start ("ge")
            operators = ["ge"]
            selector = selectors[0]
            start = False
        else:
            # Second of two fields => interval end ("le")
            operators = ["le"]
            selector = selectors[1]
            input_class += " end_date"

        # Do we want a timepicker?
        timepicker = False if ftype == "date" or hide_time else True
        # NOTE: input_class accumulates across loop iterations,
        # hence the membership check before appending
        if timepicker and "datetimepicker" not in input_class:
            input_class += " datetimepicker"
        if ftype != "date" and hide_time:
            # Indicate that this filter is for a datetime field but
            # with a hidden time selector (so it shall add a suitable
            # time fragment automatically)
            input_class += " hide-time"

        for operator in operators:

            input_id = "%s-%s" % (_id, operator)

            # Make the two inputs constrain each other
            set_min = set_max = None
            if operator == "ge":
                set_min = "#%s-%s" % (_id, "le")
            elif operator == "le":
                set_max = "#%s-%s" % (_id, "ge")

            # Instantiate the widget
            widget = S3CalendarWidget(timepicker = timepicker,
                                      minimum = minimum,
                                      maximum = maximum,
                                      set_min = set_min,
                                      set_max = set_max,
                                      clear_text = clear_text,
                                      )

            # Currently selected value
            dtstr = self._format_value(values,
                                       get_variable(selector, operator),
                                       timepicker = timepicker,
                                       )

            # Render the widget
            picker = widget(field,
                            dtstr,
                            _class = input_class,
                            _id = input_id,
                            _name = input_id,
                            )

            if operator in input_labels:
                label = DIV(LABEL("%s:" % T(input_labels[operator]),
                                  _for = input_id,
                                  ),
                            _class = "range-filter-label",
                            )
            else:
                label = ""

            # Append label and widget
            append(DIV(label,
                       DIV(picker,
                           _class = "range-filter-widget",
                           ),
                       _class = "range-filter-field",
                       ))

    return filter_widget
def widget(self, resource, values):
    """
        Render this widget as HTML helper object(s)

        Args:
            resource: the resource
            values: the search values from the URL query

        Returns:
            a DIV containing the date/datetime input element(s)

        Raises:
            SyntaxError: if rendered without resource and no _id given
    """

    css_base = self.css_base

    attr = self.attr
    opts_get = self.opts.get

    # CSS class and element ID
    css = attr.get("class")
    _class = "%s %s" % (css, css_base) if css else css_base
    _id = attr["_id"]

    if not resource and not _id:
        raise SyntaxError("%s: _id parameter required when rendered without resource." % \
                          self.__class__.__name__)

    # Picker options
    clear_text = opts_get("clear_text", None)
    hide_time = opts_get("hide_time", False)

    # Selectable Range
    if self._auto_range():
        minimum, maximum = self._options(resource)
    else:
        minimum = maximum = None

    # Generate the input elements
    filter_widget = DIV(_id=_id, _class=_class)
    append = filter_widget.append

    # Classes and labels for the individual date/time inputs
    T = current.T
    input_class = "%s-%s" % (css_base, "input")
    input_labels = self.input_labels

    get_variable = self._variable

    # Single field: render start+end inputs for that field;
    # two fields ("start|end" selector): one input per field
    fields = self.field
    if type(fields) is not list:
        fields = [fields]
        selector = self.selector
    else:
        selectors = self.selector.split("|")
        # "start" flags the first iteration of the two-field case
        start = True

    for field in fields:

        # Determine the field type
        if resource:
            rfield = S3ResourceField(resource, field)
            field = rfield.field
        else:
            rfield = field = None
        if not field:
            if rfield:
                # Virtual field
                tname, fname = rfield.tname, rfield.fname
            else:
                # Filter form without resource
                tname, fname = "notable", "datetime"
            ftype = opts_get("fieldtype", "datetime")
            # S3CalendarWidget requires a Field
            field = Field(fname, ftype, requires = IS_UTC_DATE())
            field.tablename = field._tablename = tname
        else:
            ftype = rfield.ftype

        if len(fields) == 1:
            operators = self.operator
        elif start:
            # First of two fields => interval start ("ge")
            operators = ["ge"]
            selector = selectors[0]
            start = False
        else:
            # Second of two fields => interval end ("le")
            operators = ["le"]
            selector = selectors[1]
            input_class += " end_date"

        # Do we want a timepicker?
        timepicker = False if ftype == "date" or hide_time else True
        # NOTE: input_class accumulates across loop iterations,
        # hence the membership check before appending
        if timepicker and "datetimepicker" not in input_class:
            input_class += " datetimepicker"
        if ftype != "date" and hide_time:
            # Indicate that this filter is for a datetime field but
            # with a hidden time selector (so it shall add a suitable
            # time fragment automatically)
            input_class += " hide-time"

        for operator in operators:

            input_id = "%s-%s" % (_id, operator)

            # Make the two inputs constrain each other
            set_min = set_max = None
            if operator == "ge":
                set_min = "#%s-%s" % (_id, "le")
            elif operator == "le":
                set_max = "#%s-%s" % (_id, "ge")

            # Instantiate the widget
            widget = S3CalendarWidget(timepicker = timepicker,
                                      minimum = minimum,
                                      maximum = maximum,
                                      set_min = set_min,
                                      set_max = set_max,
                                      clear_text = clear_text,
                                      )

            # Currently selected value
            dtstr = self._format_value(values,
                                       get_variable(selector, operator),
                                       timepicker = timepicker,
                                       )

            # Render the widget
            picker = widget(field,
                            dtstr,
                            _class = input_class,
                            _id = input_id,
                            _name = input_id,
                            )

            if operator in input_labels:
                label = DIV(LABEL("%s:" % T(input_labels[operator]),
                                  _for = input_id,
                                  ),
                            _class = "range-filter-label",
                            )
            else:
                label = ""

            # Append label and widget
            append(DIV(label,
                       DIV(picker,
                           _class = "range-filter-widget",
                           ),
                       _class = "range-filter-field",
                       ))

    return filter_widget