def custom_lookup_rows(self, key, values, fields=None):
    """
        Custom lookup method for activity rows, does a left join with
        the parent project.

        Args:
            values: the activity IDs
    """

    s3db = current.s3db

    atable = s3db.project_activity
    ptable = s3db.project_project

    left = ptable.on(ptable.id == atable.project_id)

    qty = len(values)
    if qty == 1:
        query = (atable.id == values[0])
        limitby = (0, 1)
    else:
        query = (atable.id.belongs(values))
        limitby = (0, qty)

    rows = current.db(query).select(atable.id,
                                    atable.name,
                                    ptable.code,
                                    left = left,
                                    limitby = limitby)
    self.queries += 1
    return rows
def project_activity_year_options():
    """
        Returns a dict of the options for the year virtual field
        used by the search widget

        orderby needed for postgres

        @ToDo: Migrate to stats_year_options()
    """

    db = current.db
    table = current.s3db.project_activity
    query = (table.deleted == False)

    min_field = table.date.min()
    start_date_min = db(query).select(min_field,
                                      orderby = min_field,
                                      limitby = (0, 1)
                                      ).first()[min_field]
    if start_date_min:
        start_year = start_date_min.year
    else:
        start_year = None

    max_field = table.end_date.max()
    end_date_max = db(query).select(max_field,
                                    orderby = max_field,
                                    limitby = (0, 1)
                                    ).first()[max_field]
    if end_date_max:
        end_year = end_date_max.year
    else:
        end_year = None

    if not start_year or not end_year:
        # Only one bound (or none) is available: return it alone.
        # NB a plain `{start_year: start_year} or {end_year: end_year}`
        # can never fall through to the second dict, because even
        # {None: None} is a truthy (non-empty) dict
        year = start_year or end_year
        return {year: year} if year else {}

    years = {}
    for year in range(start_year, end_year + 1):
        years[year] = year
    return years
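# A dependency-free sketch of the same year-range logic; the two date
# bounds below stand in for the min/max values read from the database above:
from datetime import date

def year_options(start, end):
    """Build a {year: year} options dict from two optional date bounds."""
    start_year = start.year if start else None
    end_year = end.year if end else None
    if not start_year or not end_year:
        year = start_year or end_year
        return {year: year} if year else {}
    return {year: year for year in range(start_year, end_year + 1)}

assert year_options(date(2020, 5, 1), date(2022, 1, 1)) == {2020: 2020,
                                                            2021: 2021,
                                                            2022: 2022}
assert year_options(None, date(2022, 1, 1)) == {2022: 2022}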
def project_time_day(row):
    """
        Virtual field for project_time - abbreviated string format for
        date, allows grouping per day instead of the individual datetime,
        used for project time report. Requires "date" to be in the
        additional report_fields

        Args:
            row: the Row
    """

    try:
        thisdate = row["project_time.date"]
    except AttributeError:
        return current.messages["NONE"]
    if not thisdate:
        return current.messages["NONE"]

    return thisdate.date().strftime("%d %B %y")
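# The "%d %B %y" format collapses datetimes onto calendar days; in an
# English locale it renders like this (the month name is locale-dependent):
import datetime

print(datetime.datetime(2024, 3, 5, 14, 30).date().strftime("%d %B %y"))
# -> "05 March 24"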
def project_task_controller():
    """
        Tasks Controller, defined in the model for use from
        multiple controllers for unified menus
    """

    T = current.T
    s3db = current.s3db
    auth = current.auth
    s3 = current.response.s3
    get_vars = current.request.get_vars

    # Pre-process
    def prep(r):
        tablename = "project_task"
        table = s3db.project_task
        statuses = s3.project_task_active_statuses
        crud_strings = s3.crud_strings[tablename]

        if r.record:
            if r.interactive:
                # Put the Comments in the RFooter
                project_ckeditor()
                s3.rfooter = LOAD("project", "comments.load",
                                  args = [r.id],
                                  ajax = True)

        if r.method == "datalist":
            # Set list_fields for renderer (project_task_list_layout)
            list_fields = ["name",
                           "description",
                           "location_id",
                           "date_due",
                           "pe_id",
                           "status",
                           #"organisation_id$logo",
                           "modified_by",
                           ]
            if current.deployment_settings.get_project_projects():
                list_fields.insert(5, (T("Project"), "project_id"))
            s3db.configure("project_task",
                           list_fields = list_fields,
                           )

        elif r.method in ("create", "create.popup"):
            project_id = r.get_vars.get("project_id", None)
            if project_id:
                # Coming from a profile page
                s3db.project_task.project_id.default = project_id
                # Can't do this for an inline form
                #field.readable = field.writable = False

        elif "mine" in get_vars:
            # Show only open tasks assigned to the current user
            query = (FS("status").belongs(statuses))

            if auth.user:
                hide_fields = ("pe_id", "status")
                if current.deployment_settings \
                          .get_project_my_tasks_include_team_tasks():
                    # Include tasks assigned to the current user's teams

                    # Look up all teams the current user is a member of
                    mtable = s3db.pr_group_membership
                    gtable = s3db.pr_group
                    gquery = (mtable.person_id == auth.s3_logged_in_person()) & \
                             (mtable.deleted == False) & \
                             (gtable.id == mtable.group_id) & \
                             (gtable.group_type == 3)
                    groups = current.db(gquery).select(gtable.pe_id)

                    # Filter query
                    pe_ids = set(group.pe_id for group in groups)
                    if pe_ids:
                        # Show assignee if teams are included
                        hide_fields = ("status",)
                        pe_ids.add(auth.user.pe_id)
                        query &= (FS("pe_id").belongs(pe_ids))
                    else:
                        query &= (FS("pe_id") == auth.user.pe_id)
                else:
                    # Filter by user pe_id
                    query &= (FS("pe_id") == auth.user.pe_id)

                # No need for assignee (always us) or status (always "assigned"
                # or "reopened") in list fields:
                list_fields = s3db.get_config(tablename, "list_fields")
                if list_fields:
                    list_fields[:] = (fn for fn in list_fields
                                         if fn not in hide_fields)

                # Adapt CRUD strings
                crud_strings.title_list = T("My Open Tasks")
                crud_strings.msg_list_empty = T("No Tasks Assigned")
            else:
                # Not logged-in, showing all open tasks
                crud_strings.title_list = T("Open Tasks")

            r.resource.add_filter(query)

            # Cannot add tasks in this list
            s3db.configure(tablename,
                           copyable = False,
                           listadd = False,
                           )

        elif "project" in get_vars:
            # Show Open Tasks for this Project
            project = get_vars.project
            ptable = s3db.project_project
            try:
                name = current.db(ptable.id == project).select(ptable.name,
                                                               limitby = (0, 1)
                                                               ).first().name
            except AttributeError:
                current.session.error = T("Project not Found")
                redirect(URL(args=None, vars=None))

            query = (FS("project_id") == project) & \
                    (FS("status").belongs(statuses))
            r.resource.add_filter(query)

            crud_strings.title_list = T("Open Tasks for %(project)s") % {"project": name}
            crud_strings.msg_list_empty = T("No Open Tasks for %(project)s") % {"project": name}

            # Add Activity
            list_fields = s3db.get_config(tablename, "list_fields")
            try:
                # Hide the project column since we know that already
                list_fields.remove((T("Project"), "project_id"))
            except ValueError:
                # Already removed
                pass

            s3db.configure(tablename,
                           copyable = False,
                           deletable = False,
                           # Block Add until we get the injectable component lookups
                           insertable = False,
                           list_fields = list_fields,
                           )

        elif "open" in get_vars:
            # Show Only Open Tasks
            crud_strings.title_list = T("All Open Tasks")
            r.resource.add_filter(table.status.belongs(statuses))

        if r.component:
            if r.component_name == "req":
                if current.deployment_settings.has_module("hrm"):
                    r.component.table.type.default = 3
                if r.method != "update" and r.method != "read":
                    # Hide fields which don't make sense in a Create form
                    s3db.req_create_form_mods()
            elif r.component_name == "human_resource":
                r.component.table.type.default = 2
        else:
            if not auth.s3_has_role("STAFF"):
                # Hide fields to avoid confusion (both of inputters & recipients)
                table = r.table
                field = table.time_actual
                field.readable = field.writable = False
        return True
    s3.prep = prep

    # Post-process
    def postp(r, output):
        if r.interactive:
            if not r.component and r.method != "import":
                # Maintain vars: why?
                update_url = URL(args=["[id]"], vars=get_vars)
                S3CRUD.action_buttons(r, update_url=update_url)
        return output
    s3.postp = postp

    if "mine" in get_vars or "project" in get_vars:
        # Show no filters in pre-filtered views
        hide_filter = True
    else:
        hide_filter = None

    return current.crud_controller("project", "task",
                                   hide_filter = hide_filter,
                                   rheader = s3db.project_rheader,
                                   )
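# The `list_fields[:]` slice assignment used in the "mine" branch filters a
# configuration list in place, so other references to the same list see the
# change; a quick standalone illustration:
list_fields = ["name", "pe_id", "status", "date_due"]
same_list = list_fields
hide_fields = ("pe_id", "status")
list_fields[:] = (fn for fn in list_fields if fn not in hide_fields)
assert same_list == ["name", "date_due"]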
def project_project_list_layout(list_id, item_id, resource, rfields, record,
                                icon="tasks"):
    """
        Default dataList item renderer for Projects on Profile pages

        Args:
            list_id: the HTML ID of the list
            item_id: the HTML ID of the item
            resource: the CRUDResource to render
            rfields: the S3ResourceFields to render
            record: the record as dict
    """

    raw = record._row
    record_id = raw["project_project.id"]
    item_class = "thumbnail"

    author = record["project_project.modified_by"]
    #date = record["project_project.modified_on"]

    name = record["project_project.name"]
    description = record["project_project.description"]
    start_date = record["project_project.start_date"]

    organisation = record["project_project.organisation_id"]
    organisation_id = raw["project_project.organisation_id"]
    location = record["project_location.location_id"]

    org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    org_logo = raw["org_organisation.logo"]
    if org_logo:
        org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
                         _class="media-object",
                         ),
                     _href=org_url,
                     _class="pull-left",
                     )
    else:
        # @ToDo: use a dummy logo image
        org_logo = A(IMG(_class="media-object"),
                     _href=org_url,
                     _class="pull-left",
                     )

    # Edit Bar
    # @ToDo: Consider using S3NavigationItem to hide the auth-related parts
    permit = current.auth.s3_has_permission
    table = current.db.project_project
    if permit("update", table, record_id=record_id):
        edit_btn = A(ICON("edit"),
                     _href=URL(c="project", f="project",
                               args=[record_id, "update.popup"]
                               ),
                     _class="s3_modal",
                     _title=get_crud_string(resource.tablename,
                                            "title_update"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       _title=get_crud_string(resource.tablename,
                                              "label_delete_button"),
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Render the item
    item = DIV(DIV(ICON(icon),
                   SPAN(A(name,
                          _href=URL(c="project", f="project",
                                    args=[record_id, "profile"])),
                        _class="card-title"),
                   SPAN(location, _class="location-title"),
                   SPAN(start_date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(org_logo,
                   DIV(DIV((description or ""),
                           DIV(author or "",
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )

    return item
def project_activity_list_layout(list_id, item_id, resource, rfields, record,
                                 icon="activity"):
    """
        Default dataList item renderer for Activities on Profile pages

        Args:
            list_id: the HTML ID of the list
            item_id: the HTML ID of the item
            resource: the CRUDResource to render
            rfields: the S3ResourceFields to render
            record: the record as dict
    """

    raw = record._row
    record_id = raw["project_activity.id"]
    item_class = "thumbnail"

    author = record["project_activity.modified_by"]
    #date = record["project_activity.modified_on"]

    name = record["project_activity.name"]
    description = record["project_activity.comments"]
    start_date = record["project_activity.date"]
    location = record["project_activity.location_id"]

    organisation_id = raw["project_activity_organisation.organisation_id"]
    if organisation_id:
        organisation = record["project_activity_organisation.organisation_id"]
        org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
        org_logo = raw["org_organisation.logo"]
        if org_logo:
            org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
                             _class="media-object",
                             ),
                         _href=org_url,
                         _class="pull-left",
                         )
        else:
            # @ToDo: use a dummy logo image
            org_logo = A(IMG(_class="media-object"),
                         _href=org_url,
                         _class="pull-left",
                         )
        organisation = A(organisation,
                         _href=org_url,
                         _class="card-organisation",
                         )
    else:
        organisation = ""

    # Edit Bar
    # @ToDo: Consider using S3NavigationItem to hide the auth-related parts
    permit = current.auth.s3_has_permission
    table = current.db.project_activity
    if permit("update", table, record_id=record_id):
        edit_btn = A(ICON("edit"),
                     _href=URL(c="project", f="activity",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id},
                               ),
                     _class="s3_modal",
                     _title=get_crud_string(resource.tablename,
                                            "title_update"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       _title=get_crud_string(resource.tablename,
                                              "label_delete_button"),
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Render the item
    item = DIV(DIV(ICON(icon),
                   SPAN(location, _class="location-title"),
                   SPAN(start_date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(DIV(A(name,
                         _href=URL(c="project", f="activity",
                                   args=[record_id, "profile"])),
                       _class="card-title"),
                   DIV(DIV((description or ""),
                           DIV(author or "",
                               " - ",
                               organisation,
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )

    return item
def project_task_list_layout(list_id, item_id, resource, rfields, record,
                             icon="tasks"):
    """
        Default dataList item renderer for Tasks on Profile pages

        Args:
            list_id: the HTML ID of the list
            item_id: the HTML ID of the item
            resource: the CRUDResource to render
            rfields: the S3ResourceFields to render
            record: the record as dict
    """

    raw = record._row
    record_id = raw["project_task.id"]
    item_class = "thumbnail"

    author = record["project_task.modified_by"]
    name = record["project_task.name"]
    assigned_to = record["project_task.pe_id"] or ""
    description = record["project_task.description"]
    date_due = record["project_task.date_due"]
    source_url = raw["project_task.source_url"]
    status = raw["project_task.status"]
    priority = raw["project_task.priority"]

    project_id = raw["project_task.project_id"]
    if project_id:
        project = record["project_task.project_id"]
        project = SPAN(A(project,
                         _href=URL(c="project", f="project",
                                   args=[project_id, "profile"])
                         ),
                       " > ",
                       _class="task_project_title"
                       )
    else:
        project = ""

    if priority in (1, 2):
        # Urgent / High
        priority_icon = DIV(ICON("exclamation"),
                            _class="task_priority")
    elif priority == 4:
        # Low
        priority_icon = DIV(ICON("arrow-down"),
                            _class="task_priority")
    else:
        priority_icon = ""

    # @ToDo: Support more than just the Wrike/MCOP statuses
    status_icon_colour = {2: "#AFC1E5",
                          6: "#C8D571",
                          7: "#CEC1FF",
                          12: "#C6C6C6",
                          }
    active_statuses = current.s3db.project_task_active_statuses
    status_icon = DIV(ICON("active" if status in active_statuses else "inactive"),
                      _class="task_status",
                      _style="background-color:%s" % (status_icon_colour.get(status, "none"))
                      )

    location = record["project_task.location_id"]

    org_logo = ""
    #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    #org_logo = raw["org_organisation.logo"]
    #if org_logo:
    #    org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
    #                     _class="media-object",
    #                     ),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )
    #else:
    #    # @ToDo: use a dummy logo image
    #    org_logo = A(IMG(_class="media-object"),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )

    # Edit Bar
    # @ToDo: Consider using S3NavigationItem to hide the auth-related parts
    permit = current.auth.s3_has_permission
    table = current.db.project_task
    if permit("update", table, record_id=record_id):
        edit_btn = A(ICON("edit"),
                     _href=URL(c="project", f="task",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id},
                               ),
                     _class="s3_modal",
                     _title=get_crud_string(resource.tablename,
                                            "title_update"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       _title=get_crud_string(resource.tablename,
                                              "label_delete_button"),
                       )
    else:
        delete_btn = ""
    if source_url:
        source_btn = A(ICON("link"),
                       _title=source_url,
                       _href=source_url,
                       _target="_blank"
                       )
    else:
        source_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   source_btn,
                   _class="edit-bar fright",
                   )

    # Render the item
    item = DIV(DIV(ICON(icon),
                   SPAN(location, _class="location-title"),
                   SPAN(date_due, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(org_logo,
                   priority_icon,
                   DIV(project,
                       name, _class="card-title task_priority"),
                   status_icon,
                   DIV(DIV((description or ""),
                           DIV(author,
                               " - ",
                               assigned_to,
                               #A(organisation,
                               #  _href=org_url,
                               #  _class="card-organisation",
                               #  ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )

    return item
def task_notify(form):
    """
        If the task is assigned to someone then notify them
    """

    form_vars = form.vars
    record = form.record

    pe_id = form_vars.pe_id
    if not pe_id:
        # Not assigned to anyone
        return

    user = current.auth.user
    if user and user.pe_id == pe_id:
        # Don't notify the user when they assign themselves tasks
        return

    status = form_vars.status
    if status is not None:
        status = int(status)
    else:
        if record and "status" in record:
            status = record.status
        else:
            table = current.s3db.project_task
            status = table.status.default
    if status not in current.response.s3.project_task_active_statuses:
        # No need to notify about closed tasks
        return

    if record is None or (int(pe_id) != record.pe_id):
        # Assignee has changed
        settings = current.deployment_settings

        if settings.has_module("msg"):
            # Notify assignee
            subject = "%s: Task assigned to you" % settings.get_system_name_short()
            url = "%s%s" % (settings.get_base_public_url(),
                            URL(c="project", f="task", args=[form_vars.id]))

            priority = form_vars.priority
            if priority is not None:
                priority = current.s3db.project_task.priority.represent(int(priority))
            else:
                priority = "unknown"

            message = "You have been assigned a Task:\n\n%s\n\n%s\n\n%s\n\n%s" % \
                      (url,
                       "%s priority" % priority,
                       form_vars.name,
                       form_vars.description or "")
            current.msg.send_by_pe_id(pe_id, subject, message)
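# The notification body is plain %-formatting; assembling it with some
# hypothetical values shows the resulting layout:
url = "https://example.org/eden/project/task/42"    # hypothetical task URL
message = "You have been assigned a Task:\n\n%s\n\n%s\n\n%s\n\n%s" % \
          (url,
           "%s priority" % "High",
           "Restock the warehouse",
           "Pallets 3-7 are empty")
print(message)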
def security_staff_type_multirepresent(opt):
    """ Represent a staff type in list views """

    db = current.db
    table = db.security_staff_type
    names = db(table.id > 0).select(table.id,
                                    table.name,
                                    ).as_dict()

    if isinstance(opt, (list, tuple)):
        opts = opt
        vals = [str(names.get(o)["name"]) for o in opts]
        multiple = True
    elif isinstance(opt, int):
        opts = [opt]
        vals = str(names.get(opt)["name"])
        multiple = False
    else:
        try:
            opt = int(opt)
        except (ValueError, TypeError):
            return current.messages["NONE"]
        else:
            opts = [opt]
            vals = str(names.get(opt)["name"])
            multiple = False

    if multiple:
        if len(opts) > 1:
            vals = ", ".join(vals)
        else:
            vals = vals[0] if vals else ""
    return vals
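# The branching above only distinguishes scalar vs. list input; a
# dependency-free sketch with a plain dict standing in for the DB lookup
# (both `names` and `staff_type_represent` are hypothetical stand-ins):
names = {1: {"name": "Guard"}, 2: {"name": "Supervisor"}}

def staff_type_represent(opt):
    if isinstance(opt, (list, tuple)):
        return ", ".join(str(names[o]["name"]) for o in opt)
    try:
        opt = int(opt)
    except (ValueError, TypeError):
        return "-"
    return str(names[opt]["name"])

assert staff_type_represent([1, 2]) == "Guard, Supervisor"
assert staff_type_represent("2") == "Supervisor"
assert staff_type_represent(None) == "-"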
def defaults():
    """ Safe defaults for names in case the module is disabled """

    return {"security_seized_item_status_opts": {},
            }
def accessible_pe_query(table = None,
                        instance_types = None,
                        method = "update",
                        c = None,
                        f = None,
                        ):
    """
        Construct a query for accessible person entities (pe_ids),
        for pe_id-based filters and selectors

        Args:
            table: the table to query (default: pr_pentity)
            instance_types: the instance types to authorize
            method: the access method for which permission is required
            c: override current.request.controller for permission check
            f: override current.request.function for permission check

        Returns:
            the Query
    """

    if instance_types is None:
        instance_types = ("org_organisation",)

    db = current.db
    s3db = current.s3db

    if table is None:
        table = s3db.pr_pentity

    query = None
    accessible_query = current.auth.s3_accessible_query
    for instance_type in instance_types:
        itable = s3db.table(instance_type)
        if not itable:
            continue

        dbset = db(accessible_query(method, itable, c=c, f=f))._select(itable.pe_id)
        subquery = table.pe_id.belongs(dbset)
        query = subquery if query is None else (query | subquery)

    return query
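# The accumulation idiom (`query = subquery if query is None else query | subquery`)
# builds one OR-combined condition from a variable number of parts; the same
# shape works on anything that supports `|`, e.g. plain sets:
query = None
for subquery in [{1, 2}, {2, 3}, {5}]:
    query = subquery if query is None else (query | subquery)
assert query == {1, 2, 3, 5}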
def remove_last_record_id(tablename=None):
    """
        Removes the ID of the last processed record of a table from
        the session

        Args:
            tablename: the tablename

        Note:
            If no tablename is specified, all last record IDs will
            be removed.
    """

    session_s3 = current.session.s3

    last_id = session_s3.get(LAST_ID)
    if last_id is not None:
        if tablename:
            last_id.pop(tablename, None)
        else:
            del session_s3[LAST_ID]
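# The same pop-or-clear pattern on plain dicts (LAST_ID is a module-level
# key name in the original source; the value here is a stand-in):
LAST_ID = "last_record_ids"
session_s3 = {LAST_ID: {"project_task": 4, "org_organisation": 7}}

session_s3[LAST_ID].pop("project_task", None)     # per-table removal
assert session_s3[LAST_ID] == {"org_organisation": 7}

del session_s3[LAST_ID]                           # remove everything
assert LAST_ID not in session_s3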
def s3_validate(table, field, value, record=None):
    """
        Validates a value for a field

        Args:
            table: Table
            field: Field or name of the field
            value: value to validate
            record: the existing database record, if available

        Returns:
            tuple (value, error)
    """

    default = (value, None)

    if isinstance(field, str):
        fieldname = field
        if fieldname in table.fields:
            field = table[fieldname]
        else:
            return default
    else:
        fieldname = field.name

    self_id = None

    if record is not None:
        try:
            v = record[field]
        except (KeyError, AttributeError):
            v = None
        if v and v == value:
            return default

        try:
            self_id = record[table._id]
        except (KeyError, AttributeError):
            pass

    requires = field.requires

    if field.unique and not requires:
        # Prevent unique-constraint violations
        field.requires = IS_NOT_IN_DB(current.db, str(field))
        if self_id:
            field.requires.set_self_id(self_id)

    elif self_id:
        # Initialize all validators for self_id
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for r in requires:
            if hasattr(r, "set_self_id"):
                r.set_self_id(self_id)
            if hasattr(r, "other") and \
               hasattr(r.other, "set_self_id"):
                r.other.set_self_id(self_id)

    try:
        value, error = field.validate(value)
    except Exception:
        # Oops - something went wrong in the validator:
        # write out a debug message, and continue anyway
        current.log.error("Validate %s: %s (ignored)" %
                          (field, sys.exc_info()[1]))
        return (None, None)
    else:
        return (value, error)
def s3_dev_toolbar():
    """
        Developer Toolbar - ported from gluon.Response.toolbar()
        Shows useful stuff at the bottom of the page in Debug mode
    """

    from gluon.dal import DAL
    from gluon.utils import web2py_uuid

    #admin = URL("admin", "default", "design", extension="html",
    #            args=current.request.application)

    BUTTON = TAG.button

    dbstats = []
    dbtables = {}
    infos = DAL.get_instances()
    for k, v in infos.items():
        dbstats.append(TABLE(*[TR(PRE(row[0]), "%.2fms" % (row[1] * 1000))
                               for row in v["dbstats"]]))
        dbtables[k] = {"defined": v["dbtables"]["defined"] or "[no defined tables]",
                       "lazy": v["dbtables"]["lazy"] or "[no lazy tables]",
                       }

    u = web2py_uuid()
    backtotop = A("Back to top", _href="#totop-%s" % u)

    # Convert lazy request.vars from property to Storage so they
    # will be displayed in the toolbar.
    request = copy.copy(current.request)
    request.update(vars=current.request.vars,
                   get_vars=current.request.get_vars,
                   post_vars=current.request.post_vars)

    # Filter out sensitive session details
    def no_sensitives(key):
        if key in ("hmac_key", "password") or \
           key[:8] == "_formkey" or \
           key[-4:] == "_key" or \
           key[-5:] == "token":
            return None
        return key

    return DIV(
        #BUTTON("design", _onclick="document.location='%s'" % admin),
        BUTTON("request",
               _onclick="$('#request-%s').slideToggle().removeClass('hide')" % u),
        #BUTTON("response",
        #       _onclick="$('#response-%s').slideToggle().removeClass('hide')" % u),
        BUTTON("session",
               _onclick="$('#session-%s').slideToggle().removeClass('hide')" % u),
        BUTTON("db tables",
               _onclick="$('#db-tables-%s').slideToggle().removeClass('hide')" % u),
        BUTTON("db stats",
               _onclick="$('#db-stats-%s').slideToggle().removeClass('hide')" % u),
        DIV(BEAUTIFY(request), backtotop,
            _class="hide", _id="request-%s" % u),
        #DIV(BEAUTIFY(current.response), backtotop,
        #    _class="hide", _id="response-%s" % u),
        DIV(BEAUTIFY(current.session, keyfilter=no_sensitives), backtotop,
            _class="hide", _id="session-%s" % u),
        DIV(BEAUTIFY(dbtables), backtotop,
            _class="hide", _id="db-tables-%s" % u),
        DIV(BEAUTIFY(dbstats), backtotop,
            _class="hide", _id="db-stats-%s" % u),
        _id="totop-%s" % u
        )
def s3_mark_required(fields,
                     mark_required=None,
                     label_html=None,
                     map_names=None):
    """
        Add asterisk to field label if a field is required

        Args:
            fields: list of fields (or a table)
            mark_required: list of field names which are always required
            label_html: function to render labels of required fields
            map_names: dict of alternative field names and labels
                       {fname: (name, label)}, used for inline components

        Returns:
            tuple, (dict of form labels, has_required) with has_required
            indicating whether there are required fields in this form
    """

    if not mark_required:
        mark_required = ()

    if label_html is None:
        # @ToDo: DRY this setting with s3.ui.locationselector.js
        label_html = s3_required_label

    labels = {}

    # Do we have any required fields?
    _required = False

    for field in fields:
        if map_names:
            fname, flabel = map_names[field.name]
        else:
            fname, flabel = field.name, field.label
        if not flabel:
            labels[fname] = ""
            continue
        if field.writable:
            validators = field.requires
            if isinstance(validators, IS_EMPTY_OR) and field.name not in mark_required:
                # Allow notnull fields to be marked as not required
                # if we populate them onvalidation
                labels[fname] = "%s:" % flabel
                continue
            else:
                required = field.required or field.notnull or \
                           field.name in mark_required
            if not validators and not required:
                labels[fname] = "%s:" % flabel
                continue
            if not required:
                if not isinstance(validators, (list, tuple)):
                    validators = [validators]
                for v in validators:
                    if hasattr(v, "options"):
                        if hasattr(v, "zero") and v.zero is None:
                            continue
                    if hasattr(v, "mark_required"):
                        if v.mark_required:
                            required = True
                            break
                        else:
                            continue
                    try:
                        error = v("")[1]
                    except TypeError:
                        # default validator takes no args
                        pass
                    else:
                        if error:
                            required = True
                            break
            if required:
                _required = True
                labels[fname] = label_html(flabel)
            else:
                labels[fname] = "%s:" % flabel
        else:
            labels[fname] = "%s:" % flabel

    return (labels, _required)
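# The "required" probe above calls each validator with an empty string and
# treats a non-None error as "this field is required"; a minimal stand-in
# validator (hypothetical, but following the same (value, error) protocol
# as web2py validators) makes the idiom concrete:
def not_empty(value):
    return (value, None if value else "enter a value")

error = not_empty("")[1]
assert error is not None        # -> the field would get an asterisk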
def s3_addrow(form, label, widget, comment, formstyle, row_id, position=-1):
    """
        Add a row to a form, applying formstyle

        Args:
            form: the FORM
            label: the label
            widget: the widget
            comment: the comment
            formstyle: the formstyle
            row_id: the form row HTML id
            position: position where to insert the row
    """

    if callable(formstyle):
        row = formstyle(row_id, label, widget, comment)
        if isinstance(row, (tuple, list)):
            for subrow in row:
                form[0].insert(position, subrow)
                if position >= 0:
                    position += 1
        else:
            form[0].insert(position, row)
    else:
        addrow(form, label, widget, comment, formstyle, row_id,
               position = position)
    return
def s3_keep_messages():
    """
        Retain user messages from previous request - prevents the messages
        from being swallowed by overhanging Ajax requests or intermediate
        pages with mandatory redirection (see s3_redirect_default)
    """

    response = current.response
    session = current.session

    session.confirmation = response.confirmation
    session.error = response.error
    session.flash = response.flash
    session.information = response.information
    session.warning = response.warning
def s3_redirect_default(location="", how=303, client_side=False, headers=None):
    """
        Redirect preserving response messages, useful when redirecting from
        index() controllers.

        Args:
            location: the url where to redirect
            how: what HTTP status code to use when redirecting
            client_side: if set to True, it triggers a reload of
                         the entire page when the fragment has been
                         loaded as a component
            headers: response headers
    """

    s3_keep_messages()

    redirect(location,
             how = how,
             client_side = client_side,
             headers = headers,
             )
def s3_has_foreign_key(field, m2m=True):
    """
        Check whether a field contains a foreign key constraint

        Args:
            field: the field (Field instance)
            m2m: also detect many-to-many links

        Note:
            Many-to-many references (list:reference) are not DB constraints,
            but pseudo-references implemented by the DAL. If you only want
            to find real foreign key constraints, then set m2m=False.
    """

    try:
        ftype = str(field.type)
    except Exception:
        # Virtual Field
        return False

    if ftype[:9] == "reference" or \
       m2m and ftype[:14] == "list:reference" or \
       current.s3db.virtual_reference(field):
        return True

    return False
def s3_get_foreign_key(field, m2m=True):
    """
        Resolve a field type into the name of the referenced table,
        the referenced key and the reference type (M:1 or M:N)

        Args:
            field: the field (Field instance)
            m2m: also detect many-to-many references

        Returns:
            tuple (tablename, key, multiple), where tablename is
            the name of the referenced table (or None if this field
            has no foreign key constraint), key is the field name of
            the referenced key, and multiple indicates whether this is
            a many-to-many reference (list:reference) or not.

        Note:
            Many-to-many references (list:reference) are not DB constraints,
            but pseudo-references implemented by the DAL. If you only want
            to find real foreign key constraints, then set m2m=False.
    """

    ftype = str(field.type)
    multiple = False
    if ftype[:9] == "reference":
        key = ftype[10:]
    elif m2m and ftype[:14] == "list:reference":
        key = ftype[15:]
        multiple = True
    else:
        key = current.s3db.virtual_reference(field)
        if not key:
            return (None, None, None)

    if "." in key:
        rtablename, key = key.split(".")
    else:
        rtablename = key
        rtable = current.s3db.table(rtablename)
        if rtable:
            key = rtable._id.name
        else:
            key = None

    return (rtablename, key, multiple)
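# Both foreign-key helpers key off the string form of the DAL field type;
# the parsing reduces to slicing off the "reference " / "list:reference "
# prefix. A dependency-free sketch (`parse_reference` is illustrative only):
def parse_reference(ftype):
    """Return (tablename, multiple) from a DAL type string, or (None, None)."""
    if ftype[:9] == "reference":
        return ftype[10:].split(".")[0], False
    if ftype[:14] == "list:reference":
        return ftype[15:].split(".")[0], True
    return None, None

assert parse_reference("reference org_organisation") == ("org_organisation", False)
assert parse_reference("list:reference pr_person.id") == ("pr_person", True)
assert parse_reference("integer") == (None, None)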
def s3_flatlist(nested):
    """ Iterator to flatten mixed iterables of arbitrary depth """

    for item in nested:
        # collections.abc.Iterable: the bare collections.Iterable alias
        # was removed in Python 3.10
        if isinstance(item, collections.abc.Iterable) and \
           not isinstance(item, str):
            for sub in s3_flatlist(item):
                yield sub
        else:
            yield item
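# Self-contained check of the flattening behaviour (strings are treated as
# atoms, not iterated character by character); assumes the module imports
# collections.abc:
import collections.abc

assert list(s3_flatlist([1, [2, [3, "ab"]], (4,)])) == [1, 2, 3, "ab", 4]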
Python | def s3_orderby_fields(table, orderby, expr=False):
"""
Introspect and yield all fields involved in a DAL orderby
expression.
Args:
table: the Table
orderby: the orderby expression
expr: True to yield asc/desc expressions as they are,
False to yield only Fields
"""
if not orderby:
return
adapter = S3DAL()
COMMA = adapter.COMMA
INVERT = adapter.INVERT
if isinstance(orderby, str):
items = orderby.split(",")
elif type(orderby) is Expression:
def expand(e):
if isinstance(e, Field):
return [e]
if e.op == COMMA:
return expand(e.first) + expand(e.second)
elif e.op == INVERT:
return [e] if expr else [e.first]
return []
items = expand(orderby)
elif not isinstance(orderby, (list, tuple)):
items = [orderby]
else:
items = orderby
s3db = current.s3db
tablename = table._tablename if table else None
for item in items:
if type(item) is Expression:
if not isinstance(item.first, Field):
continue
f = item if expr else item.first
elif isinstance(item, Field):
f = item
elif isinstance(item, str):
fn, direction = (item.strip().split() + ["asc"])[:2]
tn, fn = ([tablename] + fn.split(".", 1))[-2:]
if tn:
try:
f = s3db.table(tn, db_only=True)[fn]
except (AttributeError, KeyError):
continue
else:
if current.response.s3.debug:
raise SyntaxError('Tablename prefix required for orderby="%s"' % item)
else:
# Ignore
continue
if expr and direction[:3] == "des":
f = ~f
else:
continue
        yield f
Python | def s3_get_extension(request=None):
"""
Get the file extension in the path of the request
Args:
request: the request object (web2py request or CRUDRequest),
defaults to current.request
"""
if request is None:
request = current.request
extension = request.extension
if request.function == "ticket" and request.controller == "admin":
extension = "html"
elif "format" in request.get_vars:
ext = request.get_vars.format
if isinstance(ext, list):
ext = ext[-1]
extension = ext.lower() or extension
else:
ext = None
for arg in request.args[::-1]:
if "." in arg:
ext = arg.rsplit(".", 1)[1].lower()
break
if ext:
extension = ext
    return extension
Python | def s3_get_extension_from_url(url):
"""
Helper to read the format extension from a URL string
Args:
url: the URL string
Returns:
the format extension as string, if any
"""
ext = None
if not url:
return ext
try:
parsed = urlparse.urlparse(url)
except (ValueError, AttributeError):
pass
else:
if parsed.query:
params = parsed.query.split(",")
for param in params[::-1]:
k, v = param.split("=") if "=" in param else None, None
if k == "format":
ext = v.lower()
break
if not ext:
args = parsed.path.split("/")
for arg in args[::-1]:
if "." in arg:
ext = arg.rsplit(".", 1)[-1]
break
    return ext
Python | def s3_set_extension(url, extension=None):
"""
Add a file extension to the path of a url, replacing all
other extensions in the path.
Args:
url: the URL (as string)
        extension: the extension, defaults to the extension
                   of the current request
    """
    if extension is None:
extension = s3_get_extension()
#if extension == "html":
#extension = ""
u = urlparse.urlparse(url)
path = u.path
if path:
if "." in path:
elements = [p.split(".")[0] for p in path.split("/")]
else:
elements = path.split("/")
if extension and elements[-1]:
elements[-1] += ".%s" % extension
path = "/".join(elements)
return urlparse.urlunparse((u.scheme,
u.netloc,
path,
u.params,
u.query,
                                u.fragment))
Python | def make_link(self, path):
""" Create a link from a path """
tryFile = path.replace("\\", "/")
if os.path.isabs(tryFile) and os.path.isfile(tryFile):
folder, filename = os.path.split(tryFile)
ext = os.path.splitext(filename)[1]
app = current.request.args[0]
editable = {"controllers": ".py", "models": ".py", "views": ".html"}
l_ext = ext.lower()
f_endswith = folder.endswith
for key in editable.keys():
check_extension = f_endswith("%s/%s" % (app, key))
if l_ext == editable[key] and check_extension:
edit_url = URL(a = "admin",
c = "default",
f = "edit",
args = [app, key, filename],
)
return A('"' + tryFile + '"',
_href = edit_url,
_target = "_blank",
).xml()
return "" | def make_link(self, path):
""" Create a link from a path """
tryFile = path.replace("\\", "/")
if os.path.isabs(tryFile) and os.path.isfile(tryFile):
folder, filename = os.path.split(tryFile)
ext = os.path.splitext(filename)[1]
app = current.request.args[0]
editable = {"controllers": ".py", "models": ".py", "views": ".html"}
l_ext = ext.lower()
f_endswith = folder.endswith
for key in editable.keys():
check_extension = f_endswith("%s/%s" % (app, key))
if l_ext == editable[key] and check_extension:
edit_url = URL(a = "admin",
c = "default",
f = "edit",
args = [app, key, filename],
)
return A('"' + tryFile + '"',
_href = edit_url,
_target = "_blank",
).xml()
return "" |
Python | def make_links(self, traceback):
""" Make links using the given traceback """
lwords = traceback.split('"')
        # First element is the text before the first quote (if any)
        result = lwords[0] if lwords else ""
i = 1
while i < len(lwords):
link = self.make_link(lwords[i])
if link == "":
result += '"' + lwords[i]
else:
result += s3_str(link)
if i + 1 < len(lwords):
result += lwords[i + 1]
i = i + 1
i = i + 1
        return result
Python | def URL2(a=None, c=None, r=None):
"""
Modified version of URL from gluon/html.py
- used by views/layout_iframe.html for our jquery function
    Example:
        >>> URL2(a="a", c="c")
        "/a/c"
    generates a url "/a/c" corresponding to application a & controller c.
    If r=request is passed, a & c default to r.application and
    r.controller, respectively.
    The more typical usage is URL2(r=request), which generates a base
    url with the present application and controller.
The function (& optionally args/vars) are expected to be added
via jquery based on attributes of the item.
"""
application = controller = None
if r:
application = r.application
controller = r.controller
if a:
application = a
if c:
controller = c
if not (application and controller):
raise SyntaxError("not enough information to build the url")
#other = ""
url = "/%s/%s" % (application, controller)
    return url
Python | def keys(cls, template):
"""
Get the keys from a string template
Returns:
a list of keys (in order of appearance),
None for invalid string templates
Example:
keys = StringTemplateParser.keys("%(first_name)s %(last_name)s")
# Returns: ["first_name", "last_name"]
"""
parser = cls()
try:
template % parser
except TypeError:
return None
        return parser._keys
Python | def generate(self):
"""
        Generates a form key and stores it in the current session.
Returns:
the form key as str
Note:
The form key shall be stored in the GET response (e.g. as
hidden input in the form), and then sent back by the client
with the POST request that shall be protected.
Example:
formkey = FormKey("modify-record/%s" % record_id).generate()
form.hidden = {"_formkey": formkey}
"""
from uuid import uuid4
formkey = uuid4().hex
keyname = "_formkey[%s]" % self.formname
session = current.session
session[keyname] = session.get(keyname, [])[-9:] + [formkey]
        return formkey
Python | def verify(self, post_vars, variable="_formkey", invalidate=True):
"""
Verify the form key returned from the client.
Args:
post_vars: the POST vars dict
variable: the name of the POST variable containing the form key
invalidate: remove the form key when successful, so that it
cannot be reused for another submission of the
same form; this may need disabling for Ajax (unless
key renewal is implemented)
Returns:
True|False whether the formkey is valid
Example:
formkey = FormKey("modify-record/%s" % record_id)
if not formkey.verify(request.post_vars):
raise HTTP(403)
"""
formkey = post_vars.get(variable)
keyname = "_formkey[%s]" % self.formname
keys = current.session.get(keyname, [])
        if not formkey or formkey not in keys:
            return False
        # Honour the invalidate option (leave the key in place for Ajax)
        if invalidate:
            keys.remove(formkey)
        return True
Python | def system_info():
"""
System Information, for issue reporting and support; visible e.g. on
the default/about page
Returns:
a DIV with the system information
"""
request = current.request
settings = current.deployment_settings
INCORRECT = "Not installed or incorrectly configured."
UNKNOWN = "?"
subheader = lambda title: TR(TD(title, _colspan="2"), _class="about-subheader")
item = lambda title, value: TR(TD(title), TD(value))
# Technical Support Details
system_info = DIV(_class="system-info")
# Application version
try:
with open(os.path.join(request.folder, "VERSION"), "r") as version_file:
app_version = version_file.read().strip("\n")
except IOError:
app_version = UNKNOWN
template = settings.get_template()
if isinstance(template, (tuple, list)):
template = ", ".join(template)
trows = [subheader(settings.get_system_name_short()),
item("Template", template),
item("Version", app_version),
]
# Server Components
base_version = ".".join(map(str, version_info()))
try:
with open(os.path.join(request.env.web2py_path, "VERSION"), "r") as version_file:
web2py_version = version_file.read()[8:].strip("\n")
except IOError:
web2py_version = UNKNOWN
os_version = platform.platform()
trows.extend([subheader("Server"),
item("Base Release", base_version),
item("Web2Py", web2py_version),
item("HTTP Server", request.env.server_software),
item("Operating System", os_version),
])
# Database
db_info = [subheader("Database")]
dbtype = settings.get_database_type()
if dbtype == "sqlite":
try:
import sqlite3
            # SQLite library version (sqlite3.version is the driver version)
            sqlite_version = sqlite3.sqlite_version
except (ImportError, AttributeError):
sqlite_version = UNKNOWN
db_info.extend([item("SQLite", sqlite_version),
])
elif dbtype == "mysql":
database_name = settings.database.get("database", "sahana")
try:
# @ToDo: Support using pymysql & Warn
import MySQLdb
mysqldb_version = MySQLdb.__revision__
except (ImportError, AttributeError):
mysqldb_version = INCORRECT
mysql_version = UNKNOWN
else:
#mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
con = MySQLdb.connect(host = settings.database.get("host", "localhost"),
port = settings.database.get("port", None) or 3306,
db = database_name,
user = settings.database.get("username", "sahana"),
passwd = settings.database.get("password", "password")
)
cur = con.cursor()
cur.execute("SELECT VERSION()")
            mysql_version = cur.fetchone()[0]
db_info.extend([item("MySQL", mysql_version),
item("MySQLdb python driver", mysqldb_version),
])
else:
# Postgres
try:
import psycopg2
psycopg_version = psycopg2.__version__
except (ImportError, AttributeError):
psycopg_version = INCORRECT
pgsql_version = UNKNOWN
else:
con = psycopg2.connect(host = settings.db_params.get("host", "localhost"),
port = settings.db_params.get("port", None) or 5432,
database = settings.db_params.get("database", "eden"),
user = settings.db_params.get("username", "eden"),
password = settings.db_params.get("password", "password")
)
cur = con.cursor()
cur.execute("SELECT version()")
            pgsql_version = cur.fetchone()[0]
db_info.extend([item("PostgreSQL", pgsql_version),
item("psycopg2 python driver", psycopg_version),
])
trows.extend(db_info)
# Python and Libraries
python_version = platform.python_version()
try:
from lxml import etree
lxml_version = ".".join([str(i) for i in etree.LXML_VERSION])
except (ImportError, AttributeError):
lxml_version = INCORRECT
try:
import reportlab
reportlab_version = reportlab.Version
except (ImportError, AttributeError):
reportlab_version = INCORRECT
try:
import shapely
shapely_version = shapely.__version__
except (ImportError, AttributeError):
shapely_version = INCORRECT
try:
import xlrd
xlrd_version = xlrd.__VERSION__
except (ImportError, AttributeError):
xlrd_version = INCORRECT
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except (ImportError, AttributeError):
xlwt_version = INCORRECT
trows.extend([subheader("Python"),
item("Python", python_version),
item("lxml", lxml_version),
item("ReportLab", reportlab_version),
item("Shapely", shapely_version),
item("xlrd", xlrd_version),
item("xlwt", xlwt_version),
])
system_info.append(TABLE(*trows))
    return system_info
Python | def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
Args:
resource: the resource
values: the search values from the URL query
"""
ftype, levels, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
T = current.T
s3 = current.response.s3
attr = self._attr(resource)
# Filter class (default+custom)
css = attr.get("class")
_class = "%s %s" % (css, self.css_base) if css else self.css_base
attr["_class"] = _class
if "multiselect-filter-widget" not in _class:
_class = "%s multiselect-filter-widget" % _class
opts = self.opts
if not opts.get("hidden") and "active" not in _class:
_class = "%s active" % _class
# Header-option for multiselect
header_opt = opts.get("header", False)
if header_opt is False or header_opt is True:
setting = current.deployment_settings \
.get_ui_location_filter_bulk_select_option()
if setting is not None:
header_opt = setting
# Add one multi-select widget per level
field_name = self.field
fname = self._prefix(field_name) if resource else field_name
operator = self.operator
base_id = attr["_id"]
base_name = attr["_name"]
widgets = []
w_append = widgets.append
for index, level in enumerate(levels):
w_attr = dict(attr)
# Unique ID/name
w_attr["_id"] = "%s-%s" % (base_id, level)
w_attr["_name"] = name = "%s-%s" % (base_name, level)
# Dummy field
dummy_field = Storage(name=name, type=ftype)
# Find relevant values to pre-populate the widget
level_values = values.get("%s$%s__%s" % (fname, level, operator))
placeholder = T("Select %(location)s") % {"location": levels[level]["label"]}
w = S3MultiSelectWidget(search = opts.get("search", "auto"),
header = header_opt,
selectedList = opts.get("selectedList", 3),
noneSelectedText = placeholder,
)
if index == 0:
# Visible Multiselect Widget added to the page
w_attr["_class"] = _class
options = levels[level]["options"]
dummy_field.requires = IS_IN_SET(options, multiple=True)
widget = w(dummy_field, level_values, **w_attr)
else:
# Hidden+empty dropdown added to the page, options and
# multiselect will be activated when the higher level
# is selected
w_attr["_class"] = "%s hide" % _class
# Store the current jquery_ready
jquery_ready = s3.jquery_ready
s3.jquery_ready = []
# Build the widget with the MultiSelect activation script
dummy_field.requires = IS_IN_SET([], multiple=True)
widget = w(dummy_field, level_values, **w_attr)
# Extract the MultiSelect activation script from updated jquery_ready
script = s3.jquery_ready[0]
s3.jquery_ready = jquery_ready
# Wrap the script & reinsert
script = '''S3.%s=function(){%s}''' % (name.replace("-", "_"), script)
s3.js_global.append(script)
w_append(widget)
return TAG[""](*widgets) | def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
Args:
resource: the resource
values: the search values from the URL query
"""
ftype, levels, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
T = current.T
s3 = current.response.s3
attr = self._attr(resource)
# Filter class (default+custom)
css = attr.get("class")
_class = "%s %s" % (css, self.css_base) if css else self.css_base
attr["_class"] = _class
if "multiselect-filter-widget" not in _class:
_class = "%s multiselect-filter-widget" % _class
opts = self.opts
if not opts.get("hidden") and "active" not in _class:
_class = "%s active" % _class
# Header-option for multiselect
header_opt = opts.get("header", False)
if header_opt is False or header_opt is True:
setting = current.deployment_settings \
.get_ui_location_filter_bulk_select_option()
if setting is not None:
header_opt = setting
# Add one multi-select widget per level
field_name = self.field
fname = self._prefix(field_name) if resource else field_name
operator = self.operator
base_id = attr["_id"]
base_name = attr["_name"]
widgets = []
w_append = widgets.append
for index, level in enumerate(levels):
w_attr = dict(attr)
# Unique ID/name
w_attr["_id"] = "%s-%s" % (base_id, level)
w_attr["_name"] = name = "%s-%s" % (base_name, level)
# Dummy field
dummy_field = Storage(name=name, type=ftype)
# Find relevant values to pre-populate the widget
level_values = values.get("%s$%s__%s" % (fname, level, operator))
placeholder = T("Select %(location)s") % {"location": levels[level]["label"]}
w = S3MultiSelectWidget(search = opts.get("search", "auto"),
header = header_opt,
selectedList = opts.get("selectedList", 3),
noneSelectedText = placeholder,
)
if index == 0:
# Visible Multiselect Widget added to the page
w_attr["_class"] = _class
options = levels[level]["options"]
dummy_field.requires = IS_IN_SET(options, multiple=True)
widget = w(dummy_field, level_values, **w_attr)
else:
# Hidden+empty dropdown added to the page, options and
# multiselect will be activated when the higher level
# is selected
w_attr["_class"] = "%s hide" % _class
# Store the current jquery_ready
jquery_ready = s3.jquery_ready
s3.jquery_ready = []
# Build the widget with the MultiSelect activation script
dummy_field.requires = IS_IN_SET([], multiple=True)
widget = w(dummy_field, level_values, **w_attr)
# Extract the MultiSelect activation script from updated jquery_ready
script = s3.jquery_ready[0]
s3.jquery_ready = jquery_ready
# Wrap the script & reinsert
script = '''S3.%s=function(){%s}''' % (name.replace("-", "_"), script)
s3.js_global.append(script)
w_append(widget)
return TAG[""](*widgets) |
Python | def data_element(self, variable):
"""
Construct the hidden element that holds the
URL query term corresponding to an input element in the widget.
Args:
variable: the URL query variable
Returns:
list of hidden inputs
"""
widget_id = self.attr["_id"]
return [INPUT(_type = "hidden",
_id = "%s-%s-data" % (widget_id, level),
_class = "filter-widget-data %s-data" % self.css_base,
_value = variable[i],
)
for i, level in enumerate(self.levels)
                ]
Python | def ajax_options(self, resource):
"""
Look up filter options, to Ajax-update the filter widget
when resource data have changed
Args:
resource: the CRUDResource to look up the options from
Returns:
the options as dict
            {selector_id: [name, ...] or {name: local_name, ...}}
"""
attr = self._attr(resource)
levels, noopt = self._options(resource, inject_hierarchy=False)[1:3]
opts = {}
base_id = attr["_id"]
for level in levels:
if noopt:
opts["%s-%s" % (base_id, level)] = str(noopt)
else:
options = levels[level]["options"]
opts["%s-%s" % (base_id, level)] = options
        return opts
Python | def levels(self):
"""
Get the (initialized) levels options
Returns:
an ordered dict {Lx: {"label": label, options: [] or {}}}
"""
levels = self._levels
if levels is None:
opts = self.opts
# Lookup the appropriate labels from the GIS configuration
if "levels" in opts:
hierarchy = current.gis.get_location_hierarchy()
levels = OrderedDict()
for level in opts.levels:
levels[level] = hierarchy.get(level, level)
else:
levels = current.gis.get_relevant_hierarchy_levels(as_dict=True)
translate = self.translate
for level in levels:
levels[level] = {"label": levels[level],
"options": {} if translate else [],
}
self._levels = levels
        return levels
Python | def _options(self, resource, values=None, inject_hierarchy=True):
"""
Generate the options for the filter
Args:
resource: the resource to look up the options from
values: the currently selected values, a dict {selector: [values]}
inject_hierarchy: add the location hierarchy to global JS
Returns:
a tuple (ftype, levels, no_opts)
Notes:
- levels is a dict like:
{Lx: {"label": label,
"options": [name, ...] or {name: local_name, ...},
}}
- the injected hierarchy is a nested JSON object like:
{topLx: {name: {name: {name: ...}}}}
- the injected local names are a map:
{name: local_name, ...}
"""
s3db = current.s3db
opts = self.opts
translate = self.translate
ftype = "reference gis_location"
levels = self.levels
no_opts = opts.get("no_opts")
if not no_opts:
no_opts = current.T("No options available")
default = (ftype, levels, no_opts)
# Resolve the field selector
selector = None
if resource is None:
rname = opts.get("resource")
if rname:
resource = s3db.resource(rname)
selector = opts.get("lookup", "location_id")
else:
selector = self.field
filters_added = False
options = opts.get("options")
if options:
# Fixed options (=list of location IDs)
resource = s3db.resource("gis_location", id=options)
elif selector:
# Resolve selector against resource
rfield = S3ResourceField(resource, selector)
if not rfield.field or rfield.ftype != ftype:
raise TypeError("invalid selector: %s" % selector)
# Exclude empty FKs
resource.add_filter(FS(selector) != None)
# Filter out old Locations
resource.add_filter(FS("%s$end_date" % selector) == None)
filters_added = True
else:
# Neither fixed options nor resource to look them up
return default
# Lookup options
rows = self._lookup_options(levels,
resource,
selector = selector,
location_ids = options,
path = translate,
)
if filters_added:
# Remove them
rfilter = resource.rfilter
rfilter.filters.pop()
rfilter.filters.pop()
rfilter.query = None
rfilter.transformed = None
# Make sure the selected options are in the available options
if values:
rows = self._add_selected(rows, values, levels, translate)
if not rows:
# No options
return default
# Generate a name localization lookup dict
local_names = self._get_local_names(rows) if translate else {}
# Populate levels-options and hierarchy
toplevel = list(levels.keys())[0]
hierarchy = {toplevel: {}}
for row in rows:
h = hierarchy[toplevel]
for level in levels:
name = row[level]
if not name:
continue
options = levels[level]["options"]
if name not in options:
if translate:
options[name] = local_names.get(name, name)
else:
options.append(name)
if inject_hierarchy:
if name not in h:
h[name] = {}
h = h[name]
# Sort options
self._sort_options(levels, translate=translate)
# Inject the location hierarchy
if inject_hierarchy:
js_global = current.response.s3.js_global
jsons = lambda v: json.dumps(v, separators=JSONSEPARATORS)
hierarchy = "S3.location_filter_hierarchy=%s" % jsons(hierarchy)
js_global.append(hierarchy)
if translate:
# Also inject local names map
local_names = "S3.location_name_l10n=%s" % jsons(local_names)
js_global.append(local_names)
        return (ftype, levels, None)
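# Illustrative shape of the injected globals (location names hypothetical):
#   S3.location_filter_hierarchy={"L1": {"Province A": {"District A1": {}}}}
# and, when translation is enabled, the local names map:
#   S3.location_name_l10n={"Province A": "Provincia A"}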
Python | def _lookup_options(levels, resource, selector=None, location_ids=None, path=False):
"""
Look up the filter options from the resource: i.e. the immediate Lx
ancestors for all locations referenced by selector
Args:
levels: the relevant Lx levels, tuple of "L1", "L2" etc
resource: the master resource
selector: the selector for the location reference
location_ids: use these location_ids rather than looking them
up from the resource
path: include the Lx path in the result rows, to lookup
local names for options (which is done via IDs in
the path)
Returns:
gis_location Rows, or None
Note:
path=True potentially requires additional iterations in order
to reduce the paths to only relevant Lx levels (so that fewer
local names would be extracted) - which though limits the
performance gain if there actually are only few or no translations.
If that becomes a problem somewhere, we can make the iteration
mode controllable by a separate parameter.
"""
db = current.db
s3db = current.s3db
ltable = s3db.gis_location
if location_ids:
# Fixed set
location_ids = set(location_ids)
else:
# Lookup from resource
location_ids = set()
# Resolve the selector
rfield = resource.resolve_selector(selector)
# Get the joins for the selector
from ..resource import S3Joins
joins = S3Joins(resource.tablename)
joins.extend(rfield._joins)
join = joins.as_list()
# Add a join for gis_location
join.append(ltable.on(ltable.id == rfield.field))
# Accessible query for the master table
query = resource.get_query()
# Fields we want to extract for Lx ancestors
fields = [ltable.id] + [ltable[level] for level in levels]
if path:
fields.append(ltable.path)
# Suppress instantiation of LazySets in rows (we don't need them)
rname = db._referee_name
db._referee_name = None
rows = None
while True:
if location_ids:
query = ltable.id.belongs(location_ids)
join = None
# Extract all target locations resp. parents which are Lx
if path:
#...of relevant levels
relevant_lx = (ltable.level.belongs(levels))
else:
#...of any level
relevant_lx = (ltable.level != None)
lx = db(query & relevant_lx).select(join = join,
groupby = ltable.id,
*fields
)
# Add to result rows
if lx:
rows = (rows | lx) if rows else lx
# Pick subset for parent lookup
if lx and location_ids:
# ...all parents which are not Lx of relevant levels
remaining = location_ids - set(row.id for row in lx)
if remaining:
query = ltable.id.belongs(remaining)
else:
# No more parents to look up
break
else:
# ...all locations which are not Lx
if path:
# ...or not of relevant levels
query &= ((ltable.level == None) | (~(ltable.level.belongs(levels))))
else:
query &= (ltable.level == None)
# From subset, just extract the parent ID
query &= (ltable.parent != None)
parents = db(query).select(ltable.parent,
join = join,
groupby = ltable.parent,
)
location_ids = set(row.parent for row in parents if row.parent)
if not location_ids:
break
# Restore referee name
db._referee_name = rname
        return rows
Python | def _add_selected(rows, values, levels, translate=False):
"""
Add currently selected values to the options
Args:
rows: the referenced gis_location Rows
            values: the currently selected values as {selector: [name, ...]}
levels: the relevant hierarchy levels
translate: whether location names shall be localized
Returns:
the updated gis_location Rows
"""
db = current.db
s3db = current.s3db
ltable = s3db.gis_location
accessible = current.auth.s3_accessible_query("read", ltable)
fields = [ltable.id] + [ltable[l] for l in levels]
if translate:
fields.append(ltable.path)
for f, v in values.items():
if not v:
continue
level = "L%s" % f.split("L", 1)[1][0]
query = accessible & \
(ltable.level == level) & \
(ltable.name.belongs(v) & \
(ltable.end_date == None))
selected = db(query).select(*fields)
if rows:
rows &= selected
else:
rows = selected
        return rows
Python | def _get_local_names(rows):
"""
Look up the local names for locations
Args:
rows: the gis_location Rows (must contain "path" attribute)
Returns:
a mapping {name: local_name}
"""
local_names = {}
ids = set()
for row in rows:
path = row.path
if path:
path = path.split("/")
else:
if "id" in row:
path = current.gis.update_location_tree(row)
path = path.split("/")
if path:
ids |= set(path)
if ids:
s3db = current.s3db
ltable = s3db.gis_location
ntable = s3db.gis_location_name
query = (ltable.id.belongs(ids)) & \
(ntable.deleted == False) & \
(ntable.location_id == ltable.id) & \
(ntable.language == current.session.s3.language)
nrows = current.db(query).select(ltable.name,
ntable.name_l10n,
limitby = (0, len(ids)),
)
for row in nrows:
local_names[row.gis_location.name] = row.gis_location_name.name_l10n
        return local_names
Python | def _sort_options(levels, translate=False):
"""
Sort the filter options per level
Args:
levels: the levels-dict (see self.levels)
translate: whether location names have been localized
"""
if translate:
for level in levels:
options = levels[level]["options"]
levels[level]["options"] = OrderedDict(sorted(options.items()))
else:
for level in levels:
levels[level]["options"].sort() | def _sort_options(levels, translate=False):
"""
Sort the filter options per level
Args:
levels: the levels-dict (see self.levels)
translate: whether location names have been localized
"""
if translate:
for level in levels:
options = levels[level]["options"]
levels[level]["options"] = OrderedDict(sorted(options.items()))
else:
for level in levels:
levels[level]["options"].sort() |
Python | def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
Args:
resource: the resource
values: the search values from the URL query
"""
settings = current.deployment_settings
if not settings.get_gis_spatialdb():
current.log.warning("No Spatial DB => Cannot do Intersects Query yet => Disabling MapFilter")
return ""
attr_get = self.attr.get
opts_get = self.opts.get
css = attr_get("class")
_class = "%s %s" % (css, self.css_base) if css else self.css_base
_id = attr_get("_id")
# Hidden INPUT to store the WKT
hidden_input = INPUT(_type = "hidden",
_class = _class,
_id = _id,
)
# Populate with the value, if given
if values not in (None, []):
if type(values) is list:
values = values[0]
hidden_input["_value"] = values
# Map Widget
map_id = "%s-map" % _id
c, f = resource.tablename.split("_", 1)
c = opts_get("controller", c)
f = opts_get("function", f)
ltable = current.s3db.gis_layer_feature
query = (ltable.controller == c) & \
(ltable.function == f) & \
(ltable.deleted == False)
layer = current.db(query).select(ltable.layer_id,
ltable.name,
limitby=(0, 1)
).first()
try:
layer_id = layer.layer_id
except AttributeError:
# No prepop done?
layer_id = None
layer_name = resource.tablename
else:
layer_name = layer.name
feature_resources = [{"name" : current.T(layer_name),
"id" : "search_results",
"layer_id" : layer_id,
"filter" : opts_get("filter"),
},
]
button = opts_get("button")
if button:
# No need for the toolbar
toolbar = opts_get("toolbar", False)
else:
# Need the toolbar
toolbar = True
_map = current.gis.show_map(id = map_id,
height = opts_get("height", settings.get_gis_map_height()),
width = opts_get("width", settings.get_gis_map_width()),
collapsed = True,
callback = '''S3.search.s3map('%s')''' % map_id,
feature_resources = feature_resources,
toolbar = toolbar,
add_polygon = True,
)
return TAG[""](hidden_input,
button,
_map,
                       )
Python | def handlers(self):
"""
Returns the import handler registry for this instance
Returns:
a dict {name: function}
"""
handlers = self._handlers
if not handlers:
handlers = {"import_feeds": self.import_feeds,
"import_font": self.import_font,
"import_images": self.import_images,
"import_roles": self.import_roles,
"schedule_task": self.schedule_task,
"import_users": self.import_users,
"import_xml": self.import_xml,
}
# Template-defined task handlers
custom = current.deployment_settings.get_base_import_handlers()
if custom:
handlers.update(custom)
self._handlers = handlers
        return handlers
Python | def perform_tasks(self, path):
"""
Parses a tasks.cfg file, and runs all import tasks specified by it
Args:
path: the path to the tasks.cfg file (without filename)
Returns:
a list of error messages (empty list if there were no errors)
"""
errors = []
db = current.db
for task in self.parse_task_config(path):
task_type = task[0]
if not task_type:
errors.append(task[1])
continue
start = datetime.datetime.now()
if task_type == 1:
error = self.import_csv(*(task[1:6]))
if isinstance(error, list):
errors.extend(error)
elif error:
errors.append(error)
else:
db.commit()
csv_name = os.path.split(task[3])[1]
msg = "%s imported (%%s sec)" % csv_name
elif task_type == 2:
handler = self.handlers.get(task[1])
if not handler:
errors.append("Invalid task type %s" % task[1])
continue
try:
error = handler(*task[2:])
except TypeError as e:
errors.append(str(e))
else:
if isinstance(error, list):
errors.extend(error)
elif error:
errors.append(error)
else:
db.commit()
msg = "%s completed (%%s sec)" % task[1]
duration = datetime.datetime.now() - start
current.log.debug(msg % '{:.2f}'.format(duration.total_seconds()))
        return errors
Python | def parse_task_config(cls, path):
"""
Reads a tasks.cfg file, collects import tasks and resolves the
file paths for standard CSV imports
Args:
path: the path to the tasks.cfg file (without filename)
Returns:
a list of import tasks (tuples); a tuple with None as
first element indicates an error (second element is the
error message then)
"""
strip_comments = lambda row: row.split("#", 1)[0]
clean = lambda line: [item.strip('" ') for item in line]
tasks = []
with open(os.path.join(path, "tasks.cfg"), "r") as source:
            # Strip comments first, then drop lines that are left empty
            for line in csv.reader(filter(None, map(strip_comments, source))):
task = cls.parse_task_line(path, clean(line))
tasks.append(task)
        return tasks
Python | def parse_task_line(cls, path, line):
"""
Parses a line in the task configuration, and completes file paths
Args:
path: the path of the task configuration file
line: the CSV line to parse (as list of strings)
Returns:
- the task as tuple (type, *params)
- (None, error) if the line is invalid
"""
folder = current.request.folder
if line and line[0] == "*":
# Import using BulkImporter handler (*,handler,filename,args)
handler, filename = (line + EMPTYLINE)[1:3]
if not handler or filename is None:
return (None, "Missing argument(s) in task %s (line ignored)" % str(line))
# Source file location
filepath = cls._addpath(path, filename)
return (2, handler, filepath, *line[3:])
else:
# Import using XMLImporter (mod,res,csv_name,xslt_name,extra_data)
mod, res, csv_name, xslt_name, extra_data = (line + EMPTYLINE)[:5]
if not all((mod, res, csv_name, xslt_name)):
return (None, "Missing argument(s) in task %s (line ignored)" % str(line))
# CSV file location
csv_path = cls._addpath(path, csv_name)
# Transformation stylesheet location
base_path = os.path.join(folder, "static", "formats", "s3csv")
sub, filename = os.path.split(xslt_name)
if sub:
if sub[0] == ".":
# Alternative location relative to CSV file
location = (path, sub, filename)
else:
# Alternative location relative to base path
location = (base_path, sub, filename)
else:
# Standard location
location = (base_path, mod, filename)
xslt_path = os.path.normpath(os.path.join(*location))
if not os.path.exists(xslt_path):
return (None, "Transformation stylesheet not found: %s" % xslt_path)
return (1, mod, res, csv_path, xslt_path, extra_data)
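# Sketch of the task tuples produced for the two sample lines above
# (paths hypothetical, resolved via _addpath and the stylesheet lookup):
#   (1, "org", "organisation", ".../organisation.csv",
#    ".../static/formats/s3csv/org/organisation.xsl", None)
#   (2, "import_roles", ".../auth_roles.csv")
# A tuple starting with None signals a parsing error instead, e.g.
#   (None, "Transformation stylesheet not found: ...")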
Python
def import_csv(cls, prefix, name, csv_path, xslt_path, extra_data=None):
"""
Imports CSV data, using S3CSV transformation stylesheet
Args:
prefix: the table name prefix
name: the table name without prefix
csv_path: the path to the source file, a local file system path
or a http/https URL
xslt_path: the path to the transformation stylesheet, a local
file system path, or a http/https URL
extra_data: extra data to add to the CSV (as JSON string)
Returns:
error message(s) on failure, otherwise None
"""
current.auth.ignore_min_password_length()
s3db = current.s3db
# Customise and instantiate the resource
tablename = "%s_%s" % (prefix, name)
if not s3db.customised(tablename):
from ..controller import CRUDRequest
r = CRUDRequest(prefix, name, current.request)
r.customise_resource(tablename)
try:
resource = s3db.resource(tablename)
except AttributeError:
return "Table %s not found, import skipped" % tablename
# Decode extra data
if extra_data:
try:
decoded = json.loads(unescape(extra_data, {"'": '"'}))
except JSONERRORS:
return "Invalid extra data JSON: %s" % str(extra_data)
else:
if not isinstance(decoded, dict):
return "Invalid extra data type (dict expected): %s" % str(extra_data)
else:
extra_data = decoded
# Detect ZIP file extension
sp = csv_path.rsplit(".", 1)
zipped = len(sp) > 1 and sp[-1] == "zip"
# Import from source
auth = current.auth
auth.rollback = True
try:
with cls._load(csv_path) as source:
if zipped:
data = cls._extract_from_zip(source)
if data is None:
raise IOError("Could not unpack %s" % csv_path)
else:
data = source
result = resource.import_xml(data,
source_type = "csv",
stylesheet = xslt_path,
extra_data = extra_data,
)
except IOError as e:
return str(e)
except SyntaxError as e:
return "Failed to import %s (%s): %s" % (csv_path, xslt_path, e)
finally:
auth.rollback = False
# Collect import errors
error = result.error
if error:
errors = ["%s - %s: %s" % (csv_path, tablename, error)]
xml_errors = current.xml.collect_errors(result.error_tree)
if xml_errors:
errors.extend(xml_errors)
# Must roll back if there was an error!
current.db.rollback()
else:
errors = None
return errors
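# Hedged usage sketch (class name and paths are assumptions): on
# success import_csv returns None, otherwise an error message or a
# list of error messages that the caller can log and report.
#
# errors = BulkImporter.import_csv("org", "organisation",
#                                  "/path/to/organisation.csv",
#                                  "/path/to/org/organisation.xsl")
# if errors:
#     current.log.error(errors)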
Python
def import_roles(cls, filepath):
"""
Imports user roles and permissions from CSV
Args:
filepath: the path to source file
Returns:
error message(s) on error, otherwise None
"""
try:
with open(filepath, "r", encoding="utf-8") as source:
reader = csv.DictReader(source)
roles = {}
for row in reader:
cls._add_rule(roles, row)
create_role = current.auth.s3_create_role
for name, role in roles.items():
create_role(name,
role.get("description"),
*role["rules"],
**role["kwargs"],
)
except IOError:
return "Unable to open file %s" % filepath
return None
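# Illustration (minimal sketch; the column set is inferred from
# _add_rule() below, while the permission encoding is an assumption to
# be checked against _parse_permissions): rows with the same "role"
# value accumulate into one role with several permission rules.
#
#   role,description,uid,controller,function,table,oacl,uacl,entity
#   ORG_ADMIN_X,Example role,org_admin_x,org,,,READ|UPDATE,READ,any
#   ORG_ADMIN_X,,,,,org_facility,ALL,READ,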
Python
def import_images(cls, filepath, tablename, keyfield, imagefield):
"""
Imports images, such as organisation logos
Args:
filepath: the path to source file
tablename: the name of the table
keyfield: the field used to identify the record
imagefield: the field to store the image
Returns:
error message(s) on error, otherwise None
Example:
bi.import_images("org_logos.csv", "org_organisation", "name", "logo")
...with a file "org_logos.csv" like:
id, file
Sahana Software Foundation, sahanalogo.jpg
American Red Cross, icrc.gif
"""
table = current.s3db.table(tablename)
if not table:
return "Table not found: %s" % tablename
if keyfield not in table.fields:
return "Invalid key field: %s" % keyfield
key = table[keyfield]
base_query = (table.deleted == False)
path = os.path.split(filepath)[0]
errors = []
try:
with open(filepath, "r", encoding="utf-8") as source:
data = csv.DictReader(source)
for item in data:
if not item:
continue
value = item.get("id")
image = item.get("file")
if not value or not image:
continue
image = os.path.join(path, image)
error = None
try:
cls._store_image(table,
base_query & (key==value),
table[imagefield],
image,
)
except KeyError as e:
# Record not found
error = "Record %s=%s not found" % (keyfield, value)
except (IOError, AttributeError, ValueError) as e:
# Other error
error = "Image import failed: %s" % str(e)
if error:
errors.append(error)
except IOError as e:
return "Image list not accessible: %s" % e
return errors if errors else None
Python
def import_font(filepath, url):
"""
Installs a font in static/fonts
Args:
filepath: path to the source file (ignored)
url: the font file url, or keyword "unifont" to fetch from
standard location
Returns:
error message(s) on error, otherwise None
"""
if url == "unifont":
url = "http://unifoundry.com/pub/unifont/unifont-14.0.01/font-builds/unifont-14.0.01.ttf"
filename, extension = "unifont.ttf", "ttf"
else:
filename, extension = url.split("/")[-1].rsplit(".", 1)
if extension not in ("ttf", "gz", "zip"):
return "Unsupported font extension: %s" % extension
filename = "%s.ttf" % filename
font_path = os.path.join(current.request.folder, "static", "fonts")
if os.path.exists(os.path.join(font_path, filename)):
# Already installed
current.log.warning("Using cached copy of %s" % filename)
return None
# Change to the font directory
cwd = os.getcwd()
os.chdir(font_path)
# Fetch the font file
try:
stream = fetch(url)
except URLError as e:
os.chdir(cwd)
return str(e)
# Unpack and store the font
try:
if extension == "gz":
import tarfile
tf = tarfile.open(fileobj=stream)
tf.extractall()
elif extension == "zip":
import zipfile
zf = zipfile.ZipFile(stream)
zf.extractall()
else:
f = open(filename, "wb")
f.write(stream)
f.close()
finally:
# Restore the previous working directory
os.chdir(cwd)
return None
Python
def _addpath(path, filename):
"""
Adds the path to a source file
Args:
path: the base path (i.e. where the tasks.cfg is)
filename: the file name as specified in tasks.cfg
Returns:
the updated file name
"""
if filename:
sp = filename.split("://", 1)
if sp[0] in ("http", "https") and len(sp) > 1:
filepath = filename
else:
template, filename = os.path.split(filename)
if template:
# File in other template
path = os.path.join(current.request.folder,
"modules",
"templates",
template,
)
filepath = os.path.join(path, filename)
else:
filepath = None
return filepath
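# Illustration of the resolution rules above (names hypothetical):
#   "organisation.csv"       -> <path>/organisation.csv
#   "IFRC/organisation.csv"  -> <folder>/modules/templates/IFRC/organisation.csv
#   "https://host/data.csv"  -> returned unchanged
#   "" or None               -> None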
Python
def _load(path):
"""
Opens an import source from path
Args:
path: local file path, or a http/https URL
Returns:
binary, file-like object with the source data
Raises:
IOError: if the source is not accessible
"""
sp = path.split("://", 1)
if sp[0] in ("http", "https") and len(sp) > 1:
import requests, tempfile
try:
r = requests.get(path, stream=True)
except requests.RequestException as e:
raise IOError("Failed to load source %s: %s" % (path, type(e).__name__))
else:
source = tempfile.TemporaryFile()
for chunk in r.iter_content(chunk_size=65536):
source.write(chunk)
source.seek(0)
else:
source = open(path, "rb")
return source
Python
def _extract_from_zip(source, dataformat="csv"):
"""
Extracts a source file from a ZIP archive
Args:
source: the ZIP archive (file-like object, or file name)
dataformat: the format extension of the source file
Returns:
BytesIO, the data from the first file with a matching
format extension found in the archive, or None if the
archive is not readable or does not contain any file
with a matching extension
"""
import zipfile
data = None
try:
with zipfile.ZipFile(source) as zipped:
for f in zipped.infolist():
filename = f.filename
extension = filename.split(".")[-1]
if extension == dataformat:
data = BytesIO(zipped.read(filename))
break
except zipfile.BadZipfile:
pass
return data
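# Self-contained sketch of the extraction pattern above (illustrative;
# the unqualified call assumes the staticmethod is in scope):
#
# import zipfile
# from io import BytesIO
#
# archive = BytesIO()
# with zipfile.ZipFile(archive, "w") as zf:
#     zf.writestr("data.csv", "id,name\n1,Example\n")
# archive.seek(0)
# extracted = _extract_from_zip(archive)
# assert extracted.read() == b"id,name\n1,Example\n"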
Python
def _add_rule(cls, roles, row):
"""
Parses a single CSV row for import_roles, and updates the
roles-dict with the data
Args:
roles: the roles-dict to update
row: the CSV row
"""
if not row:
return
row_get = row.get
name = row_get("role")
if name not in roles:
role = roles[name] = {"kwargs": {}, "rules": []}
else:
role = roles[name]
# Update description
description = row_get("description")
if description:
role["description"] = description
# Update role keyword args (uid and flags)
kwargs = role["kwargs"]
uid = row_get("uid")
if uid:
kwargs["uid"] = uid
for flag in ("hidden", "system", "protected"):
value = row_get(flag)
if value:
if value.lower() in ("true", "yes", "1"):
kwargs[flag] = True
elif value.lower() in ("false", "no", "0"):
kwargs[flag] = False
# Parse the rule
rule = {param: row_get(keyword) or None
for keyword, param in (("controller", "c"),
("function", "f"),
("table", "t"),
)}
if any(rule.values()):
parse_permissions = cls._parse_permissions
for keyword in ("oacl", "uacl"):
value = row_get(keyword)
if value:
rule[keyword] = parse_permissions(value)
entity = row_get("entity")
if entity:
if entity != "any":
try:
entity = int(entity)
except ValueError:
entity = cls._lookup_pe(entity)
rule["entity"] = entity
role["rules"].append(rule) | def _add_rule(cls, roles, row):
"""
Parses a single CSV row for import_roles, and updates the
roles-dict with the data
Args:
roles: the roles-dict to update
row: the CSV row
"""
if not row:
return
row_get = row.get
name = row_get("role")
if name not in roles:
role = roles[name] = {"kwargs": {}, "rules": []}
else:
role = roles[name]
# Update description
description = row_get("description")
if description:
role["description"] = description
# Update role keyword args (uid and flags)
kwargs = role["kwargs"]
uid = row_get("uid")
if uid:
kwargs["uid"] = uid
for flag in ("hidden", "system", "protected"):
value = row_get(flag)
if value:
if value.lower() in ("true", "yes", "1"):
kwargs[flag] = True
elif value.lower() in ("false", "no", "0"):
kwargs[flag] = False
# Parse the rule
rule = {param: row_get(keyword) or None
for keyword, param in (("controller", "c"),
("function", "f"),
("table", "t"),
)}
if any(rule.values()):
parse_permissions = cls._parse_permissions
for keyword in ("oacl", "uacl"):
value = row_get(keyword)
if value:
rule[keyword] = parse_permissions(value)
entity = row_get("entity")
if entity:
if entity != "any":
try:
entity = int(entity)
except ValueError:
entity = cls._lookup_pe(entity)
rule["entity"] = entity
role["rules"].append(rule) |
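# Illustration (sketch): a CSV row such as
#   {"role": "ORG_ADMIN_X", "uid": "org_admin_x",
#    "controller": "org", "uacl": "READ", "hidden": "true"}
# extends the roles dict roughly as:
#   {"ORG_ADMIN_X": {"kwargs": {"uid": "org_admin_x", "hidden": True},
#                    "rules": [{"c": "org", "f": None, "t": None,
#                               "uacl": <parsed by _parse_permissions>}],
#                    }}
# which import_roles() then passes to auth.s3_create_role().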
Python
def _store_image(table, query, field, filepath):
"""
Store an image in a record
Args:
table: the Table
query: the Query to retrieve the record
field: the Field to store the image
filepath: the path to the image file
Raises:
KeyError: if the record was not found
ValueError: if the image is invalid
"""
db = current.db
s3db = current.s3db
audit = current.audit
table_id = table._id
# Get the record
record = db(query).select(table_id, limitby=(0, 1)).first()
if not record:
raise KeyError("Record not found")
record_id = record[table_id]
filename = os.path.split(filepath)[1]
with open(filepath, "rb") as image:
# Validate the image
error = field.validate(Storage(filename=filename, file=image))[1]
if error:
raise ValueError("Invalid image %s: %s" % (filename, error))
# Store it in the record
data = {field.name: field.store(image, filename)}
record.update_record(**data)
# Postprocess the record update
prefix, name = str(table).split("_", 1)
audit("update", prefix, name,
form = Storage(vars=Storage(record)),
record = record_id,
representation = "csv",
)
s3db.update_super(table, record)
s3db.onaccept(table, record, method="update") | def _store_image(table, query, field, filepath):
"""
Store an image in a record
Args:
table: the Table
query: the Query to retrieve the record
field: the Field to store the image
filepath: the path to the image file
Raises:
KeyError: if the record was not found
ValueError: if the image is invalid
"""
db = current.db
s3db = current.s3db
audit = current.audit
table_id = table._id
# Get the record
record = db(query).select(table_id, limitby=(0, 1)).first()
if not record:
raise KeyError("Record not found")
record_id = record[table_id]
filename = os.path.split(filepath)[1]
with open(filepath, "rb") as image:
# Validate the image
error = field.validate(Storage(filename=filename, file=image))[1]
if error:
raise ValueError("Invalid image %s: %s" % (filename, error))
# Store it in the record
data = {field.name: field.store(image, filename)}
record.update_record(**data)
# Postprocess the record update
prefix, name = str(table).split("_", 1)
audit("update", prefix, name,
form = Storage(vars=Storage(record)),
record = record_id,
representation = "csv",
)
s3db.update_super(table, record)
s3db.onaccept(table, record, method="update") |
Python
def cascade(cls, table, record_ids, rules):
"""
Apply cascade of rules to anonymize records
Args:
table: the Table
record_ids: a set of record IDs
rules: the rules for this Table
Raises:
Exception: if the cascade failed due to DB constraints
or invalid rules; callers should roll back
the transaction if an exception is raised
"""
from ..resource import FS, S3Joins
s3db = current.s3db
pkey = table._id.name
cascade = rules.get("cascade")
if cascade:
fieldnames = set(rule.get("match", pkey) for _, rule in cascade)
if pkey not in fieldnames:
fieldnames.add(pkey)
fields = [table[fn] for fn in fieldnames]
db = current.db
rows = db(table._id.belongs(record_ids)).select(*fields)
for tablename, rule in cascade:
lookup = rule.get("lookup")
if lookup:
# Explicit look-up function, call with master table+rows,
# as well as the name of the related table; should return
# a set/tuple/list of record ids in the related table
ids = lookup(table, rows, tablename)
else:
key = rule.get("key")
if not key:
continue
field = rule.get("match", pkey)
match = set(row[field] for row in rows)
# Resolve key and construct query
resource = s3db.resource(tablename, components=[])
rq = FS(key).belongs(match)
query = rq.query(resource)
# Construct necessary joins
joins = S3Joins(tablename)
joins.extend(rq._joins(resource)[0])
joins = joins.as_list()
# Extract the target table IDs
target_rows = db(query).select(resource._id,
join = joins,
)
ids = set(row[resource._id.name] for row in target_rows)
# Recurse into related table
if ids:
cls.cascade(resource.table, ids, rule)
# Apply field rules
field_rules = rules.get("fields")
if field_rules:
cls.apply_field_rules(table, record_ids, field_rules)
# Apply deletion rules
if rules.get("delete"):
resource = s3db.resource(table, id=list(record_ids))
resource.delete(cascade=True)
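# Illustration (minimal sketch of a rules dict as consumed above;
# table and field names are hypothetical):
#
# rules = {"fields": {...},             # field rules, see apply_field_rules
#          "cascade": [("pr_address",   # related table
#                       {"key": "pe_id",    # key to match in related table
#                        "match": "pe_id",  # matching field in this table
#                        "delete": True,    # delete the related records
#                        }),
#                      ],
#          }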
Python
def widget(cls,
r,
label = "Anonymize",
ajaxURL = None,
_class = "action-lnk",
):
"""
Render an action item (link or button) to anonymize the
target record of a CRUDRequest, which can be embedded in
the record view
Args:
r: the CRUDRequest
label: The label for the action item
ajaxURL: The URL for the AJAX request
_class: HTML class for the action item
Returns:
the action item (an HTML helper instance), or an empty
string if no anonymize-rules are configured for the
target table, no target record was specified or the
user is not permitted to anonymize it
"""
T = current.T
default = ""
# Determine target table
if r.component:
resource = r.component
if resource.link and not r.actuate_link():
resource = resource.link
else:
resource = r.resource
table = resource.table
# Determine target record
record_id = S3Anonymize._record_id(r)
if not record_id:
return default
# Check if target is configured for anonymize
rules = resource.get_config("anonymize")
if not rules:
return default
if not isinstance(rules, (tuple, list)):
# Single rule
rules["name"] = "default"
rules = [rules]
# Check permissions to anonymize
if not S3Anonymize.permitted(table, record_id):
return default
# Determine widget ID
widget_id = "%s-%s-anonymize" % (table, record_id)
# Inject script
if ajaxURL is None:
ajaxURL = r.url(method = "anonymize",
representation = "json",
)
script_options = {"ajaxURL": ajaxURL,
}
next_url = resource.get_config("anonymize_next")
if next_url:
script_options["nextURL"] = next_url
cls.inject_script(widget_id, script_options)
# Action button
translated_label = T(label)
action_button = A(translated_label, _class="anonymize-btn")
if _class:
action_button.add_class(_class)
# Dialog and Form
INFO = T("The following information will be deleted from the record")
CONFIRM = T("Are you sure you want to delete the selected details?")
SUCCESS = T("Action successful - please wait...")
form = FORM(P("%s:" % INFO),
cls.selector(rules),
P(CONFIRM),
DIV(INPUT(value = "anonymize_confirm",
_name = "anonymize_confirm",
_type = "checkbox",
),
LABEL(T("Yes, delete the selected details")),
_class = "anonymize-confirm",
),
cls.buttons(),
_class = "anonymize-form",
# Store action key in form
hidden = {"action-key": cls.action_key(widget_id)},
)
dialog = DIV(form,
DIV(P(SUCCESS),
_class = "hide anonymize-success",
),
_class = "anonymize-dialog hide",
_title = translated_label,
)
# Assemble widget
widget = DIV(action_button,
dialog,
_class = "s3-anonymize",
_id = widget_id,
)
return widget
Python
def widget(self, r, method=None, widget_id=None, visible=True, **attr):
"""
Widget-render entry point for S3Summary.
Args:
r: the CRUDRequest
method: the widget method
widget_id: the widget ID
visible: whether the widget is initially visible
attr: controller attributes
"""
# Get the target resource
resource = self.get_target(r)
# Read the relevant GET vars
report_vars, get_vars = self.get_options(r, resource)
# Parse event timestamp option
timestamp = get_vars.get("timestamp")
event_start, event_end = self.parse_timestamp(timestamp)
# Parse fact option
fact = get_vars.get("fact")
try:
facts = TimeSeriesFact.parse(fact)
except SyntaxError:
r.error(400, sys.exc_info()[1])
baseline = get_vars.get("baseline")
# Parse grouping axes
rows = get_vars.get("rows")
cols = get_vars.get("cols")
# Parse event frame parameters
start = get_vars.get("start")
end = get_vars.get("end")
slots = get_vars.get("slots")
if visible:
# Create time series
ts = TimeSeries(resource,
start = start,
end = end,
slots = slots,
event_start = event_start,
event_end = event_end,
rows = rows,
cols = cols,
facts = facts,
baseline = baseline,
# @todo: add title
#title = title,
)
# Extract aggregated results as JSON-serializable dict
data = ts.as_dict()
else:
data = None
# Render output
if r.representation in ("html", "iframe"):
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_url = r.url(method = "",
representation = "",
vars = ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)))
ajaxurl = attr.get("ajaxurl", r.url(method = "timeplot",
representation = "json",
vars = ajax_vars,
))
output = TimePlotForm(resource).html(data,
get_vars = get_vars,
filter_widgets = None,
ajaxurl = ajaxurl,
filter_url = filter_url,
widget_id = widget_id,
)
# Detect and store theme-specific inner layout
view = self._view(r, "timeplot.html")
# Render inner layout (outer page layout is set by S3Summary)
output["title"] = None
output = XML(current.response.render(view, output))
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
Python
def html(self,
data,
filter_widgets = None,
get_vars = None,
ajaxurl = None,
filter_url = None,
filter_form = None,
filter_tab = None,
widget_id = None,
):
"""
Render the form for the report
Args:
get_vars: the GET vars of the request (as dict)
widget_id: the HTML element base ID for the widgets
"""
T = current.T
# Filter options
if filter_widgets is not None:
filter_options = self._fieldset(T("Filter Options"),
filter_widgets,
_id="%s-filters" % widget_id,
_class="filter-form")
else:
filter_options = ""
# Report options
report_options = self.report_options(get_vars = get_vars,
widget_id = widget_id)
hidden = {"tp-data": json.dumps(data, separators=JSONSEPARATORS)}
# @todo: chart title
empty = T("No data available")
# Report form submit element
resource = self.resource
submit = resource.get_config("report_submit", True)
if submit:
_class = "tp-submit"
if submit is True:
label = T("Update Report")
elif isinstance(submit, (list, tuple)):
label = submit[0]
_class = "%s %s" % (submit[1], _class)
else:
label = submit
submit = TAG[""](
INPUT(_type="button",
_value=label,
_class=_class))
else:
submit = ""
# @todo: use view template (see S3ReportForm)
form = FORM(filter_options,
report_options,
submit,
hidden = hidden,
_class = "tp-form",
_id = "%s-tp-form" % widget_id,
)
# View variables
output = {"form": form,
"empty": empty,
"widget_id": widget_id,
}
# D3/Timeplot scripts (injected so that they are available for summary)
S3Report.inject_d3()
s3 = current.response.s3
scripts = s3.scripts
appname = current.request.application
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.timeplot.js" % appname
if script not in scripts:
scripts.append(script)
else:
script = "/%s/static/scripts/S3/s3.ui.timeplot.min.js" % appname
if script not in scripts:
scripts.append(script)
# Script to attach the timeplot widget
settings = current.deployment_settings
options = {
"ajaxURL": ajaxurl,
"autoSubmit": settings.get_ui_report_auto_submit(),
"emptyMessage": str(empty),
}
script = """$("#%(widget_id)s").timeplot(%(options)s)""" % \
{"widget_id": widget_id,
"options": json.dumps(options),
}
s3.jquery_ready.append(script)
return output
Python
def report_options(self, get_vars=None, widget_id="timeplot"):
"""
Render the widgets for the report options form
Args:
get_vars: the GET vars of the request (as dict)
widget_id: the HTML element base ID for the widgets
"""
T = current.T
timeplot_options = self.resource.get_config("timeplot_options")
label = lambda l, **attr: LABEL("%s:" % l, **attr)
selectors = []
# Fact options
selector = self.fact_options(options = timeplot_options,
get_vars = get_vars,
widget_id = widget_id,
)
selectors.append(("%s-fact__row" % widget_id,
label(T("Report of"), _for=selector["_id"]),
selector,
None,
))
# Timestamp options
selector = self.timestamp_options(options = timeplot_options,
get_vars = get_vars,
widget_id = widget_id,
)
selectors.append(("%s-timestamp__row" % widget_id,
label(T("Mode"), _for=selector["_id"]),
selector,
None,
))
# Time frame and slots options
tf_selector = self.time_options(options = timeplot_options,
get_vars = get_vars,
widget_id = widget_id,
)
ts_selector = self.slot_options(options = timeplot_options,
get_vars = get_vars,
widget_id = widget_id,
)
if ts_selector:
selector = DIV(tf_selector,
label(T("Intervals"), _for=ts_selector["_id"]),
ts_selector,
_class = "tp-time-options",
)
else:
selector = tf_selector
selectors.append(("%s-time__row" % widget_id,
label(T("Time Frame"), _for=tf_selector["_id"]),
selector,
None,
))
# Build field set
formstyle = current.deployment_settings.get_ui_filter_formstyle()
if not callable(formstyle):
formstyle = SQLFORM.formstyles[formstyle]
selectors = formstyle(FORM(), selectors)
return self._fieldset(T("Report Options"),
selectors,
_id = "%s-options" % widget_id,
_class = "report-options",
)
Python
def fact_options(self, options=None, get_vars=None, widget_id=None):
"""
Generate a selector for fact options (multiple allowed)
Args:
options: the timeplot options for the target table
get_vars: the current GET vars with selected options
or defaults, respectively
widget_id: the main widget DOM ID
Returns:
a multi-select widget
"""
T = current.T
table = self.resource.table
default = "count(%s)" % (table._id.name)
# Options
if options and "facts" in options:
opts = options["facts"]
else:
from ..model import s3_all_meta_field_names
meta_fields = s3_all_meta_field_names()
opts = [(T("Number of Records"), default)]
for fn in table.fields:
if fn in meta_fields:
continue
field = table[fn]
if not field.readable:
continue
requires = field.requires
if field.type == "integer" and not hasattr(requires, "options") or \
field.type == "double":
label = T("%(field)s (total)") % {"field": field.label}
opts.append((label, "sum(%s)" % fn))
# Currently selected option(s)
value = []
if get_vars:
selected = get_vars.get("fact")
if not isinstance(selected, list):
selected = [selected]
for item in selected:
if isinstance(item, (tuple, list)):
value.append(item[-1])
elif isinstance(item, str):
value.extend(item.split(","))
if not value:
value = default
# Dummy field
widget_opts = [(opt, label) for (label, opt) in opts]
dummy_field = Storage(name = "timeplot-fact",
requires = IS_IN_SET(widget_opts, zero=None),
)
# Widget
from ..ui import S3MultiSelectWidget
return S3MultiSelectWidget()(dummy_field,
value,
_id = "%s-fact" % widget_id,
_name = "fact",
_class = "tp-fact",
)
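# Illustration: fact options are aggregate expressions over table
# fields, e.g. "count(id)" (the default built above) or
# "sum(quantity)" ("quantity" being a hypothetical numeric field);
# multiple selected facts arrive comma-separated in the "fact" GET
# var and are split apart above.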
Python
def timestamp_options(self, options=None, get_vars=None, widget_id=None):
"""
Generate a selector for timestamp options
Args:
options: the timeplot options for the target table
get_vars: the current GET vars with selected options
or defaults, respectively
widget_id: the main widget DOM ID
Returns:
an options widget
"""
T = current.T
table = self.resource.table
# Options
if options and "timestamp" in options:
opts = options["timestamp"]
else:
start, end = TimeSeries.default_timestamp(table)
if not start:
return None
separate = (start, end) if end else (start, start)
opts = [(T("per interval"), ",".join(separate)),
(T("cumulative"), start),
]
if not opts:
return SPAN(T("no options available"),
_class = "no-options-available",
)
# Currently selected option
value = get_vars.get("timestamp") if get_vars else None
if not value:
start, end = TimeSeries.default_timestamp(table)
if start and end:
value = "%s,%s" % (start, end)
elif start:
value = start
# Dummy field
widget_opts = [(opt, label) for (label, opt) in opts]
dummy_field = Storage(name = "timestamp",
requires = IS_IN_SET(widget_opts, zero=None),
)
# Widget
return OptionsWidget.widget(dummy_field,
value,
_id = "%s-timestamp" % widget_id,
_name = "timestamp",
_class = "tp-timestamp",
)
Python
def time_options(cls, options=None, get_vars=None, widget_id=None):
"""
Generate a selector for the report time frame
Args:
options: the timeplot options for the target table
get_vars: the current GET vars with selected options
or defaults, respectively
widget_id: the main widget DOM ID
Returns:
an options widget
"""
T = current.T
# Time options:
if options and "time" in options:
opts = options["time"]
else:
# (label, start, end, slots)
# - if start is specified, end is relative to start
# - otherwise, end is relative to now
# - start "" means the date of the earliest recorded event
# - end "" means now
opts = ((T("All up to now"), "", "", ""),
(T("Last Year"), "<-1 year", "+1 year", "months"),
(T("This Year"), "<-0 years", "", "months"),
(T("Last Month"), "<-1 month", "+1 month", "days"),
(T("This Month"), "<-0 months", "", "days"),
(T("Last Week"), "<-1 week", "+1 week", "days"),
(T("This Week"), "<-0 weeks", "", "days"),
#(T("Past 12 Months"), "-12months", "", "months"),
#(T("Past 6 Months"), "-6months", "", "weeks"),
#(T("Past 3 Months"), "-3months", "", "weeks"),
#(T("Past Month"), "-1month", "", "days"),
#(T("Past Week"), "-1week", "", "days"),
#("All/+1 Month", "", "+1month", ""),
#("All/+2 Month", "", "+2month", ""),
#("-6/+3 Months", "-6months", "+9months", "months"),
#("-3/+1 Months", "-3months", "+4months", "weeks"),
#("-4/+2 Weeks", "-4weeks", "+6weeks", "weeks"),
#("-2/+1 Weeks", "-2weeks", "+3weeks", "days"),
)
widget_opts = []
for opt in opts:
label, start, end, slots = opt
widget_opts.append(("|".join((start, end, slots)), T(label)))
# Currently selected value
if get_vars:
start, end, slots = cls.get_timeframe(get_vars)
else:
start = end = slots = ""
value = "|".join((start, end, slots))
# Dummy field
dummy_field = Storage(name = "time",
requires = IS_IN_SET(widget_opts, zero=None),
)
# Widget
return OptionsWidget.widget(dummy_field,
value,
_id = "%s-time" % widget_id,
_name = "time",
_class = "tp-time",
)
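# Illustration: each option value encodes the time frame as
# "start|end|slots", e.g. "Last Year" -> "<-1 year|+1 year|months"
# and "All up to now" -> "||" (empty start/end/slots);
# get_timeframe() splits the selected value back into its three
# components.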
Python
def slot_options(options=None, get_vars=None, widget_id=None):
"""
Generates a selector for the time slots
Args:
options: the timeplot options for the target table
get_vars: the current GET vars with selected options
or defaults, respectively
widget_id: the main widget DOM ID
Returns:
an options widget, or None if there is only
the "auto" option available
"""
T = current.T
automatic = (T("Automatic"), "auto")
if options and "slots" in options:
opts = options["slots"]
else:
# Do not render by default
return None
#opts = (automatic,
#(T("Days"), "days"),
#(T("Weeks"), "weeks"),
#(T("2 Weeks"), "2 weeks"),
#(T("Months"), "months"),
#(T("3 Months"), "3 months"),
#)
if not any(opt[1] == "auto" for opt in opts):
explicit = opts
opts = [automatic]
opts.extend(explicit)
if len(opts) == 1:
return None
# Currently selected value
value = get_vars.get("slots") if get_vars else None
if not value:
value = "auto"
# Dummy field
widget_opts = [(opt, label) for (label, opt) in opts]
dummy_field = Storage(name = "slots",
requires = IS_IN_SET(widget_opts, zero=None),
)
# Widget
return OptionsWidget.widget(dummy_field,
value,
_id = "%s-slots" % widget_id,
_name = "slots",
_class = "tp-slots",
)
Python
def reception_center_onaccept(cls, form):
"""
Onaccept of reception center
- update total population
- update gross available capacity
- sanitize net available capacity
- update occupancy (percentage)
- update status history
"""
record_id = get_form_record_id(form)
if not record_id:
return
table = current.s3db.cr_reception_center
query = (table.id == record_id) & \
(table.deleted == False)
record = current.db(query).select(table.id,
table.population_registered,
table.population_unregistered,
table.capacity,
table.allocatable_capacity,
limitby = (0, 1),
).first()
num_r = record.population_registered
num_u = record.population_unregistered
total = (num_r if num_r else 0) + (num_u if num_u else 0)
update = {"population": total}
capacity = record.capacity
if capacity:
available = max(0, capacity - total)
else:
available = 0
update["available_capacity"] = available
allocatable = record.allocatable_capacity
if allocatable > available or allocatable < 0:
update["allocatable_capacity"] = available
if capacity > 0:
occupancy = total * 100 // capacity
else:
occupancy = 100
update["occupancy"] = occupancy
record.update_record(**update)
# Update the status history
cls.update_status_history(record.id)
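# Worked example of the update above (numbers hypothetical):
# capacity=200, population_registered=150, population_unregistered=30
#   total     = 150 + 30          = 180
#   available = max(0, 200 - 180) = 20
#   occupancy = 180 * 100 // 200  = 90   (percent, integer division)
# An allocatable_capacity outside 0..available is reset to available.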
Python
def update_status_history(facility_id):
"""
Updates the status history of a facility
Args:
facility_id: the cr_reception_center record ID
"""
db = current.db
s3db = current.s3db
ftable = s3db.cr_reception_center
stable = s3db.cr_reception_center_status
status_fields = ("status",
"capacity",
"available_capacity",
"population",
"population_registered",
"population_unregistered",
"allocatable_capacity",
"occupancy",
)
# Get the reception center record
fields = [ftable.id] + [ftable[fn] for fn in status_fields]
query = (ftable.id == facility_id) & (ftable.deleted == False)
facility = db(query).select(*fields, limitby=(0, 1)).first()
# Look up the status record for today
today = current.request.utcnow.date()
query = (stable.facility_id == facility_id) & \
(stable.date == today) & \
(stable.deleted == False)
status = db(query).select(stable.id, limitby = (0, 1)).first()
if not status:
# Create it
data = {fn: facility[fn] for fn in status_fields}
data["facility_id"] = facility.id
status_id = data["id"] = stable.insert(**data)
s3db.update_super(stable, data)
current.auth.s3_set_record_owner(stable, status_id)
s3db.onaccept(stable, data, method="create")
else:
# Update it
update = {fn: facility[fn] for fn in status_fields}
status.update_record(**update)
s3db.onaccept(stable, status, method="update")
# Update the previous record (set end-date)
query = (stable.facility_id == facility_id) & \
(stable.date < today) & \
(stable.deleted == False)
status = db(query).select(stable.id,
orderby = ~stable.date,
limitby = (0, 1),
).first()
if status:
status.update_record(date_until = today-datetime.timedelta(days=1))
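# Timeline sketch (hypothetical dates): if the facility changes on
# 2024-05-10 and the latest earlier status row is dated 2024-05-08,
# that row gets date_until = 2024-05-09 (yesterday), while a row dated
# 2024-05-10 is created or updated in place - one status row per day.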
def occupancy_represent(value, row=None):
"""
Representation of occupancy as decision aid, progress-bar style
Args:
value: the occupancy value (percentage, integer 0..>100)
Returns:
stylable DIV
"""
if not value:
value = 0
css_class = "occupancy-0"
else:
reprval = value // 10 * 10 + 10
if reprval > 100:
css_class = "occupancy-exc"
else:
css_class = "occupancy-%s" % reprval
return DIV("%s%%" % value,
DIV(_class="occupancy %s" % css_class),
_class="occupancy-bar",
)
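# Mapping sketch for the CSS class computed above (hypothetical values):
#   value None/0 -> "occupancy-0"
#   value 73     -> reprval 80  -> "occupancy-80"
#   value 95     -> reprval 100 -> "occupancy-100"
#   value 120    -> reprval 130 -> "occupancy-exc"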
def count(self, left=None, distinct=False):
"""
Get the total number of available records in this resource
Args:
left: left outer joins, if required
distinct: only count distinct rows
"""
if self.rfilter is None:
self.build_query()
if self._length is None:
self._length = self.rfilter.count(left = left,
distinct = distinct)
return self._length
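# Usage sketch (hypothetical table name; assumes a configured request
# environment with current.s3db available):
#
#   resource = current.s3db.resource("org_organisation")
#   total = resource.count()   # result is cached in self._length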
def insert(self, **fields):
"""
Insert a record into this resource
Args:
fields: dict of field/value pairs to insert
"""
table = self.table
tablename = self.tablename
# Check permission
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
from ..errors import S3PermissionError
raise S3PermissionError("Operation not permitted: INSERT INTO %s" %
tablename)
# Insert new record
record_id = self.table.insert(**fields)
# Post-process create
if record_id:
# Audit
current.audit("create", self.prefix, self.name, record=record_id)
record = Storage(fields)
record.id = record_id
# Update super
s3db = current.s3db
s3db.update_super(table, record)
# Record owner
auth = current.auth
auth.s3_set_record_owner(table, record_id)
auth.s3_make_session_owner(table, record_id)
# Execute onaccept
s3db.onaccept(tablename, record, method="create")
return record_id
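# Usage sketch (hypothetical table/field names); raises S3PermissionError
# if the current user lacks "create" permission on the table:
#
#   resource = current.s3db.resource("org_organisation")
#   record_id = resource.insert(name="Example Organisation")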
def delete(self,
format = None,
cascade = False,
replaced_by = None,
log_errors = False,
):
"""
Delete all records in this resource
Args:
format: the representation format of the request (optional)
cascade: this is a cascade delete (prevents commits)
replaced_by: used by record merger
log_errors: log errors even when cascade=True
Returns:
number of records deleted
Note:
skipping undeletable rows is no longer the default behavior,
process will now fail immediately for any error; use DeleteProcess
directly if skipping of undeletable rows is desired
"""
from .delete import DeleteProcess
delete = DeleteProcess(self, representation=format)
result = delete(cascade = cascade,
replaced_by = replaced_by,
#skip_undeletable = False,
)
if log_errors and cascade:
# Call log_errors explicitly if suppressed by cascade
delete.log_errors()
return result
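# Usage sketch (hypothetical filter; FS as imported from the resource
# package): delete all records currently matching the resource filter:
#
#   resource = current.s3db.resource("org_organisation",
#                                    filter = (FS("obsolete") == True),
#                                    )
#   numdeleted = resource.delete()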
def reject(self, cascade=False):
""" Reject (delete) all records in this resource """
db = current.db
s3db = current.s3db
define_resource = s3db.resource
DELETED = current.xml.DELETED
INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
tablename = self.tablename
table = self.table
pkey = table._id.name
# Get hooks configuration
get_config = s3db.get_config
ondelete = get_config(tablename, "ondelete")
onreject = get_config(tablename, "onreject")
ondelete_cascade = get_config(tablename, "ondelete_cascade")
# Get all rows
if "uuid" in table.fields:
rows = self.select([table._id.name, "uuid"], as_rows=True)
else:
rows = self.select([table._id.name], as_rows=True)
if not rows:
return True
delete_super = s3db.delete_super
if DELETED in table:
references = table._referenced_by
for row in rows:
error = self.error
self.error = None
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# Automatic cascade
for ref in references:
tn, fn = ref.tablename, ref.name
rtable = db[tn]
rfield = rtable[fn]
query = (rfield == row[pkey])
# Ignore RESTRICTs => reject anyway
if rfield.ondelete in ("CASCADE", "RESTRICT"):
rresource = define_resource(tn, filter=query, unapproved=True)
rresource.reject(cascade=True)
if rresource.error:
break
elif rfield.ondelete == "SET NULL":
try:
db(query).update(**{fn:None})
except Exception:
self.error = INTEGRITY_ERROR
break
elif rfield.ondelete == "SET DEFAULT":
try:
db(query).update(**{fn:rfield.default})
except Exception:
self.error = INTEGRITY_ERROR
break
if not self.error and not delete_super(table, row):
self.error = INTEGRITY_ERROR
if self.error:
db.rollback()
raise RuntimeError("Reject failed for %s.%s" %
(tablename, row[table._id]))
else:
# Pull back prior error status
self.error = error
error = None
# On-reject hook
if onreject:
callback(onreject, row, tablename=tablename)
# Park foreign keys
fields = {"deleted": True}
if "deleted_fk" in table:
record = table[row[pkey]]
fk = {}
for f in table.fields:
if record[f] is not None and \
s3_has_foreign_key(table[f]):
fk[f] = record[f]
fields[f] = None
else:
continue
if fk:
fields.update(deleted_fk=json.dumps(fk))
# Update the row, finally
db(table._id == row[pkey]).update(**fields)
# Clear session
if get_last_record_id(tablename) == row[pkey]:
remove_last_record_id(tablename)
# On-delete hook
if ondelete:
callback(ondelete, row, tablename=tablename)
else:
# Hard delete
for row in rows:
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# On-reject
if onreject:
callback(onreject, row, tablename=tablename)
try:
del table[row[pkey]]
except Exception:
# Row is not deletable
self.error = INTEGRITY_ERROR
db.rollback()
raise
else:
# Clear session
if get_last_record_id(tablename) == row[pkey]:
remove_last_record_id(tablename)
# Delete super-entity
delete_super(table, row)
# On-delete
if ondelete:
callback(ondelete, row, tablename=tablename)
return True
def merge(self,
original_id,
duplicate_id,
replace = None,
update = None,
main = True,
):
""" Merge two records, see also S3RecordMerger.merge """
from ..methods import S3RecordMerger
return S3RecordMerger(self).merge(original_id,
duplicate_id,
replace = replace,
update = update,
main = main,
)
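# Usage sketch (hypothetical record IDs): merge duplicate record 42 into
# original record 7, keeping the original's field values by default:
#
#   resource = current.s3db.resource("org_organisation")
#   resource.merge(7, 42)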
def datatable(self,
fields = None,
start = 0,
limit = None,
left = None,
orderby = None,
distinct = False,
list_id = None,
):
"""
Generate a data table of this resource
Args:
fields: list of fields to include (field selector strings)
start: index of the first record to include
limit: maximum number of records to include
left: additional left joins for DB query
orderby: orderby for DB query
distinct: distinct-flag for DB query
list_id: the datatable ID
Returns:
tuple (DataTable, numrows), where numrows represents
the total number of rows in the table that match the query
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
table = self.table
# Automatically include the record ID
table_id = table._id
pkey = table_id.name
if pkey not in selectors:
fields.insert(0, pkey)
selectors.insert(0, pkey)
# Skip representation of IDs in data tables
id_repr = table_id.represent
table_id.represent = None
# Extract the data
data = self.select(selectors,
start = start,
limit = limit,
orderby = orderby,
left = left,
distinct = distinct,
count = True,
getids = False,
represent = True,
)
rows = data.rows
# Restore ID representation
table_id.represent = id_repr
# Generate the data table
rfields = data.rfields
dt = DataTable(rfields, rows, list_id, orderby=orderby)
return dt, data.numrows
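# Usage sketch (hypothetical selectors): build a data table for the
# first 25 matching records; DataTable.html renders it, as seen in
# select_items() further below:
#
#   dt, numrows = resource.datatable(fields = ["name", "acronym"],
#                                    start = 0,
#                                    limit = 25,
#                                    list_id = "org-datatable",
#                                    )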
def datalist(self,
fields = None,
start = 0,
limit = None,
left = None,
orderby = None,
distinct = False,
list_id = None,
layout = None,
):
"""
Generate a data list of this resource
Args:
fields: list of fields to include (field selector strings)
start: index of the first record to include
limit: maximum number of records to include
left: additional left joins for DB query
orderby: orderby for DB query
distinct: distinct-flag for DB query
list_id: the list identifier
layout: custom renderer function (see S3DataList.render)
Returns:
tuple (S3DataList, numrows), where numrows represents
the total number of rows in the table that match the query
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
table = self.table
# Automatically include the record ID
pkey = table._id.name
if pkey not in selectors:
fields.insert(0, pkey)
selectors.insert(0, pkey)
# Extract the data
data = self.select(selectors,
start = start,
limit = limit,
orderby = orderby,
left = left,
distinct = distinct,
count = True,
getids = False,
raw_data = True,
represent = True,
)
# Generate the data list
numrows = data.numrows
dl = S3DataList(self,
fields,
data.rows,
list_id = list_id,
start = start,
limit = limit,
total = numrows,
layout = layout,
)
return dl, numrows
def json(self,
fields = None,
start = 0,
limit = None,
left = None,
distinct = False,
orderby = None,
):
"""
Export a JSON representation of the resource.
Args:
fields: list of field selector strings
start: index of the first record
limit: maximum number of records
left: list of (additional) left joins
distinct: select only distinct rows
orderby: Orderby-expression for the query
Returns:
the JSON (as string), representing a list of dicts
with {"tablename.fieldname":"value"}
"""
data = self.select(fields = fields,
start = start,
limit = limit,
orderby = orderby,
left = left,
distinct = distinct,
)
return json.dumps(data.rows)
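# Output shape sketch (hypothetical values): a JSON array of dicts keyed
# by "tablename.fieldname", e.g.
#
#   resource.json(fields=["name"], limit=2)
#   # '[{"org_organisation.name": "Org A"}, {"org_organisation.name": "Org B"}]'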
def load(self,
fields = None,
skip = None,
start = None,
limit = None,
orderby = None,
virtual = True,
cacheable = False,
):
"""
Loads records from the resource, applying the current filters,
and stores them in the instance.
Args:
fields: list of field names to include
skip: list of field names to skip
start: the index of the first record to load
limit: the maximum number of records to load
orderby: orderby-expression for the query
virtual: whether to load virtual fields or not
cacheable: don't define Row actions like update_record
or delete_record (faster, and the record can
be cached)
Returns:
the records as list of Rows
"""
table = self.table
tablename = self.tablename
UID = current.xml.UID
load_uids = hasattr(table, UID)
if not skip:
skip = ()
if fields or skip:
s3 = current.response.s3
if "all_meta_fields" in s3:
meta_fields = s3.all_meta_fields
else:
meta_fields = s3.all_meta_fields = s3_all_meta_field_names()
s3db = current.s3db
superkeys = s3db.get_super_keys(table)
else:
meta_fields = superkeys = None
# Field selection
qfields = ([table._id.name, UID])
append = qfields.append
for f in table.fields:
if f in ("wkt", "the_geom"):
if tablename == "gis_location":
if f == "the_geom":
# Filter out bulky Polygons
continue
else:
fmt = current.auth.permission.format
if fmt == "cap":
# Include WKT
pass
elif fmt == "xml" and current.deployment_settings.get_gis_xml_wkt():
# Include WKT
pass
else:
# Filter out bulky Polygons
continue
elif tablename.startswith("gis_layer_shapefile_"):
# Filter out bulky Polygons
continue
if fields or skip:
# Must include all meta-fields
if f in meta_fields:
append(f)
continue
# Must include the fkey if component
if self.parent and not self.link and f == self.fkey:
append(f)
continue
# Must include all super-keys
if f in superkeys:
append(f)
continue
if f in skip:
continue
if not fields or f in fields:
qfields.append(f)
fields = list(set(fn for fn in qfields if hasattr(table, fn)))
if self._rows is not None:
self.clear()
pagination = limit is not None or start
rfilter = self.rfilter
multiple = rfilter.multiple if rfilter is not None else True
if not multiple and self.parent and self.parent.count() == 1:
start = 0
limit = 1
rows = self.select(fields,
start = start,
limit = limit,
orderby = orderby,
virtual = virtual,
as_rows = True,
)
ids = self._ids = []
new_id = ids.append
self._uids = []
self._rows = []
if rows:
new_uid = self._uids.append
new_row = self._rows.append
pkey = table._id.name
for row in rows:
if hasattr(row, tablename):
_row = ogetattr(row, tablename)
if type(_row) is Row:
row = _row
record_id = ogetattr(row, pkey)
if record_id not in ids:
new_id(record_id)
new_row(row)
if load_uids:
new_uid(ogetattr(row, UID))
# If this is an unlimited load, or the first page with no
# rows, then the result length is equal to the total number
# of matching records => store length for subsequent count()s
length = len(self._rows)
if not pagination or not start and not length:
self._length = length
return self._rows
def records(self, fields=None):
"""
Get the current set as Rows instance
Args:
fields: the fields to include (list of Fields)
"""
if fields is None:
if self.tablename == "gis_location":
fields = [f for f in self.table
if f.name not in ("wkt", "the_geom")]
else:
fields = [f for f in self.table]
if self._rows is None:
return Rows(current.db)
else:
colnames = [str(f) for f in fields]
return Rows(current.db, self._rows, colnames=colnames)
def export_options(self,
component = None,
fields = None,
only_last = False,
show_uids = False,
hierarchy = False,
as_json = False,
):
"""
Export field options of this resource as element tree
Args:
component: name of the component which the options are
requested of, None for the primary table
fields: list of names of fields for which the options
are requested, None for all fields (which have
options)
only_last: obtain only the latest record
show_uids: include record UIDs in the output
hierarchy: include parent-child hierarchy information
as_json: convert the output into JSON
"""
if component is not None:
c = self.components.get(component)
if c:
tree = c.export_options(fields = fields,
only_last = only_last,
show_uids = show_uids,
hierarchy = hierarchy,
as_json = as_json,
)
return tree
else:
# If we get here, we've been called from the back-end,
# otherwise the request would have failed during parse.
# So it's safe to raise an exception:
raise AttributeError
else:
if as_json and only_last and len(fields) == 1:
# Identify the field
default = {"option":[]}
try:
field = self.table[fields[0]]
except AttributeError:
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Check that the validator has a lookup table
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
from ..tools import IS_LOCATION
if not isinstance(requires, (IS_ONE_OF, IS_LOCATION)):
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Identify the lookup table
db = current.db
lookuptable = requires.ktable
lookupfield = db[lookuptable][requires.kfield]
# Fields to extract
fields = [lookupfield]
h = None
if hierarchy:
from ..tools import S3Hierarchy
h = S3Hierarchy(lookuptable)
if not h.config:
h = None
elif h.pkey.name != lookupfield.name:
# Also extract the node key for the hierarchy
fields.append(h.pkey)
# Get the latest record
# NB: this assumes that the lookupfield is auto-incremented
row = db().select(orderby = ~lookupfield,
limitby = (0, 1),
*fields).first()
# Represent the value and generate the output JSON
if row:
value = row[lookupfield]
widget = field.widget
if hasattr(widget, "represent") and widget.represent:
# Prefer the widget's represent as options.json
# is usually called to Ajax-update the widget
represent = widget.represent(value)
elif field.represent:
represent = field.represent(value)
else:
represent = s3_str(value)
if isinstance(represent, A):
represent = represent.components[0]
item = {"@value": value, "$": represent}
if h:
parent = h.parent(row[h.pkey])
if parent:
item["@parent"] = str(parent)
result = [item]
else:
result = []
return json.dumps({'option': result})
xml = current.xml
tree = xml.get_options(self.table,
fields = fields,
show_uids = show_uids,
hierarchy = hierarchy,
)
if as_json:
return xml.tree2json(tree, pretty_print=False, native=True)
else:
return xml.tostring(tree, pretty_print=False)
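# Response shape sketch for the only_last shortcut above (hypothetical
# values), as consumed by Ajax-updated option widgets:
#
#   {"option": [{"@value": 42, "$": "Most recent label"}]}
#
# ...plus an "@parent" key when the lookup table is hierarchical.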
def export_struct(self,
meta = False,
options = False,
references = False,
stylesheet = None,
as_json = False,
as_tree = False,
):
"""
Get the structure of the resource
Args:
meta: include meta-field attributes
options: include option lists in option fields
references: include option lists even for reference fields
stylesheet: the stylesheet to use for transformation
as_json: convert into JSON after transformation
as_tree: return the element tree instead of a string
"""
xml = current.xml
# Get the structure of the main resource
root = etree.Element(xml.TAG.root)
main = xml.get_struct(self.prefix, self.name,
alias = self.alias,
parent = root,
meta = meta,
options = options,
references = references,
)
# Include the exposed components
for component in self.components.exposed.values():
prefix = component.prefix
name = component.name
xml.get_struct(prefix, name,
alias = component.alias,
parent = main,
meta = meta,
options = options,
references = references,
)
# Transformation
tree = etree.ElementTree(root)
if stylesheet is not None:
args = {"domain": xml.domain,
"base_url": current.response.s3.base_url,
"prefix": self.prefix,
"name": self.name,
"utcnow": s3_format_datetime(),
}
tree = xml.transform(tree, stylesheet, **args)
if tree is None:
return None
# Return tree if requested
if as_tree:
return tree
# Otherwise string-ify it
if as_json:
return xml.tree2json(tree, pretty_print=True)
else:
return xml.tostring(tree, pretty_print=True)
def resolve_selectors(self,
selectors,
skip_components = False,
extra_fields = True,
show = True,
):
"""
Resolve a list of field selectors against this resource
Args:
selectors: the field selectors
skip_components: skip fields in components
extra_fields: automatically add extra_fields of all virtual
fields in this table
show: default for S3ResourceField.show
Returns:
tuple of (fields, joins, left, distinct)
"""
prefix = lambda s: "~.%s" % s \
if "." not in s.split("$", 1)[0] else s
display_fields = set()
add = display_fields.add
# Store field selectors
for item in selectors:
if not item:
continue
elif type(item) is tuple:
item = item[-1]
if isinstance(item, str):
selector = item
elif isinstance(item, S3ResourceField):
selector = item.selector
elif isinstance(item, FS):
selector = item.name
else:
continue
add(prefix(selector))
slist = list(selectors)
# Collect extra fields from virtual tables
if extra_fields:
extra = self.get_config("extra_fields")
if extra:
append = slist.append
for selector in extra:
s = prefix(selector)
if s not in display_fields:
append(s)
joins = {}
left = {}
distinct = False
columns = set()
add_column = columns.add
rfields = []
append = rfields.append
for s in slist:
# Allow to override the field label
if type(s) is tuple:
label, selector = s
else:
label, selector = None, s
# Resolve the selector
if isinstance(selector, str):
selector = prefix(selector)
try:
rfield = S3ResourceField(self, selector, label=label)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, FS):
try:
rfield = selector.resolve(self)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, S3ResourceField):
rfield = selector
else:
continue
# Unresolvable selector?
if rfield.field is None and not rfield.virtual:
continue
# De-duplicate columns
colname = rfield.colname
if colname in columns:
continue
else:
add_column(colname)
# Replace default label
if label is not None:
rfield.label = label
# Skip components
if skip_components:
head = rfield.selector.split("$", 1)[0]
if "." in head and head.split(".")[0] not in ("~", self.alias):
continue
# Resolve the joins
if rfield.distinct:
left.update(rfield._joins)
distinct = True
elif rfield.join:
joins.update(rfield._joins)
rfield.show = show and rfield.selector in display_fields
append(rfield)
return (rfields, joins, left, distinct)
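# Usage sketch (hypothetical selectors): "~" aliases the primary table,
# so plain field names are prefixed automatically before resolution:
#
#   rfields, joins, left, distinct = \
#       resource.resolve_selectors(["name", "office.phone1"])
#   # "name" resolves as "~.name"; "office.phone1" joins the component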
def _join(self, implicit=False, reverse=False):
"""
Get a join for this component
Args:
implicit: return a subquery with an implicit join rather
than an explicit join
reverse: get the reverse join (joining master to component)
Returns:
a Query if implicit=True, otherwise a list of joins
"""
if self.parent is None:
# This isn't a component
return None
else:
ltable = self.parent.table
rtable = self.table
pkey = self.pkey
fkey = self.fkey
DELETED = current.xml.DELETED
if self.linked:
return self.linked._join(implicit=implicit, reverse=reverse)
elif self.linktable:
linktable = self.linktable
lkey = self.lkey
rkey = self.rkey
lquery = (ltable[pkey] == linktable[lkey])
if DELETED in linktable:
lquery &= (linktable[DELETED] == False)
if self.filter is not None and not reverse:
rquery = (linktable[rkey] == rtable[fkey]) & self.filter
else:
rquery = (linktable[rkey] == rtable[fkey])
if reverse:
join = [linktable.on(rquery), ltable.on(lquery)]
else:
join = [linktable.on(lquery), rtable.on(rquery)]
else:
lquery = (ltable[pkey] == rtable[fkey])
if DELETED in rtable and not reverse:
lquery &= (rtable[DELETED] == False)
if self.filter is not None:
lquery &= self.filter
if reverse:
join = [ltable.on(lquery)]
else:
join = [rtable.on(lquery)]
if implicit:
query = None
for expression in join:
if query is None:
query = expression.second
else:
query &= expression.second
return query
else:
return join
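# Join sketch (hypothetical link-table component): the explicit variant
# returns the joins in query order,
#
#   [linktable.on(ltable[pkey] == linktable[lkey]),
#    rtable.on(linktable[rkey] == rtable[fkey])]
#
# while implicit=True folds the same conditions into a single Query.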
def update_link(self, master, record):
"""
Create a new link in a link table if it doesn't yet exist.
This function is meant to also update links in "embed"
actuation mode once this gets implemented, therefore the
method name "update_link".
Args:
master: the master record
record: the new component record to be linked
"""
if self.parent is None or self.linked is None:
return None
# Find the keys
resource = self.linked
pkey = resource.pkey
lkey = resource.lkey
rkey = resource.rkey
fkey = resource.fkey
if pkey not in master:
return None
_lkey = master[pkey]
if fkey not in record:
return None
_rkey = record[fkey]
if not _lkey or not _rkey:
return None
ltable = self.table
ltn = ltable._tablename
# Create the link if it does not already exist
query = ((ltable[lkey] == _lkey) &
(ltable[rkey] == _rkey))
row = current.db(query).select(ltable._id, limitby=(0, 1)).first()
if not row:
s3db = current.s3db
onaccept = s3db.get_config(ltn, "create_onaccept")
if onaccept is None:
onaccept = s3db.get_config(ltn, "onaccept")
data = {lkey:_lkey, rkey:_rkey}
link_id = ltable.insert(**data)
data[ltable._id.name] = link_id
s3db.update_super(ltable, data)
current.auth.s3_set_record_owner(ltable, data)
if link_id and onaccept:
callback(onaccept, Storage(vars=Storage(data)))
else:
link_id = row[ltable._id.name]
return link_id
def list_fields(self, key="list_fields", id_column=0):
"""
Get the list_fields for this resource
Args:
key: alternative key for the table configuration
id_column: - False to exclude the record ID
- True to include it if it is configured
- 0 to make it the first column regardless
whether it is configured or not
"""
list_fields = self.get_config(key, None)
if not list_fields and key != "list_fields":
list_fields = self.get_config("list_fields", None)
if not list_fields:
list_fields = [f.name for f in self.readable_fields()]
id_field = pkey = self._id.name
# Do not include the parent key for components
if self.parent and not self.link and \
not current.response.s3.component_show_key:
fkey = self.fkey
else:
fkey = None
fields = []
append = fields.append
selectors = set()
seen = selectors.add
for f in list_fields:
selector = f[1] if type(f) is tuple else f
if fkey and selector == fkey:
continue
if selector == pkey and not id_column:
id_field = f
elif selector not in selectors:
seen(selector)
append(f)
if id_column is not False and id_column == 0:
    fields.insert(0, id_field)
return fields
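# Usage sketch (hypothetical configuration): with
#   list_fields = ["name", "acronym"]
# configured for the table, id_column=0 yields ["id", "name", "acronym"],
# whereas id_column=False yields just ["name", "acronym"].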
def select_items(self, job_id, r, **attr):
"""
View a pending import job after trial phase and select items to commit
- provides a table of import items
- pre-selects all items without error
- submitting the selection goes to commit()
Args:
job_id: the import job UUID (or None to read from request vars)
r: the CRUDRequest
attr: controller parameters
"""
T = current.T
if job_id is None:
job_id = r.vars.get("job_id")
if not job_id:
r.error(400, T("No import job specified"))
s3db = current.s3db
s3 = current.response.s3
itable = s3db.s3_import_item
field = itable.element
field.represent = self.element_represent
# Target resource tablename
ttablename = r.resource.tablename
from ..resource import FS
query = (FS("job_id") == job_id) & \
(FS("tablename") == ttablename)
iresource = s3db.resource(itable, filter=query)
# Pre-select all items without error, flag items with errors for styling
query = (itable.job_id == job_id) & \
        (itable.tablename == ttablename)
rows = current.db(query).select(itable.id, itable.error)
select_list = []
error_list = []
for row in rows:
if row.error:
error_list.append(str(row.id))
else:
select_list.append("%s" % row.id)
representation = r.representation
get_vars = r.get_vars
# Datatable Filter
list_fields = ["id", "element", "error"]
if representation == "aadata":
searchq, orderby, left = iresource.datatable_filter(list_fields, get_vars)
if searchq is not None:
iresource.add_filter(searchq)
else:
orderby, left = None, None
if not orderby:
orderby, left = ~iresource.table.error, None
# Pagination
if representation == "aadata":
start, limit = self._limits(get_vars)
else:
start, limit = None, 0
# How many records per page?
settings = current.deployment_settings
display_length = settings.get_ui_datatables_pagelength()
if not limit:
limit = 2 * display_length
# Generate datatable
dt, totalrows = iresource.datatable(fields = list_fields,
left = left,
start = start,
limit = limit,
orderby = orderby,
list_id = "import-items",
)
dt_bulk_actions = [current.T("Import")]
if representation == "aadata":
# Pagination request (Ajax)
displayrows = totalrows
totalrows = iresource.count()
draw = int(get_vars.draw or 0)
output = dt.json(totalrows,
displayrows,
draw,
dt_bulk_actions = dt_bulk_actions,
)
else:
# Initial HTML response
displayrows = totalrows
# Generate formkey and store in session
import uuid
formkey = uuid.uuid4()
current.session["_formkey[%s/%s]" % (ttablename, job_id)] = str(formkey)
ajax_url = "/%s/%s/%s/import.aadata?job_id=%s" % (r.application,
r.controller,
r.function,
job_id,
)
# Generate the datatable HTML
s3.no_formats = True
items = dt.html(totalrows,
displayrows,
dt_formkey = formkey,
dt_pagination = True,
dt_pageLength = display_length,
dt_base_url = r.url(method="import", vars={"job_id": job_id}),
dt_permalink = None,
dt_ajax_url = ajax_url,
dt_bulk_actions = dt_bulk_actions,
dt_bulk_selected = select_list,
dt_styles = {"dtwarning": error_list},
)
# Append the job_id to the datatable form
job = INPUT(_type = "hidden",
_name = "job_id",
_value = "%s" % job_id,
)
items.append(job)
# Add toggle-button for item details
SHOW = T("Display Details")
HIDE = T("Hide Details")
s3.actions = [{"label": s3_str(SHOW),
"_class": "action-btn toggle-item",
},
]
script = '''$('#import-items').on('click','.toggle-item',function(){b=$(this);$('.import-item-details',b.closest('tr')).toggle().each(function(){b.text($(this).is(':visible')?'%s':'%s')})})'''
s3.jquery_ready.append(script % (HIDE, SHOW))
# View
current.response.view = self._view(r, "list.html")
output = {"title": T("Select records to import"),
"items": items,
}
return output
def import_from_source(cls,
resource,
source,
fmt = "csv",
stylesheet = None,
extra_data = None,
commit = False,
**args,
):
"""
Import spreadsheet data into a resource
Args:
resource: the target resource
source: the source (file-like object)
fmt: the source file format (in connection with source)
stylesheet: the XSLT stylesheet to transform the source (optional)
extra_data: extra data to add to source rows (in connection with source)
commit: whether to commit the import immediately (in connection with source)
args: additional stylesheet args
Returns:
import job UUID
"""
result = resource.import_xml(source,
source_type = fmt,
extra_data = extra_data,
commit = commit,
ignore_errors = True,
stylesheet = stylesheet,
**args)
job_id = result.job_id
if not job_id and result.error:
raise ValueError(result.error)
return job_id
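# Usage sketch (hypothetical file path, stylesheet, and holder-class
# name): run the import trial phase for a CSV source, keeping the job
# for interactive review:
#
#   with open("orgs.csv", "rb") as source:
#       job_id = Importer.import_from_source(resource,
#                                            source,
#                                            fmt = "csv",
#                                            stylesheet = "org.xsl",
#                                            )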
def element_represent(self, value):
"""
Represent the import item XML element as details in the import
item datatable
Args:
value: the XML element (as string)
Returns:
DIV containing a representation of the element
"""
try:
element = etree.fromstring(value)
except (etree.ParseError, etree.XMLSyntaxError):
return DIV(value)
s3db = current.s3db
table = s3db[element.get("name")]
output = DIV()
details = TABLE(_class="import-item-details")
# Field values in main record
header, rows = self.item_details(table, element)
if header is not None:
output.append(header)
# Add component details, if present
components = element.findall("resource")
for component in components:
ctablename = component.get("name")
ctable = s3db.table(ctablename)
if not ctable:
continue
cdetails = self.item_details(ctable, component, prefix=True)[1]
rows.extend(cdetails)
if rows:
details.append(TBODY(rows))
# Add error messages, if present
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH("%s:" % current.T("Errors")),
TD(UL([LI(e) for e in errors])))))
        if not rows and not components:
# No field data in the main record, nor components
# => target table containing only references?
refdetail = TABLE(_class = "import-item-details")
references = element.findall("reference")
for reference in references:
resource = reference.get("resource")
tuid = reference.get("tuid")
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
        return output |
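
# Illustrative input for element_represent - a serialized S3XML resource
# element; the table and field names are assumptions:
#
#   value = ('<resource name="org_office">'
#            '<data field="name">Field Office</data>'
#            '<data field="comments">Imported from CSV</data>'
#            '</resource>')
#   cell = self.element_represent(value)
#   # -> DIV(P(B("name: "), "Field Office"),
#   #        TABLE(TBODY(TR(TH("name:"), TD("Field Office")),
#   #                    TR(TH("comments:"), TD("Imported from CSV")))))
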
"""
Represent the import item XML element as details in the import
item datatable
Args:
value: the XML element (as string)
Returns:
DIV containing a representation of the element
"""
try:
element = etree.fromstring(value)
except (etree.ParseError, etree.XMLSyntaxError):
return DIV(value)
s3db = current.s3db
table = s3db[element.get("name")]
output = DIV()
details = TABLE(_class="import-item-details")
# Field values in main record
header, rows = self.item_details(table, element)
if header is not None:
output.append(header)
# Add component details, if present
components = element.findall("resource")
for component in components:
ctablename = component.get("name")
ctable = s3db.table(ctablename)
if not ctable:
continue
cdetails = self.item_details(ctable, component, prefix=True)[1]
rows.extend(cdetails)
if rows:
details.append(TBODY(rows))
# Add error messages, if present
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH("%s:" % current.T("Errors")),
TD(UL([LI(e) for e in errors])))))
if rows == [] and components == []:
# No field data in the main record, nor components
# => target table containing only references?
refdetail = TABLE(_class = "import-item-details")
references = element.findall("reference")
for reference in references:
resource = reference.get("resource")
tuid = reference.get("tuid")
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return output |
Python | def item_details(cls, table, element, prefix=False):
"""
Show details of an import item
Args:
table: the table
element: the S3XML resource-element
prefix: prefix field names with the table name
Returns:
tuple (P(header), [TR(detail), ...])
"""
header = None
first_string = True
header_text = lambda f, v: P(B("%s: " % f), v)
details = []
tablename = table._tablename
for data_element in element.findall("data"):
# Get the field name
fname = data_element.get("field")
# Skip unspecified, non-existent and WKT fields
if not fname or fname not in table.fields or fname == "wkt":
continue
# Get the field and field type
ftype = str(table[fname].type)
# Decode the value
value = data_element.get("value")
if value is None:
value = current.xml.xml_decode(data_element.text)
value = s3_str(value)
# Set main detail (header)
if fname == "name":
header = header_text(fname, value)
first_string = False
elif ftype == "string" and first_string:
header = header_text(fname, value)
first_string = False
elif not header:
header = header_text(fname, value)
# Append detail to details table
label = "%s.%s:" % (tablename, fname) if prefix else "%s:" % fname
details.append(TR(TH(label), TD(value)))
        return (header, details) |
"""
Show details of an import item
Args:
table: the table
element: the S3XML resource-element
prefix: prefix field names with the table name
Returns:
tuple (P(header), [TR(detail), ...])
"""
header = None
first_string = True
header_text = lambda f, v: P(B("%s: " % f), v)
details = []
tablename = table._tablename
for data_element in element.findall("data"):
# Get the field name
fname = data_element.get("field")
# Skip unspecified, non-existent and WKT fields
if not fname or fname not in table.fields or fname == "wkt":
continue
# Get the field and field type
ftype = str(table[fname].type)
# Decode the value
value = data_element.get("value")
if value is None:
value = current.xml.xml_decode(data_element.text)
value = s3_str(value)
# Set main detail (header)
if fname == "name":
header = header_text(fname, value)
first_string = False
elif ftype == "string" and first_string:
header = header_text(fname, value)
first_string = False
elif not header:
header = header_text(fname, value)
# Append detail to details table
label = "%s.%s:" % (tablename, fname) if prefix else "%s:" % fname
details.append(TR(TH(label), TD(value)))
return (header, details) |
Python | def encode(resource, **attr):
"""
Method to encode a resource in the target format,
to be implemented by the subclass (mandatory)
Args:
resource: the CRUDResource
Returns:
a handle to the output
"""
        raise NotImplementedError |
"""
Method to encode a resource in the target format,
to be implemented by the subclass (mandatory)
Args:
resource: the CRUDResource
Returns:
a handle to the output
"""
raise NotImplementedError |
Python | def decode(resource, source, **attr):
"""
Method to decode a source into an ElementTree,
to be implemented by the subclass
Args:
resource: the CRUDResource
source: the source
Returns:
an ElementTree
"""
        return current.xml.tree() |
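
# Minimal sketch of a format handler built on this encode/decode contract;
# the class name and the placeholder behaviour are assumptions, not Eden API:

from io import StringIO
from gluon import current

class EchoCodec:

    @staticmethod
    def encode(resource, **attr):
        # Serialize the resource in the target format and return a handle;
        # here just a placeholder string
        return StringIO("export of %s" % resource.tablename)

    @staticmethod
    def decode(resource, source, **attr):
        # Convert the source into an S3XML element tree; returning an
        # empty tree mirrors the default implementation above
        return current.xml.tree()
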
"""
Method to decode a source into an ElementTree,
to be implemented by the subclass
Args:
resource: the CRUDResource
source: the source
Returns:
an ElementTree
"""
return current.xml.tree() |
Python | def parse_source(cls,
tablename,
source,
source_type = "xml",
stylesheet = None,
extra_data = None,
**args):
"""
Parse a data source for import, and convert it into a S3XML
element tree.
Args:
tablename: the name of the target table
source: the data source; accepts a single source, a list of
sources or a list of tuples (name, source); each
source must be either an ElementTree or a file-like
object
str source_type: the source type (xml|json|csv|xls|xlsx)
stylesheet: the transformation stylesheet
extra_data: for CSV imports, dict of extra columns to add
to each row
args: parameters to pass to the transformation stylesheet
"""
xml = current.xml
tree = None
if not isinstance(source, (list, tuple)):
source = [source]
for item in source:
if isinstance(item, (list, tuple)):
name, s = item[:2]
else:
name, s = None, item
if isinstance(s, etree._ElementTree):
t = s
elif source_type == "json":
if isinstance(s, str):
t = xml.json2tree(StringIO(s))
else:
t = xml.json2tree(s)
elif source_type == "csv":
t = xml.csv2tree(s, resourcename=name, extra_data=extra_data)
elif source_type == "xls":
t = xml.xls2tree(s, resourcename=name, extra_data=extra_data)
elif source_type == "xlsx":
t = xml.xlsx2tree(s, resourcename=name, extra_data=extra_data)
else:
t = xml.parse(s)
if not t:
if xml.error:
raise SyntaxError(xml.error)
else:
raise SyntaxError("Invalid source")
if stylesheet is not None:
prefix, name = tablename.split("_", 1)
args.update(domain = xml.domain,
base_url = current.response.s3.base_url,
prefix = prefix,
name = name,
utcnow = s3_format_datetime(),
)
t = xml.transform(t, stylesheet, **args)
if not t:
raise SyntaxError(xml.error)
if not tree:
tree = t.getroot()
else:
tree.extend(list(t.getroot()))
        return tree |
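
# Usage sketch: merging two CSV sources into one element tree before import;
# file names, resource names and the stylesheet path are assumptions:
#
#   sources = [("office", open("offices.csv", "rb")),
#              ("staff", open("staff.csv", "rb")),
#              ]
#   tree = cls.parse_source("org_office",
#                           sources,
#                           source_type = "csv",
#                           stylesheet = "office.xsl",
#                           )
#   # each (name, source) tuple sets the resource name for csv2tree, and
#   # the transformed trees are merged under a single root element
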
Python | def import_tree(cls,
tablename,
tree,
files = None,
record_id = None,
components = None,
commit = True,
ignore_errors = False,
job_id = None,
select_items = None,
strategy = None,
sync_policy = None,
):
"""
Import data from an S3XML element tree.
Args:
tablename: the name of the target table
tree: the S3XML element tree (ElementTree)
files: file attachments referenced by the tree (dict)
record_id: the target record ID
list components: list of importable components
commit: commit the import job, if False, the import job
will be rolled back and stored for committing at
a later time
ignore_errors: ignore any errors, import what is possible
job_id: the job UID, to restore and commit a previously
stored import job
list select_items: only restore these items from the job
(list of import item record IDs)
strategy: list of allowed import methods
sync_policy: the synchronization policy (SyncPolicy)
"""
db = current.db
s3db = current.s3db
s3 = current.response.s3
table = s3db.table(tablename)
if not table or "id" not in table.fields:
return ImportResult(False, current.ERROR.BAD_RESOURCE)
if tree is not None:
# Run import_prep callback
import_prep = s3.import_prep
if import_prep:
if not isinstance(tree, etree._ElementTree):
tree = etree.ElementTree(tree)
callback(import_prep, tree, tablename=tablename)
# Select matching elements from tree
elements = cls.matching_elements(tree, tablename, record_id=record_id)
if not elements:
# Nothing to import
# - this is only an error if an update of a specific record
# was expected
error = current.ERROR.NO_MATCH if record_id else None
return ImportResult(not record_id, error)
# Create import job
import_job = ImportJob(table,
tree = tree,
files = files,
strategy = strategy,
sync_policy = sync_policy,
)
# Add import items for matching elements
error = None
s3.bulk = True
add_item = import_job.add_item
for element in elements:
success = add_item(element = element,
components = components,
)
if not success:
error = import_job.error
if error and not ignore_errors:
s3.bulk = False
return ImportResult(False, error, job=import_job)
elif not commit:
raise ValueError("Element tree required for trial import")
elif job_id is not None:
# Re-instate the stored import job
try:
import_job = ImportJob(table,
job_id = job_id,
strategy = strategy,
sync_policy = sync_policy,
)
except SyntaxError:
return ImportResult(False, current.ERROR.BAD_SOURCE)
# Select items for target table
item_table = s3db.s3_import_item
query = (item_table.job_id == job_id)
if select_items:
# Limit to selected items for the resource table
query &= (item_table.tablename != tablename) | \
(item_table.id.belongs(select_items))
items = db(query).select()
# Restore the items and references
s3.bulk = True
load_item = import_job.load_item
error = None
for item in items:
success = load_item(item)
if not success:
error = import_job.error
import_job.restore_references()
if error and not ignore_errors:
s3.bulk = False
return ImportResult(False, error)
# Run import_prep callback
import_prep = s3.import_prep
if import_prep:
tree = import_job.get_tree()
callback(import_prep, tree, tablename=tablename)
else:
raise ValueError("Element tree or job ID required")
# Commit the import job
s3.bulk = True
auth = current.auth
auth.rollback = not commit
success = import_job.commit(ignore_errors=ignore_errors)
auth.rollback = False
s3.bulk = False
# Rollback on failure or if so requested
if not success or not commit:
db.rollback()
# Prepare result
error = import_job.error
if error:
if ignore_errors:
error = "%s - invalid items ignored" % import_job.error
elif not success:
raise RuntimeError("Import failed without error message")
result = ImportResult(error is None or ignore_errors,
error = error,
job = import_job,
)
if not commit:
# Save the job
import_job.store()
else:
# Delete the import job when committed
import_job.delete()
result.job_id = None
        return result |
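
# Workflow sketch for the two branches above - a trial import that stores
# the job, followed by a later commit of selected items; the table name and
# the selection are assumptions:
#
#   # 1) Trial run: validate the tree, then roll back and store the job
#   result = cls.import_tree("org_organisation", tree, commit=False)
#
#   # 2) Later: restore the stored job and commit only selected items
#   result = cls.import_tree("org_organisation",
#                            None,
#                            commit = True,
#                            job_id = result.job.job_id,
#                            select_items = selected_item_ids,
#                            )
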
Python | def matching_elements(tree, tablename, record_id=None):
"""
Find elements in the source tree that belong to the target
record, or the target table if no record is specified.
Args:
tree: the source tree (ElementTree)
tablename: the name of the target table
record_id: the target record ID
Returns:
list of matching elements, or None
"""
xml = current.xml
db = current.db
# Select the elements for this table
elements = xml.select_resources(tree, tablename)
if not elements:
return None
# Find matching elements, if a target record ID is given
UID = xml.UID
table = current.s3db[tablename]
if record_id and UID in table:
if not isinstance(record_id, (tuple, list)):
query = (table._id == record_id)
else:
query = (table._id.belongs(record_id))
originals = db(query).select(table[UID])
uids = [row[UID] for row in originals]
matches = []
import_uid = xml.import_uid
append = matches.append
for element in elements:
element_uid = import_uid(element.get(UID, None))
if not element_uid:
continue
if element_uid in uids:
append(element)
            if not matches and uids:
                # Fall back to the first element if it carries no UID,
                # assuming it is meant to update the target record
                first = elements[0]
                if not first.get(UID, None):
                    first.set(UID, uids[0])
                    matches = [first]
elements = matches
        return elements if elements else None |
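
# Behaviour sketch: with a record_id, only elements whose UUID matches the
# stored record are returned, and an element without UUID can be adopted as
# an update of that record; the IDs below are assumptions:
#
#   elements = cls.matching_elements(tree, "org_organisation", record_id=4)
#   if elements is None:
#       # nothing in the source belongs to this record
#       result = ImportResult(False, current.ERROR.NO_MATCH)
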
"""
Find elements in the source tree that belong to the target
record, or the target table if no record is specified.
Args:
tree: the source tree (ElementTree)
tablename: the name of the target table
record_id: the target record ID
Returns:
list of matching elements, or None
"""
xml = current.xml
db = current.db
# Select the elements for this table
elements = xml.select_resources(tree, tablename)
if not elements:
return None
# Find matching elements, if a target record ID is given
UID = xml.UID
table = current.s3db[tablename]
if record_id and UID in table:
if not isinstance(record_id, (tuple, list)):
query = (table._id == record_id)
else:
query = (table._id.belongs(record_id))
originals = db(query).select(table[UID])
uids = [row[UID] for row in originals]
matches = []
import_uid = xml.import_uid
append = matches.append
for element in elements:
element_uid = import_uid(element.get(UID, None))
if not element_uid:
continue
if element_uid in uids:
append(element)
if not matches:
first = elements[0]
if len(elements) and not first.get(UID, None):
first.set(UID, uids[0])
matches = [first]
elements = matches
return elements if elements else None |
Python | def json_message(self):
"""
Generate a JSON message from this result
Returns:
the JSON message (str)
"""
xml = current.xml
if self.error_tree is not None:
tree = xml.tree2json(self.error_tree)
else:
tree = None
# Import Summary Info
info = {"records": self.count,
}
if self.created:
info["created"] = list(set(self.created))
if self.updated:
info["updated"] = list(set(self.updated))
if self.deleted:
info["deleted"] = list(set(self.deleted))
if self.success:
msg = xml.json_message(message = self.error,
tree = tree,
**info)
else:
msg = xml.json_message(False, 400,
message = self.error,
tree = tree,
)
        return msg |
"""
Generate a JSON message from this result
Returns:
the JSON message (str)
"""
xml = current.xml
if self.error_tree is not None:
tree = xml.tree2json(self.error_tree)
else:
tree = None
# Import Summary Info
info = {"records": self.count,
}
if self.created:
info["created"] = list(set(self.created))
if self.updated:
info["updated"] = list(set(self.updated))
if self.deleted:
info["deleted"] = list(set(self.deleted))
if self.success:
msg = xml.json_message(message = self.error,
tree = tree,
**info)
else:
msg = xml.json_message(False, 400,
message = self.error,
tree = tree,
)
return msg |
Python | def uidmap(self):
"""
Map uuid/tuid => element, for faster reference lookups
"""
uidmap = self._uidmap
tree = self.tree
if uidmap is None and tree is not None:
root = tree if isinstance(tree, etree._Element) else tree.getroot()
xml = current.xml
UUID = xml.UID
TUID = xml.ATTRIBUTE.tuid
NAME = xml.ATTRIBUTE.name
elements = root.xpath(".//%s" % xml.TAG.resource)
self._uidmap = uidmap = {UUID: {},
TUID: {},
}
uuidmap = uidmap[UUID]
tuidmap = uidmap[TUID]
for element in elements:
name = element.get(NAME)
r_uuid = element.get(UUID)
                if r_uuid and (name, r_uuid) not in uuidmap:
                    uuidmap[(name, r_uuid)] = element
                r_tuid = element.get(TUID)
                if r_tuid and (name, r_tuid) not in tuidmap:
                    tuidmap[(name, r_tuid)] = element
        return uidmap |
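
# Lookup sketch: the map is keyed per UID type by (tablename, uid) tuples;
# the names below are assumptions:
#
#   xml = current.xml
#   element = self.uidmap[xml.UID].get(("org_organisation", uuid))
#   element = self.uidmap[xml.ATTRIBUTE.tuid].get(("org_organisation", tuid))
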
"""
Map uuid/tuid => element, for faster reference lookups
"""
uidmap = self._uidmap
tree = self.tree
if uidmap is None and tree is not None:
root = tree if isinstance(tree, etree._Element) else tree.getroot()
xml = current.xml
UUID = xml.UID
TUID = xml.ATTRIBUTE.tuid
NAME = xml.ATTRIBUTE.name
elements = root.xpath(".//%s" % xml.TAG.resource)
self._uidmap = uidmap = {UUID: {},
TUID: {},
}
uuidmap = uidmap[UUID]
tuidmap = uidmap[TUID]
for element in elements:
name = element.get(NAME)
r_uuid = element.get(UUID)
if r_uuid and r_uuid not in uuidmap:
uuidmap[(name, r_uuid)] = element
r_tuid = element.get(TUID)
if r_tuid and r_tuid not in tuidmap:
tuidmap[(name, r_tuid)] = element
return uidmap |
Python | def schedule(reference):
""" Schedule a referenced item for implicit import """
entry = reference.entry
if entry and entry.element is not None and not entry.item_id:
item_id = add_item(element=entry.element)
if item_id:
            entry.item_id = item_id |
""" Schedule a referenced item for implicit import """
entry = reference.entry
if entry and entry.element is not None and not entry.item_id:
item_id = add_item(element=entry.element)
if item_id:
entry.item_id = item_id |
Python | def lookahead(self,
element,
table = None,
fields = None,
tree = None,
directory = None,
lookup = None):
"""
Find referenced elements in the tree
Args:
element: the element
table: the DB table
fields: the FK fields in the table
tree: the import tree
directory: a dictionary to lookup elements in the tree
(will be filled in by this function)
"""
db = current.db
s3db = current.s3db
xml = current.xml
import_uid = xml.import_uid
ATTRIBUTE = xml.ATTRIBUTE
TAG = xml.TAG
UID = xml.UID
reference_list = []
rlappend = reference_list.append
root = None
if tree is not None:
root = tree if isinstance(tree, etree._Element) else tree.getroot()
uidmap = self.uidmap
references = [lookup] if lookup else element.findall("reference")
for reference in references:
if lookup:
field = None
if element is None:
tablename, attr, uid = reference
ktable = s3db.table(tablename)
if ktable is None:
continue
uids = [import_uid(uid)] if attr == "uuid" else [uid]
else:
tablename = element.get(ATTRIBUTE.name, None)
ktable, uid = reference
attr = UID
uids = [import_uid(uid)]
else:
field = reference.get(ATTRIBUTE.field, None)
# Ignore references without valid field-attribute
if not field or field not in fields or field not in table:
continue
# Find the key table
ktablename, _, multiple = s3_get_foreign_key(table[field])
if not ktablename:
continue
try:
ktable = s3db[ktablename]
except AttributeError:
continue
tablename = reference.get(ATTRIBUTE.resource, None)
# Ignore references to tables without UID field:
if UID not in ktable.fields:
continue
# Fall back to key table name if tablename is not specified:
if not tablename:
tablename = ktablename
# Super-entity references must use the super-key:
if tablename != ktablename:
field = (ktable._id.name, field)
# Ignore direct references to super-entities:
if tablename == ktablename and ktable._id.name != "id":
continue
# Get the foreign key
uids = reference.get(UID, None)
attr = UID
if not uids:
uids = reference.get(ATTRIBUTE.tuid, None)
attr = ATTRIBUTE.tuid
if uids and multiple:
uids = json.loads(uids)
elif uids:
uids = [uids]
# Find the elements and map to DB records
relements = []
# Create a UID<->ID map
id_map = {}
if attr == UID and uids:
if len(uids) == 1:
uid = import_uid(uids[0])
query = (ktable[UID] == uid)
record = db(query).select(ktable.id,
cacheable = True,
limitby = (0, 1),
).first()
if record:
id_map[uid] = record.id
else:
uids_ = [import_uid(uid) for uid in uids]
query = (ktable[UID].belongs(uids_))
records = db(query).select(ktable.id,
ktable[UID],
limitby = (0, len(uids_)),
)
for r in records:
id_map[r[UID]] = r.id
if not uids:
# Anonymous reference: <resource> inside the element
expr = './/%s[@%s="%s"]' % (TAG.resource,
ATTRIBUTE.name,
tablename,
)
relements = reference.xpath(expr)
if relements and not multiple:
relements = relements[:1]
elif root is not None:
for uid in uids:
entry = None
# Entry already in directory?
if directory is not None:
entry = directory.get((tablename, attr, uid))
if not entry:
e = uidmap[attr].get((tablename, uid)) if uidmap else None
if e is not None:
# Element in the source => append to relements
relements.append(e)
else:
# No element found, see if original record exists
_uid = import_uid(uid)
if _uid and _uid in id_map:
_id = id_map[_uid]
entry = Storage(tablename = tablename,
element = None,
uid = uid,
id = _id,
item_id = None,
)
rlappend(Storage(field = field,
element = reference,
entry = entry,
))
else:
continue
else:
rlappend(Storage(field = field,
element = reference,
entry = entry,
))
# Create entries for all newly found elements
for relement in relements:
uid = relement.get(attr, None)
if attr == UID:
_uid = import_uid(uid)
                    _id = id_map.get(_uid) if _uid else None
else:
_uid = None
_id = None
entry = Storage(tablename = tablename,
element = relement,
uid = uid,
id = _id,
item_id = None,
)
# Add entry to directory
if uid and directory is not None:
directory[(tablename, attr, uid)] = entry
# Append the entry to the reference list
rlappend(Storage(field = field,
element = reference,
entry = entry,
))
        return reference_list |
Python | def load_item(self, row):
"""
Load an item from the item table (counterpart to add_item
when restoring a job from the database)
"""
item = ImportItem(self)
if not item.restore(row):
self.error = item.error
if item.load_parent is None:
self.error_tree.append(deepcopy(item.element))
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
        return item_id |
"""
Load an item from the item table (counterpart to add_item
when restoring a job from the database)
"""
item = ImportItem(self)
if not item.restore(row):
self.error = item.error
if item.load_parent is None:
self.error_tree.append(deepcopy(item.element))
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
return item_id |
Python | def commit(self, ignore_errors=False, log_items=None):
"""
Commit the import job to the DB
Args:
ignore_errors: skip any items with errors
                               (errors are still reported)
log_items: callback function to log import items
before committing them
"""
ATTRIBUTE = current.xml.ATTRIBUTE
METHOD = ImportItem.METHOD
# Resolve references
import_list = []
for item_id in self.items:
self.resolve(item_id, import_list)
if item_id not in import_list:
import_list.append(item_id)
# Commit the items
items = self.items
count = 0
errors = 0
mtime = None
created = []
cappend = created.append
updated = []
deleted = []
tablename = self.table._tablename
self.log = log_items
failed = False
for item_id in import_list:
item = items[item_id]
error = None
if item.accepted is not False:
logged = False
success = item.commit(ignore_errors=ignore_errors)
else:
# Field validation failed
logged = True
success = ignore_errors
if not success:
failed = True
error = item.error
if error:
current.log.error(error)
self.error = error
element = item.element
if element is not None:
if not element.get(ATTRIBUTE.error, False):
element.set(ATTRIBUTE.error, s3_str(error))
if not logged:
self.error_tree.append(deepcopy(element))
if item.tablename == tablename:
errors += 1
elif item.tablename == tablename:
count += 1
if mtime is None or item.mtime > mtime:
mtime = item.mtime
if item.id:
if item.method == METHOD.CREATE:
cappend(item.id)
elif item.method == METHOD.UPDATE:
updated.append(item.id)
elif item.method in (METHOD.MERGE, METHOD.DELETE):
deleted.append(item.id)
if failed:
return False
self.count = count
self.errors = errors
self.mtime = mtime
self.created = created
self.updated = updated
self.deleted = deleted
        return True |
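
# Sketch of evaluating a commit run; the counters are set by the method
# above, the logging itself is an assumption:
#
#   if import_job.commit(ignore_errors=True):
#       current.log.info("%s records imported, %s invalid items ignored" %
#                        (import_job.count, import_job.errors))
#   else:
#       current.log.error(import_job.error)
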
"""
Commit the import job to the DB
Args:
ignore_errors: skip any items with errors
(does still report the errors)
log_items: callback function to log import items
before committing them
"""
ATTRIBUTE = current.xml.ATTRIBUTE
METHOD = ImportItem.METHOD
# Resolve references
import_list = []
for item_id in self.items:
self.resolve(item_id, import_list)
if item_id not in import_list:
import_list.append(item_id)
# Commit the items
items = self.items
count = 0
errors = 0
mtime = None
created = []
cappend = created.append
updated = []
deleted = []
tablename = self.table._tablename
self.log = log_items
failed = False
for item_id in import_list:
item = items[item_id]
error = None
if item.accepted is not False:
logged = False
success = item.commit(ignore_errors=ignore_errors)
else:
# Field validation failed
logged = True
success = ignore_errors
if not success:
failed = True
error = item.error
if error:
current.log.error(error)
self.error = error
element = item.element
if element is not None:
if not element.get(ATTRIBUTE.error, False):
element.set(ATTRIBUTE.error, s3_str(error))
if not logged:
self.error_tree.append(deepcopy(element))
if item.tablename == tablename:
errors += 1
elif item.tablename == tablename:
count += 1
if mtime is None or item.mtime > mtime:
mtime = item.mtime
if item.id:
if item.method == METHOD.CREATE:
cappend(item.id)
elif item.method == METHOD.UPDATE:
updated.append(item.id)
elif item.method in (METHOD.MERGE, METHOD.DELETE):
deleted.append(item.id)
if failed:
return False
self.count = count
self.errors = errors
self.mtime = mtime
self.created = created
self.updated = updated
self.deleted = deleted
return True |
Python | def store(self):
"""
Store this job and all its items in the job table
"""
db = current.db
s3db = current.s3db
jobtable = s3db.s3_import_job
query = (jobtable.job_id == self.job_id)
row = db(query).select(jobtable.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job_id)
try:
tablename = self.table._tablename
except AttributeError:
pass
else:
record.update(tablename=tablename)
for item in self.items.values():
item.store(item_table=s3db.s3_import_item)
if record_id:
db(jobtable.id == record_id).update(**record)
else:
record_id = jobtable.insert(**record)
        return record_id |
"""
Store this job and all its items in the job table
"""
db = current.db
s3db = current.s3db
jobtable = s3db.s3_import_job
query = (jobtable.job_id == self.job_id)
row = db(query).select(jobtable.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job_id)
try:
tablename = self.table._tablename
except AttributeError:
pass
else:
record.update(tablename=tablename)
for item in self.items.values():
item.store(item_table=s3db.s3_import_item)
if record_id:
db(jobtable.id == record_id).update(**record)
else:
record_id = jobtable.insert(**record)
return record_id |
Python | def delete(self):
"""
Delete this job and all its items from the job table
"""
db = current.db
s3db = current.s3db
job_id = self.job_id
item_table = s3db.s3_import_item
db(item_table.job_id == job_id).delete()
job_table = s3db.s3_import_job
        db(job_table.job_id == job_id).delete() |
"""
Delete this job and all its items from the job table
"""
db = current.db
s3db = current.s3db
job_id = self.job_id
item_table = s3db.s3_import_item
db(item_table.job_id == job_id).delete()
job_table = s3db.s3_import_job
db(job_table.job_id == job_id).delete() |