repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mchels/FolderBrowser | plotcontrols.py | 1 | 6028 | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QSizePolicy
class PlotControls(QtWidgets.QWidget):
"""
Control bar for controlling how plots are shown.
Parameters
----------
cmap_names : list
List of colormap names to show in the colormap dropdown menu.
plot_2D_types : list
List of plot_2D_type names.
"""
def __init__(self, cmap_names, plot_2D_types):
super().__init__()
self.layout = QtWidgets.QHBoxLayout()
self.num_col_boxes = 3
self.num_lim_boxes = 3
self.cmap_names = cmap_names
self.plot_2D_types = plot_2D_types
self.init_col_sel_boxes()
self.init_cmap_sel()
self.init_plot_2D_type_sel()
self.init_lim_boxes()
self.init_aspect_box()
self.setLayout(self.layout)
def reset_col_boxes(self, array_of_text_items):
"""
Reset column selector boxes.
"""
assert len(array_of_text_items) == self.num_col_boxes
for i, box in enumerate(self.col_boxes):
box.list_of_text_items = array_of_text_items[i]
prev_text = box.currentText()
box.clear()
box.addItems(array_of_text_items[i])
idx = box.findText(prev_text)
box.setCurrentIndex(idx)
min_width = len(max(box.list_of_text_items, key=len)) * 8
box.view().setMinimumWidth(min_width)
# All indices must be set in the loop above before we can start
# assigning lowest unoccupied texts. Otherwise we don't know which
# texts are unoccupied.
for box in self.col_boxes:
if box.currentIndex() == -1:
self.select_lowest_unoccupied(box)
def init_col_sel_boxes(self):
"""
Initialize column selector boxes.
"""
self.col_boxes = [None] * self.num_col_boxes
for i in range(self.num_col_boxes):
box = QtWidgets.QComboBox()
box.setMaxVisibleItems(80)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
box.setSizePolicy(policy_horiz, policy_vert)
box.setMinimumWidth(40)
self.layout.addWidget(box)
self.col_boxes[i] = box
def init_cmap_sel(self):
"""
Initialize colormap selector.
"""
cmap_sel = QtWidgets.QComboBox()
cmap_sel.addItems(self.cmap_names)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
cmap_sel.setSizePolicy(policy_horiz, policy_vert)
cmap_sel.setMinimumWidth(40)
min_width = len(max(self.cmap_names, key=len)) * 8
cmap_sel.view().setMinimumWidth(min_width)
self.layout.addWidget(cmap_sel)
self.cmap_sel = cmap_sel
def init_plot_2D_type_sel(self):
plot_2D_type_sel = QtWidgets.QComboBox()
plot_2D_type_sel.addItems(self.plot_2D_types)
policy_horiz = QSizePolicy.MinimumExpanding
policy_vert = QSizePolicy.Maximum
plot_2D_type_sel.setSizePolicy(policy_horiz, policy_vert)
plot_2D_type_sel.setMinimumWidth(40)
min_width = len(max(self.plot_2D_types, key=len)) * 8
plot_2D_type_sel.view().setMinimumWidth(min_width)
self.layout.addWidget(plot_2D_type_sel)
self.plot_2D_type_sel = plot_2D_type_sel
def init_lim_boxes(self):
self.lim_boxes = [None] * self.num_lim_boxes
dim_names = ['x', 'y', 'z']
for i in range(self.num_lim_boxes):
lim_box = QtWidgets.QLineEdit()
tooltip = ('Limit for {}. Use <number>:<number> where both numbers '
'can be empty').format(dim_names[i])
lim_box.setToolTip(tooltip)
self.layout.addWidget(lim_box)
self.lim_boxes[i] = lim_box
def init_aspect_box(self):
aspect_box = QtWidgets.QLineEdit()
aspect_box.setToolTip('Aspect ratio, use <number> or <number:number>')
self.layout.addWidget(aspect_box)
self.aspect_box = aspect_box
def get_sel_cols(self):
sel_texts = [box.currentText() for box in self.col_boxes]
return sel_texts
def get_sel_2D_type(self):
sel_str = self.plot_2D_type_sel.currentText()
return sel_str
def get_lims(self):
lims = [None] * self.num_lim_boxes
for i, lim_box in enumerate(self.lim_boxes):
lims[i] = self.parse_lims(lim_box.text())
return lims
def get_aspect(self):
text = self.aspect_box.text()
return self.parse_aspect(text)
def select_lowest_unoccupied(self, box):
"""
Sets the text on box to the text with the lowest index in
box.list_of_text_items which is not already selected in another box in
self.col_boxes.
"""
sel_texts = self.get_sel_cols()
for i, text in enumerate(box.list_of_text_items):
if text not in sel_texts:
box.setCurrentIndex(i)
return
def set_text_on_box(self, box_idx, text):
"""
Potential infinite loop if sel_col_func calls this function.
"""
box = self.col_boxes[box_idx]
idx = box.findText(text)
box.setCurrentIndex(idx)
def parse_lims(self, text):
lims = text.split(':')
if len(lims) != 2:
return (None, None)
lower_lim = self.conv_to_float_or_None(lims[0])
upper_lim = self.conv_to_float_or_None(lims[1])
return (lower_lim, upper_lim)
def parse_aspect(self, text):
        try:
            return float(text)
        except ValueError:
            pass
parts = text.split(':')
try:
num = float(parts[0])
den = float(parts[1])
except (ValueError, IndexError):
return 'auto'
return num / den
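    # Behaviour of the two parsers above, traced from the code (illustrative inputs):
    #   parse_lims('1.5:9')  -> (1.5, 9.0)
    #   parse_lims(':9')     -> (None, 9.0)
    #   parse_lims('oops')   -> (None, None)   # no ':' separator
    #   parse_aspect('1.6')  -> 1.6
    #   parse_aspect('16:9') -> 16.0 / 9.0
    #   parse_aspect('x')    -> 'auto'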
@staticmethod
    def conv_to_float_or_None(text):
        try:
            return float(text)
        except ValueError:
            return None
| mit | -5,390,035,938,519,185,000 | 34.251462 | 80 | 0.584439 | false | 3.613909 | false | false | false |
AlexProfi/django-cms | cms/cms_menus.py | 1 | 16191 | # -*- coding: utf-8 -*-
from django.utils.translation import get_language
from cms import constants
from cms.apphook_pool import apphook_pool
from cms.utils.permissions import load_view_restrictions, has_global_page_permission
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import current_site
from cms.utils.i18n import get_fallback_languages, hide_untranslated
from cms.utils.page_resolver import get_page_queryset
from cms.utils.moderator import get_title_queryset, use_draft
from menus.base import Menu, NavigationNode, Modifier
from menus.menu_pool import menu_pool
def get_visible_page_objects(request, pages, site=None):
"""
This code is basically a many-pages-at-once version of
Page.has_view_permission.
pages contains all published pages
check if there is ANY restriction
that needs a permission page visibility calculation
"""
public_for = get_cms_setting('PUBLIC_FOR')
can_see_unrestricted = public_for == 'all' or (
public_for == 'staff' and request.user.is_staff)
is_auth_user = request.user.is_authenticated()
restricted_pages = load_view_restrictions(request, pages)
if not restricted_pages:
if can_see_unrestricted:
return pages
elif not is_auth_user:
return [] # Unauth user can't acquire global or user perm to see pages
if get_cms_setting('PERMISSION') and not site:
site = current_site(request) # avoid one extra query when possible
if has_global_page_permission(request, site, can_view=True):
return pages
def has_global_perm():
if has_global_perm.cache < 0:
if request.user.has_perm('cms.view_page'):
has_global_perm.cache = 1
else:
has_global_perm.cache = 0
return bool(has_global_perm.cache)
has_global_perm.cache = -1
def has_permission_membership(page_id):
"""
PagePermission user group membership tests
"""
user_pk = request.user.pk
for perm in restricted_pages[page_id]:
if perm.user_id == user_pk:
return True
if not perm.group_id:
continue
if has_permission_membership.user_groups is None:
has_permission_membership.user_groups = request.user.groups.all().values_list(
'pk', flat=True)
if perm.group_id in has_permission_membership.user_groups:
return True
return False
has_permission_membership.user_groups = None
visible_pages = []
for page in pages:
to_add = False
page_id = page.pk
is_restricted = page_id in restricted_pages
# restricted_pages contains as key any page.pk that is
# affected by a permission grant_on
if not is_restricted and can_see_unrestricted:
to_add = True
elif is_auth_user:
# setting based handling of unrestricted pages
# check group and user memberships to restricted pages
if is_restricted and has_permission_membership(page_id):
to_add = True
elif has_global_perm():
to_add = True
if to_add:
visible_pages.append(page)
return visible_pages
def get_visible_pages(request, pages, site=None):
"""Returns the IDs of all visible pages"""
pages = get_visible_page_objects(request, pages, site)
return [page.pk for page in pages]
def page_to_node(page, home, cut):
"""
Transform a CMS page into a navigation node.
:param page: the page you wish to transform
:param home: a reference to the "home" page (the page with path="0001")
:param cut: Should we cut page from its parent pages? This means the node will not
have a parent anymore.
"""
    # These are simple to port over, since they are not calculated.
    # Other attributes will be added conditionally later.
attr = {
'soft_root': page.soft_root,
'auth_required': page.login_required,
'reverse_id': page.reverse_id,
}
parent_id = page.parent_id
# Should we cut the Node from its parents?
if home and page.parent_id == home.pk and cut:
parent_id = None
# possible fix for a possible problem
# if parent_id and not page.parent.get_calculated_status():
# parent_id = None # ????
if page.limit_visibility_in_menu is constants.VISIBILITY_ALL:
attr['visible_for_authenticated'] = True
attr['visible_for_anonymous'] = True
else:
attr['visible_for_authenticated'] = page.limit_visibility_in_menu == constants.VISIBILITY_USERS
attr['visible_for_anonymous'] = page.limit_visibility_in_menu == constants.VISIBILITY_ANONYMOUS
attr['is_home'] = page.is_home
# Extenders can be either navigation extenders or from apphooks.
extenders = []
if page.navigation_extenders:
if page.navigation_extenders in menu_pool.menus:
extenders.append(page.navigation_extenders)
elif "{0}:{1}".format(page.navigation_extenders, page.pk) in menu_pool.menus:
extenders.append("{0}:{1}".format(page.navigation_extenders, page.pk))
# Is this page an apphook? If so, we need to handle the apphooks's nodes
lang = get_language()
# Only run this if we have a translation in the requested language for this
# object. The title cache should have been prepopulated in CMSMenu.get_nodes
# but otherwise, just request the title normally
if not hasattr(page, 'title_cache') or lang in page.title_cache:
app_name = page.get_application_urls(fallback=False)
if app_name: # it means it is an apphook
app = apphook_pool.get_apphook(app_name)
extenders += app.menus
exts = []
for ext in extenders:
if hasattr(ext, "get_instances"):
# CMSAttachMenus are treated a bit differently to allow them to be
# able to be attached to multiple points in the navigation.
exts.append("{0}:{1}".format(ext.__name__, page.pk))
elif hasattr(ext, '__name__'):
exts.append(ext.__name__)
else:
exts.append(ext)
if exts:
attr['navigation_extenders'] = exts
# Do we have a redirectURL?
attr['redirect_url'] = page.get_redirect() # save redirect URL if any
attr['slug'] = page.get_slug() #save page slug
# Now finally, build the NavigationNode object and return it.
ret_node = NavigationNode(
page.get_menu_title(),
page.get_absolute_url(),
page.pk,
parent_id,
attr=attr,
visible=page.in_navigation,
)
return ret_node
class CMSMenu(Menu):
def get_nodes(self, request):
page_queryset = get_page_queryset(request)
site = current_site(request)
lang = get_language_from_request(request)
filters = {
'site': site,
}
if hide_untranslated(lang, site.pk):
filters['title_set__language'] = lang
if not use_draft(request):
filters['title_set__published'] = True
if not use_draft(request):
page_queryset = page_queryset.published()
pages = page_queryset.filter(**filters).order_by("path")
ids = {}
nodes = []
first = True
home_cut = False
home_children = []
home = None
actual_pages = []
# cache view perms
visible_pages = get_visible_pages(request, pages, site)
for page in pages:
# Pages are ordered by path, therefore the first page is the root
# of the page tree (a.k.a "home")
if page.pk not in visible_pages:
# Don't include pages the user doesn't have access to
continue
if not home:
home = page
if first and page.pk != home.pk:
home_cut = True
if (home_cut and (page.parent_id == home.pk or
page.parent_id in home_children)):
home_children.append(page.pk)
if ((page.pk == home.pk and home.in_navigation)
or page.pk != home.pk):
first = False
ids[page.id] = page
actual_pages.append(page)
page.title_cache = {}
langs = [lang]
if not hide_untranslated(lang):
langs.extend(get_fallback_languages(lang))
titles = list(get_title_queryset(request).filter(
page__in=ids, language__in=langs))
for title in titles: # add the title and slugs and some meta data
page = ids[title.page_id]
page.title_cache[title.language] = title
for page in actual_pages:
if page.title_cache:
nodes.append(page_to_node(page, home, home_cut))
return nodes
menu_pool.register_menu(CMSMenu)
class NavExtender(Modifier):
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if post_cut:
return nodes
# rearrange the parent relations
# Find home
home = next((n for n in nodes if n.attr.get("is_home", False)), None)
# Find nodes with NavExtenders
exts = []
for node in nodes:
extenders = node.attr.get("navigation_extenders", None)
if extenders:
for ext in extenders:
if ext not in exts:
exts.append(ext)
# Link the nodes
for extnode in nodes:
if extnode.namespace == ext and not extnode.parent_id:
# if home has nav extenders but home is not visible
if node == home and not node.visible:
# extnode.parent_id = None
extnode.parent_namespace = None
extnode.parent = None
else:
extnode.parent_id = node.id
extnode.parent_namespace = node.namespace
extnode.parent = node
node.children.append(extnode)
removed = []
# find all not assigned nodes
for menu in menu_pool.menus.items():
if (hasattr(menu[1], 'cms_enabled')
and menu[1].cms_enabled and not menu[0] in exts):
for node in nodes:
if node.namespace == menu[0]:
removed.append(node)
if breadcrumb:
# if breadcrumb and home not in navigation add node
if breadcrumb and home and not home.visible:
home.visible = True
if request.path_info == home.get_absolute_url():
home.selected = True
else:
home.selected = False
# remove all nodes that are nav_extenders and not assigned
for node in removed:
nodes.remove(node)
return nodes
menu_pool.register_modifier(NavExtender)
class SoftRootCutter(Modifier):
"""
Ask evildmp/superdmp if you don't understand softroots!
Softroot description from the docs:
A soft root is a page that acts as the root for a menu navigation tree.
Typically, this will be a page that is the root of a significant new
section on your site.
When the soft root feature is enabled, the navigation menu for any page
will start at the nearest soft root, rather than at the real root of
the site’s page hierarchy.
This feature is useful when your site has deep page hierarchies (and
therefore multiple levels in its navigation trees). In such a case, you
usually don’t want to present site visitors with deep menus of nested
items.
    For example, you’re on the page “Introduction to Bleeding”, so the menu
might look like this:
School of Medicine
Medical Education
Departments
Department of Lorem Ipsum
Department of Donec Imperdiet
Department of Cras Eros
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <this is the current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
Department of Curabitur a Purus
Department of Sed Accumsan
Department of Etiam
Research
Administration
Contact us
Impressum
which is frankly overwhelming.
    By making “Department of Mediaeval Surgery” a soft root, the menu
becomes much more manageable:
Department of Mediaeval Surgery
Theory
Cures
Bleeding
Introduction to Bleeding <current page>
Bleeding - the scientific evidence
Cleaning up the mess
Cupping
Leaches
Maggots
Techniques
Instruments
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
# only apply this modifier if we're pre-cut (since what we do is cut)
# or if no id argument is provided, indicating {% show_menu_below_id %}
if post_cut or root_id:
return nodes
selected = None
root_nodes = []
# find the selected node as well as all the root nodes
for node in nodes:
if node.selected:
selected = node
if not node.parent:
root_nodes.append(node)
# if we found a selected ...
if selected:
# and the selected is a softroot
if selected.attr.get("soft_root", False):
# get it's descendants
nodes = selected.get_descendants()
# remove the link to parent
selected.parent = None
# make the selected page the root in the menu
nodes = [selected] + nodes
else:
# if it's not a soft root, walk ancestors (upwards!)
nodes = self.find_ancestors_and_remove_children(selected, nodes)
return nodes
def find_and_remove_children(self, node, nodes):
for child in node.children:
if child.attr.get("soft_root", False):
self.remove_children(child, nodes)
return nodes
def remove_children(self, node, nodes):
for child in node.children:
nodes.remove(child)
self.remove_children(child, nodes)
node.children = []
def find_ancestors_and_remove_children(self, node, nodes):
"""
Check ancestors of node for soft roots
"""
if node.parent:
if node.parent.attr.get("soft_root", False):
nodes = node.parent.get_descendants()
node.parent.parent = None
nodes = [node.parent] + nodes
else:
nodes = self.find_ancestors_and_remove_children(
node.parent, nodes)
else:
for newnode in nodes:
if newnode != node and not newnode.parent:
self.find_and_remove_children(newnode, nodes)
for child in node.children:
if child != node:
self.find_and_remove_children(child, nodes)
return nodes
menu_pool.register_modifier(SoftRootCutter)
| bsd-3-clause | 7,715,891,116,846,316,000 | 36.992958 | 103 | 0.571517 | false | 4.357835 | false | false | false |
hadware/lexicographer | epub-parser/src/epub_to_json/epub_to_json.py | 1 | 5101 | import sys
from epub import open_epub
import simplejson as json
from bs4 import BeautifulSoup, Tag
class SimpleChapter(object):
def __init__(self, name, text):
self.name = name
self.text = text
class Parser(object):
def __init__(self, epub_path):
self.epub_file = open_epub(epub_path, 'r')
# current item used for navigation
self.current_item = None
# soup for the current item
self.item_data_soup = None
def _get_metadata_(self, metadata):
        meta = {}
        # get metadata
        meta['titles'] = [x for x in metadata.titles[0] if x]
        meta['creators'] = [x for x in metadata.creators[0] if x]
        meta['subjects'] = [x for x in metadata.subjects if x]
        meta['identifiers'] = [x for x in metadata.identifiers[0] if x]
        meta['dates'] = [x for x in metadata.dates[0] if x]
        meta['right'] = metadata.right
        # return filled dict
        return meta
def _get_text_chapter_(self, current_tag, next_tag=None, first_item=False):
if first_item:
chapter_text = current_tag.get_text()
else:
chapter_text = ''
for elem in current_tag.next_siblings:
# if next tag
if next_tag is not None and isinstance(elem, Tag) and elem == next_tag:
break
# else, append text
elif isinstance(elem, Tag):
text = elem.get_text()
# if end of ebook
if "Project Gutenberg" in text:
break
else:
chapter_text += text
# sanitize text
chapter_text = chapter_text.replace('\n', ' ').replace('*', '').replace('"', ' ')
chapter_text = chapter_text.strip()
return chapter_text
def _switch_item_(self, item):
# if new file or first read
if self.current_item != item or self.item_data_soup is None:
# we change the current item
self.current_item = item
# we read the file
self.item_data_soup = BeautifulSoup(self.epub_file.read_item(item), 'lxml')
def _iterate_chapter_(self, chapters, current_nav, next_nav):
# get chapter name
chapter_name = current_nav.labels[0][0]
# get chapter id & file
split_src = current_nav.src.rsplit('#', 1)
item = self.epub_file.get_item_by_href(split_src[0])
self._switch_item_(item)
# get tag by id
current_tag = self.item_data_soup.find(id=split_src[1])
# determine which tag is next
if current_nav.nav_point:
direct_next = current_nav.nav_point[0]
else:
if next_nav is not None:
direct_next = next_nav
else:
direct_next = None
if direct_next is not None:
next_split = direct_next.src.rsplit('#', 1)
# if next is on same file
if split_src[0] == next_split[0]:
next_tag = self.item_data_soup.find(id=next_split[1])
chapter_text = self._get_text_chapter_(current_tag, next_tag)
else:
# get text remaining on current page
chapter_text = self._get_text_chapter_(current_tag)
# get next item
item = self.epub_file.get_item_by_href(next_split[0])
self._switch_item_(item)
current_tag = self.item_data_soup.body.contents[0]
next_tag = self.item_data_soup.find(id=next_split[1])
chapter_text += self._get_text_chapter_(current_tag, next_tag, True)
else:
chapter_text = self._get_text_chapter_(current_tag)
# add chapter to array if not empty
if chapter_text != '' and "CONTENT" not in chapter_name.upper() and "CHAPTERS" not in chapter_name.upper():
chapters.append(SimpleChapter(chapter_name, chapter_text).__dict__)
# if nav point has subchild
if current_nav.nav_point:
it = iter(current_nav.nav_point)
current_nav = next(it)
for child in it:
self._iterate_chapter_(chapters, current_nav, child)
current_nav = child
self._iterate_chapter_(chapters, current_nav, next_nav)
def epub_to_json(self):
epub = {}
chapters = []
it = iter(self.epub_file.toc.nav_map.nav_point)
current_nav = next(it)
for next_nav in it:
self._iterate_chapter_(chapters, current_nav, next_nav)
current_nav = next_nav
self._iterate_chapter_(chapters, current_nav, None)
# assemble parts
epub['metadatas'] = self._get_metadata_(self.epub_file.opf.metadata)
epub['chapters'] = chapters
# create json object
json_obj = json.dumps(epub, separators=(',', ':'), ensure_ascii=False)
self.epub_file.close()
return json_obj
if __name__ == '__main__':
# need one argument
parser = Parser(sys.argv[1])
parser.epub_to_json()
| gpl-2.0 | 7,686,731,611,143,497,000 | 32.781457 | 115 | 0.554401 | false | 3.867324 | false | false | false |
datamade/yournextmp-popit | candidates/tests/test_person_view.py | 1 | 2933 | # Smoke tests for viewing a candidate's page
from datetime import date, timedelta
import re
from django.conf import settings
from django.test.utils import override_settings
from django_webtest import WebTest
from .factories import (
AreaTypeFactory, ElectionFactory, CandidacyExtraFactory,
ParliamentaryChamberFactory, PartyFactory, PartyExtraFactory,
PersonExtraFactory, PostExtraFactory
)
election_date_before = lambda r: {'DATE_TODAY': date.today()}
election_date_after = lambda r: {'DATE_TODAY': date.today() + timedelta(days=28)}
processors = settings.TEMPLATE_CONTEXT_PROCESSORS
processors_before = processors + ("candidates.tests.test_person_view.election_date_before",)
processors_after = processors + ("candidates.tests.test_person_view.election_date_after",)
class TestPersonView(WebTest):
def setUp(self):
wmc_area_type = AreaTypeFactory.create()
election = ElectionFactory.create(
slug='2015',
name='2015 General Election',
area_types=(wmc_area_type,)
)
commons = ParliamentaryChamberFactory.create()
post_extra = PostExtraFactory.create(
elections=(election,),
base__organization=commons,
slug='65808',
base__label='Member of Parliament for Dulwich and West Norwood'
)
person_extra = PersonExtraFactory.create(
base__id='2009',
base__name='Tessa Jowell'
)
PartyFactory.reset_sequence()
party_extra = PartyExtraFactory.create()
CandidacyExtraFactory.create(
election=election,
base__person=person_extra.base,
base__post=post_extra.base,
base__on_behalf_of=party_extra.base
)
def test_get_tessa_jowell(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertTrue(
re.search(
r'''(?msx)
<h1>Tessa\s+Jowell</h1>\s*
<p>Candidate\s+for\s+
<a\s+href="/election/2015/post/65808/dulwich-and-west-norwood">Dulwich\s+
and\s+West\s+Norwood</a>\s+in\ 2015\s+General\s+Election\s*</p>''',
unicode(response)
)
)
@override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_before)
def test_get_tessa_jowell_before_election(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertContains(response, 'Contesting in the 2015 General Election')
@override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_after)
def test_get_tessa_jowell_after_election(self):
response = self.app.get('/person/2009/tessa-jowell')
self.assertContains(response, 'Contested in the 2015 General Election')
def test_get_non_existent(self):
response = self.app.get(
'/person/987654/imaginary-person',
expect_errors=True
)
self.assertEqual(response.status_code, 404)
| agpl-3.0 | 2,693,899,126,005,377,500 | 35.6625 | 92 | 0.656325 | false | 3.594363 | true | false | false |
antoinecarme/pyaf | setup.py | 1 | 1126 | from setuptools import setup
from setuptools import find_packages
with open("README.md", "r") as fh:
pyaf_long_description = fh.read()
setup(name='pyaf',
version='3.0-RC1',
description='Python Automatic Forecasting',
long_description=pyaf_long_description,
long_description_content_type="text/markdown",
author='Antoine CARME',
author_email='[email protected]',
url='https://github.com/antoinecarme/pyaf',
license='BSD 3-clause',
packages=find_packages(include=['pyaf', 'pyaf.*']),
python_requires='>=3',
classifiers=['Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3'],
keywords='arx automatic-forecasting autoregressive benchmark cycle decomposition exogenous forecasting heroku hierarchical-forecasting horizon jupyter pandas python scikit-learn seasonal time-series transformation trend web-service',
install_requires=[
'scipy',
'pandas',
'sklearn',
'matplotlib',
'pydot',
'dill',
'sqlalchemy'
])
| bsd-3-clause | 8,461,214,734,102,750,000 | 37.827586 | 239 | 0.64476 | false | 4.035842 | false | true | false |
miguelalonso/pywws | src/doc/conf.py | 1 | 9030 | # -*- coding: utf-8 -*-
#
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-15 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# pywws documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 30 08:05:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# kludge to allow documentation to be compiled without installing dependencies
class Dummy(object):
def __getattr__(self, name):
if name in ('__file__',):
return None
return Dummy
for mod_name in ('hid', 'oauth2', 'twitter', 'usb', 'usb.core', 'usb.util',
'libusb1', 'usb1', 'daemon', 'daemon.runner'):
sys.modules[mod_name] = Dummy()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.viewcode']
autosummary_generate = True
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'undoc-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
rst_epilog = """
----
Comments or questions? Please subscribe to the pywws mailing list
http://groups.google.com/group/pywws and let us know.
"""
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pywws'
copyright = u'2008-15, pywws contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version =
# The full version, including alpha/beta/rc tags.
#release =
from pywws import __version__ as release
version = release[:release.rfind('.')]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
if not on_rtd and 'LANG' in os.environ:
language = os.environ['LANG'].split('_')[0]
locale_dirs = ['../pywws/lang']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
html_logo = 'pywws_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
html_favicon = 'pywws_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywwsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pywws.tex', u'pywws Documentation',
u'Jim Easterbrook', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywws', u'pywws Documentation',
[u'Jim Easterbrook'], 1)
]
| gpl-2.0 | 7,874,652,060,301,545,000 | 31.956204 | 81 | 0.706645 | false | 3.673718 | false | false | false |
PaddlePaddle/Paddle | python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py | 1 | 50054 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from functools import reduce
import collections
import math
import os
import warnings
import logging
import six
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.core import CommContext
import paddle.fluid.framework as framework
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools
from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher
from paddle.fluid.transpiler.details.program_utils import delete_ops
OP_NAME_SCOPE = "op_namescope"
CLIP_OP_NAME_SCOPE = "gradient_clip"
STEP_COUNTER = "@PS_STEP_COUNTER@"
LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@"
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize
SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"]
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
def _get_lr_ops(program):
lr_ops = []
for index, op in enumerate(program.global_block().ops):
role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
if role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) or \
role_id == int(LR_SCHED_OP_ROLE_ATTR_VALUE) | \
int(OPT_OP_ROLE_ATTR_VALUE):
lr_ops.append(op)
return lr_ops
def _has_global_step(lr_ops):
if len(lr_ops) > 0:
for idx, op in enumerate(lr_ops):
if op.type != 'increment':
continue
counter = op.input("X")[0]
if counter == LEARNING_RATE_DECAY_COUNTER:
return True
return False
def is_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_sparse') is True and op.attr(
'is_distributed') is False:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is False:
return True
return False
def is_distributed_sparse_op(op):
if op.type in SPARSE_OP_LIST and op.attr('is_distributed') is True:
return True
if op.type == "distributed_lookup_table" and op.attr(
'is_distributed') is True:
return True
return False
def get_sparse_tablename(op):
return op.input("W")[0]
def get_sparse_tablenames(program, is_distributed):
tablenames = set()
if is_distributed:
for op in program.global_block().ops:
if is_distributed_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
else:
for op in program.global_block().ops:
if is_sparse_op(op):
tablenames.add(get_sparse_tablename(op))
return list(tablenames)
class MergedVariable:
def __init__(self, merged, ordered, offsets):
self.merged_var = merged
self.ordered_vars = ordered
self.offsets = offsets
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@Singleton
class CompileTimeStrategy(object):
def __init__(self, main_program, startup_program, strategy, role_maker):
self.min_block_size = 81920
self.origin_main_program = main_program
self.origin_startup_program = startup_program
self.origin_ps_main_program = main_program
self.origin_ps_startup_program = startup_program
self.strategy = strategy
self.role_maker = role_maker
self.use_ps_gpu = False
try:
self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode
except:
warnings.warn(
"Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet"
)
self.is_heter_ps_mode = False
self.origin_sparse_pairs = []
self.origin_dense_pairs = []
self.merged_variables_pairs = []
self.merged_dense_pairs = []
self.merged_sparse_pairs = []
self.merged_variable_map = {}
self.param_name_to_grad_name = {}
self.grad_name_to_param_name = {}
self.param_grad_ep_mapping = collections.OrderedDict()
self.grad_param_mapping = collections.OrderedDict()
self._build_var_distributed()
self.tensor_table_dict = {}
# for heter-ps save variables
self.origin_merged_variables_pairs = list(self.merged_variables_pairs)
self.origin_merged_dense_pairs = list(self.merged_dense_pairs)
self.origin_merged_sparse_pairs = list(self.merged_sparse_pairs)
def get_distributed_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode
def is_sync_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.SYNC
def is_geo_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.GEO
def is_async_mode(self):
trainer = self.strategy.get_trainer_runtime_config()
return trainer.mode == DistributedMode.ASYNC
def get_role_id(self):
try:
return self.role_maker._role_id()
except Exception:
return self.role_maker.role_id()
def get_trainers(self):
try:
return self.role_maker._worker_num()
except Exception:
return self.role_maker.worker_num()
def get_ps_endpoint(self):
try:
return self.role_maker._get_pserver_endpoints()[self.get_role_id()]
except Exception:
return self.role_maker.get_pserver_endpoints()[self.get_role_id()]
def get_ps_endpoints(self):
try:
return self.role_maker._get_pserver_endpoints()
except Exception:
return self.role_maker.get_pserver_endpoints()
def get_heter_worker_endpoints(self):
try:
return self.role_maker._get_heter_worker_endpoints()
except Exception:
return self.role_maker.get_heter_worker_endpoints()
def get_heter_worker_endpoint(self):
try:
return self.role_maker._get_heter_worker_endpoint()
except Exception:
return self.role_maker.get_heter_worker_endpoint()
def get_origin_programs(self):
return self.origin_main_program, self.origin_startup_program
def get_origin_main_program(self):
return self.origin_main_program
def get_origin_startup_program(self):
return self.origin_startup_program
def set_origin_ps_main_program(self, program):
self.origin_ps_main_program = program
def set_origin_ps_startup_program(self, program):
self.origin_ps_startup_program = program
def get_origin_ps_main_program(self):
return self.origin_ps_main_program
def get_origin_ps_startup_program(self):
return self.origin_ps_startup_program
def add_tensor_table(self,
feed_var_name,
fetch_var_name="",
startup_program=None,
main_program=None,
tensor_table_class=""):
self.tensor_table_dict[feed_var_name] = {}
self.tensor_table_dict[feed_var_name]["feed_var_name"] = feed_var_name
self.tensor_table_dict[feed_var_name]["fetch_var_name"] = fetch_var_name
self.tensor_table_dict[feed_var_name][
"startup_program"] = startup_program
self.tensor_table_dict[feed_var_name]["main_program"] = main_program
self.tensor_table_dict[feed_var_name][
"tensor_table_class"] = tensor_table_class
def get_tensor_table_dict(self):
return self.tensor_table_dict
def get_sparse_varname_on_ps(self, is_distributed, endpoint=None):
if not endpoint:
endpoint = self.get_ps_endpoint()
varnames = get_sparse_tablenames(self.get_origin_main_program(),
is_distributed)
ps_sparse_varnames = []
for varname in varnames:
tables = self.get_var_distributed(varname, True)
for i in range(len(tables)):
table, ep, _ = tables[i]
if ep == endpoint:
ps_sparse_varnames.append(table)
return ps_sparse_varnames
def get_optimize_varname_on_ps(self, param_name):
origin_param_name, _, _ = _get_varname_parts(param_name)
optimize_var_names = []
for op in self.get_origin_main_program().global_block().ops:
# check all optimizer op
if int(op.all_attrs()["op_role"]) == 2:
# check param name
if op.input("Param")[0] != origin_param_name:
continue
# check all input
for key in op.input_names:
if key in [
"Param", "Grad", "LearningRate", "Beta1Tensor",
"Beta2Tensor"
]:
continue
                    # check variable shape related param, e.g: Moment1
optimize_var_names += self._get_optimizer_param_related_var_name(
op, op.type, key)
return optimize_var_names
def _get_optimizer_param_related_var_name(self, op, op_type, varkey):
"""
        Returns the names for optimizer inputs that need to be loaded
"""
related_var_names = []
if op_type == "adam":
if varkey in ["Moment1", "Moment2"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "adagrad":
if varkey == "Moment":
related_var_names.append(op.input(varkey)[0])
elif op_type in ["momentum", "lars_momentum"]:
if varkey == "Velocity":
related_var_names.append(op.input(varkey)[0])
elif op_type == "rmsprop":
if varkey in ["Moment", "MeanSquare"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "ftrl":
if varkey in ["SquaredAccumulator", "LinearAccumulator"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "sgd":
pass
else:
raise ValueError(
"Not supported optimizer for distributed training: %s" %
op_type)
return related_var_names
def build_ctx(self,
vars,
mapping,
is_grad,
is_sparse,
is_send,
is_distributed=False):
def get_grad_var_ep(slices):
names = []
eps = []
sections = []
for slice in slices:
if self.is_geo_mode():
if is_send:
names.append("{}.delta".format(slice.name))
else:
names.append(slice.name)
elif is_grad and self.is_sync_mode() and self.get_trainers(
) > 1:
names.append("{}.trainer_{}".format(slice.name,
self.get_role_id()))
else:
names.append(slice.name)
sections.append(slice.shape[0])
for ep, pairs in self.param_grad_ep_mapping.items():
params, grads = pairs["params"], pairs["grads"]
for var in params + grads:
if slice.name == var.name:
eps.append(ep)
break
return names, eps, sections
if isinstance(vars, MergedVariable):
name = vars.merged_var.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [var.name for var in vars.ordered_vars]
else:
name = vars.name
slices = mapping[name]
names, eps, sections = get_grad_var_ep(slices)
origin_varnames = [vars.name]
trainer_id = self.get_role_id()
aggregate = True
ctx = CommContext(name, names, eps, sections, origin_varnames,
trainer_id, aggregate, is_sparse, is_distributed)
return ctx
def get_trainer_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if not self.is_geo_mode():
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param = merged[0]
grad = merged[1]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
if self.is_async_mode():
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for pairs in self.origin_sparse_pairs:
param, grad = pairs
param_name = param.name
is_distributed = True if param_name in distibuted_varnames else False
param_ctx = self.build_ctx(param, self.param_var_mapping, False,
True, True, is_distributed)
grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True,
True, True, is_distributed)
ctx = CommContext(param_ctx.var_name(),
param_ctx.split_varnames(),
param_ctx.split_endpoints(),
param_ctx.sections(),
grad_ctx.origin_varnames(),
param_ctx.trainer_id(),
param_ctx.aggregate(),
param_ctx.is_sparse(),
param_ctx.is_distributed())
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_send_context(self):
send_ctx = {}
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
if self.is_geo_mode():
for pairs in self.merged_dense_pairs:
param = pairs[0]
ctx = self.build_ctx(param, self.param_var_mapping, False,
False, True)
send_ctx[ctx.var_name()] = ctx
for pairs in self.merged_sparse_pairs:
param = pairs[0]
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
else:
for merged in self.merged_dense_pairs:
grad = merged[1]
ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
True)
send_ctx[ctx.var_name()] = ctx
for merged in self.merged_sparse_pairs:
param, grad = merged
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
True, is_distributed)
send_ctx[ctx.var_name()] = ctx
name, ctx = self._step_ctx()
send_ctx[name] = ctx
return send_ctx
def get_communicator_recv_context(self,
recv_type=1,
use_origin_program=False):
# recv_type
# 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
sparse_varnames = []
for pairs in self.origin_sparse_pairs:
param, grad = pairs
sparse_varnames.append(param.name)
dense_recv_ctx = {}
sparse_recv_ctx = {}
distributed_recv_ctx = {}
variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs
for merged in variables_pairs:
params = merged[0]
if params.merged_var.name in sparse_varnames:
continue
ctx = self.build_ctx(params, self.param_var_mapping, False, False,
False, False)
dense_recv_ctx[ctx.var_name()] = ctx
for pairs in self.origin_sparse_pairs:
param, grad = pairs
if param.name in distibuted_varnames:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, True)
distributed_recv_ctx[ctx.var_name()] = ctx
else:
ctx = self.build_ctx(param, self.param_var_mapping, False, True,
False, False)
sparse_recv_ctx[ctx.var_name()] = ctx
if recv_type == 1:
return dense_recv_ctx
if recv_type == 2:
return sparse_recv_ctx
if recv_type == 3:
return distributed_recv_ctx
if recv_type == 4:
dense_recv_ctx.update(sparse_recv_ctx)
dense_recv_ctx.update(distributed_recv_ctx)
return dense_recv_ctx
        raise ValueError(
            "recv_type can only be 1/2/3/4, 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL"
        )
def get_the_one_trainer_send_context(self, split_dense_table):
if self.is_geo_mode():
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
distibuted_varnames = get_sparse_tablenames(
self.origin_main_program, True)
for merged in self.merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel = reduce(lambda x, y: x * y, var.shape[1:])
sparse_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(send_ctx) == 0:
raise ValueError(
"GeoSGD require sparse parameters in your net.")
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
else:
return self.get_the_one_send_context(split_dense_table)
def get_dense_send_context(self,
send_ctx,
idx,
merged_dense_pairs,
trainer_id,
split_dense_table=False):
if len(merged_dense_pairs) < 1:
return idx
if not split_dense_table:
origin_varnames = []
var_numel = 0
for merged in merged_dense_pairs:
grad = merged[1]
origin_varnames.append(grad.merged_var.name)
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
var_numel += reduce(lambda x, y: x * y, var.shape)
grad_name = "Dense@Grad"
trainer_id = self.get_role_id()
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"],
[var_numel], origin_varnames, trainer_id,
aggregate, False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
else:
for merged in merged_dense_pairs:
grad = merged[1]
origin_varname = grad.merged_var.name
var = self.origin_main_program.global_block().vars[
origin_varname]
var_numel = reduce(lambda x, y: x * y, var.shape)
grad_name = origin_varname
aggregate = True
dense_ctx = CommContext(grad_name, [grad_name],
["127.0.0.1:6071"], [var_numel],
[origin_varname], trainer_id, aggregate,
False, False, idx, False)
send_ctx[grad_name] = dense_ctx
idx += 1
return idx
def get_the_one_send_context(self,
split_dense_table=False,
use_origin_program=False,
ep_list=None):
if ep_list is None:
ep_list = ["127.0.0.1:6071"]
send_ctx = {}
trainer_id = self.get_role_id()
idx = 0
merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs
merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs
idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs,
trainer_id, split_dense_table)
distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
True)
for merged in merged_sparse_pairs:
param, grad = merged
grad_name = grad.merged_var.name
param_name = param.merged_var.name
splited_varname = []
for i in range(len(ep_list)):
splited_varname.append("{}.block{}".format(param_name, i))
is_distributed = True if param_name in distibuted_varnames else False
var = self.origin_main_program.global_block().vars[
grad.merged_var.name]
shape = list(var.shape)
shape[0] = 0 if is_distributed else shape[0]
sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape,
[grad_name], trainer_id, True, True,
is_distributed, idx, False)
idx += 1
send_ctx[sparse_ctx.var_name()] = sparse_ctx
if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
name, ctx = self._step_ctx(idx)
send_ctx[name] = ctx
return send_ctx
def get_the_one_recv_context(self,
is_dense=True,
split_dense_table=False,
use_origin_program=False):
recv_id_maps = {}
if is_dense:
send_ctx = self.get_the_one_send_context(
split_dense_table=split_dense_table,
use_origin_program=use_origin_program)
for idx, (name, ctx) in enumerate(send_ctx.items()):
if ctx.is_sparse():
continue
if ctx.is_tensor_table():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
else:
send_ctx = self.get_the_one_send_context()
for idx, (name, ctx) in enumerate(send_ctx.items()):
if not ctx.is_sparse():
continue
origin_grad_varnames = ctx.origin_varnames()
param_names = []
for grad_varname in origin_grad_varnames:
param_name = self.grad_name_to_param_name[grad_varname]
param_names.append(param_name)
recv_id_maps[ctx.table_id()] = param_names
return recv_id_maps
def get_server_runtime_config(self):
return self.strategy.get_server_runtime_config()
def get_var_distributed(self, varname, is_param):
var_distributed = []
offset = 0
if is_param:
params = self.param_var_mapping[varname]
param_varnames = [var.name for var in params]
for ep, pairs in self.param_grad_ep_mapping.items():
for p in pairs["params"]:
if p.name in param_varnames:
offset += p.shape[0]
var_distributed.append((p.name, ep, p.shape[0]))
else:
grads = self.grad_var_mapping[varname]
grad_varnames = [var.name for var in grads]
for ep, pairs in self.param_grad_ep_mapping.items():
for g in pairs["grads"]:
if g.name in grad_varnames:
var_distributed.append((g.name, ep, g.shape[0]))
return var_distributed
def _step_ctx(self, idx):
name = STEP_COUNTER
trainer_id = self.get_role_id()
endpoints = self.get_ps_endpoints()
sections = [1] * len(endpoints)
names = [name] * len(endpoints)
ctx = CommContext(name, names, endpoints, sections, [name], trainer_id,
True, False, False, idx, True)
return name, ctx
def _create_vars_from_blocklist(self, block_list):
"""
Create vars for each split.
NOTE: only grads need to be named for different trainers, use
add_trainer_suffix to rename the grad vars.
Args:
block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
Returns:
var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping
from original var name to each var split.
"""
# varname->[(block_id, current_block_size)]
block_map = collections.OrderedDict()
var_mapping = collections.OrderedDict()
for block_str in block_list:
varname, offset, size = block_str.split(":")
if varname not in block_map:
block_map[varname] = []
block_map[varname].append((int(offset), int(size)))
for varname, split in six.iteritems(block_map):
orig_var = self.merged_variable_map[varname]
if len(split) == 1:
var_mapping[varname] = [orig_var]
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=orig_var,
block_id=0,
offset=0,
is_slice=False,
vtype="Param")
else:
var_mapping[varname] = []
orig_shape = orig_var.shape
orig_dim1_flatten = 1
if len(orig_shape) >= 2:
orig_dim1_flatten = reduce(lambda x, y: x * y,
orig_shape[1:])
for i, block in enumerate(split):
size = block[1]
rows = size // orig_dim1_flatten
splited_shape = [rows]
if len(orig_shape) >= 2:
splited_shape.extend(orig_shape[1:])
new_var_name = "%s.block%d" % (varname, i)
slice_var = vars_metatools.VarStruct(
name=new_var_name,
shape=splited_shape,
dtype=orig_var.dtype,
type=orig_var.type,
lod_level=orig_var.lod_level,
persistable=False)
var_mapping[varname].append(slice_var)
self.var_distributed.add_distributed_var(
origin_var=orig_var,
slice_var=slice_var,
block_id=i,
offset=-1,
is_slice=False,
vtype="Param")
return var_mapping
def _dispatcher(self):
ps_dispatcher = RoundRobin(self.get_ps_endpoints())
ps_dispatcher.reset()
grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))
sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs]
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname in sparse_gradnames:
continue
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
for grad_varname, splited_vars in grad_var_mapping_items:
if grad_varname not in sparse_gradnames:
continue
ps_dispatcher.reset()
send_vars = []
for _, var in enumerate(splited_vars):
send_vars.append(var)
recv_vars = []
for _, var in enumerate(send_vars):
recv_vars.append(self.grad_param_mapping[var])
eps = ps_dispatcher.dispatch(recv_vars)
for i, ep in enumerate(eps):
self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
def _slice_variable(self,
var_list,
slice_count,
min_block_size,
uniform=False):
"""
We may need to split dense tensor to one or more blocks and put
them equally onto parameter server. One block is a sub-tensor
aligned by dim[0] of the tensor.
We need to have a minimal block size so that the calculations in
the parameter server side can gain better performance. By default
minimum block size 8K elements (maybe 16bit or 32bit or 64bit).
Args:
var_list (list): List of variables.
slice_count (int): Numel of count that variables will be sliced, which
could be the pserver services' count.
min_block_size (int): Minimum split block size.
Returns:
blocks (list[(varname, block_id, current_block_size)]): A list
of VarBlocks. Each VarBlock specifies a shard of the var.
"""
blocks = []
for var in var_list:
if not uniform:
var_numel = reduce(lambda x, y: x * y, var.shape)
split_count = 1
if min_block_size == -1:
split_count = 1
else:
split_count = slice_count
max_pserver_count = int(
math.floor(var_numel / float(min_block_size)))
if max_pserver_count == 0:
max_pserver_count = 1
if max_pserver_count < slice_count:
split_count = max_pserver_count
block_size = int(math.ceil(var_numel / float(split_count)))
if len(var.shape) >= 2:
# align by dim1(width)
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
remains = block_size % dim1
if remains != 0:
block_size += dim1 - remains
# update split_count after aligning
split_count = int(math.ceil(var_numel / float(block_size)))
for block_id in range(split_count):
curr_block_size = min(block_size, var_numel - (
(block_id) * block_size))
block = vars_metatools.VarBlock(var.name, block_id,
curr_block_size)
blocks.append(str(block))
else:
                # integer division keeps the block sizes integral on Python 3
                block_size = var.shape[0] // slice_count
remainder = var.shape[0] % slice_count
if block_size == 0:
dim0s = [block_size] * remainder
else:
dim0s = [block_size] * slice_count
for i in range(remainder):
dim0s[i] = dim0s[i] + 1
dim1 = reduce(lambda x, y: x * y, var.shape[1:])
for block_id in range(len(dim0s)):
numel = dim0s[block_id] * dim1
block = vars_metatools.VarBlock(var.name, block_id, numel)
blocks.append(str(block))
return blocks
def _get_param_grad_blocks(self, pairs, min_block_size, uniform=False):
param_list = []
grad_list = []
param_grad_set = set()
for p, g in pairs:
# todo(tangwei12) skip parameter marked not trainable
# if type(p) == Parameter and p.trainable == False:
# continue
p = p.merged_var
g = g.merged_var
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
grad_blocks = self._slice_variable(grad_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
param_blocks = self._slice_variable(param_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
return param_blocks, grad_blocks
def _var_slice_and_distribute(self):
# update these mappings for further transpile:
# 1. param_var_mapping : param var name->[split params vars]
# 2. grad_var_mapping : grad var name->[split grads vars]
# 3. grad_param_mapping : grad.blockx->param.blockx
# 4. param_grad_ep_mapping : ep->{"params" : [], "grads" : [] }
dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs,
self.min_block_size, False)
sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs,
self.min_block_size, True)
param_blocks = dps + sps
grad_blocks = dgs + sgs
assert (len(grad_blocks) == len(param_blocks))
# origin_param_name->[splited_param_vars]
self.param_var_mapping = self._create_vars_from_blocklist(param_blocks)
self.grad_var_mapping = self._create_vars_from_blocklist(grad_blocks)
# dict(grad_splited_var->param_splited_var)
self.grad_param_mapping = collections.OrderedDict()
for g, p in zip(grad_blocks, param_blocks):
g_name, g_bid, _ = g.split(":")
p_name, p_bid, _ = p.split(":")
self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \
self.param_var_mapping[p_name][int(p_bid)]
print_maps = {}
for k, v in self.grad_param_mapping.items():
print_maps[str(k)] = str(v)
# create mapping of endpoint->split var to create pserver side program
self.param_grad_ep_mapping = collections.OrderedDict()
        for ep in self.get_ps_endpoints():
            self.param_grad_ep_mapping[ep] = {"params": [], "grads": []}
def _build_var_distributed(self):
self.var_distributed = vars_metatools.VarsDistributed()
sparse_pairs, dense_pairs = self.get_param_grads()
origin_for_sparse = []
origin_for_dense = []
param_name_grad_name = dict()
grad_name_to_param_name = dict()
for param, grad in sparse_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_sparse.append((param, grad))
for param, grad in dense_pairs:
param = vars_metatools.create_var_struct(param)
grad = vars_metatools.create_var_struct(grad)
origin_for_dense.append((param, grad))
for dense_pair in origin_for_dense:
param, grad = dense_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_dense_pairs.append((m_param, m_grad))
for sparse_pair in origin_for_sparse:
param, grad = sparse_pair
m_param = MergedVariable(param, [param], [0])
m_grad = MergedVariable(grad, [grad], [0])
self.merged_variables_pairs.append((m_param, m_grad))
self.merged_sparse_pairs.append((m_param, m_grad))
for merged in self.merged_variables_pairs:
m_param, m_grad = merged
self.merged_variable_map[
m_param.merged_var.name] = m_param.merged_var
self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var
param_merges = []
param_merges.extend(origin_for_sparse)
param_merges.extend(origin_for_dense)
for param, grad in param_merges:
param_name_grad_name[param.name] = grad.name
grad_name_to_param_name[grad.name] = param.name
self.origin_sparse_pairs = origin_for_sparse
self.origin_dense_pairs = origin_for_dense
self.param_name_to_grad_name = param_name_grad_name
self.grad_name_to_param_name = grad_name_to_param_name
sparse_pair_map = collections.OrderedDict()
for pair in self.origin_sparse_pairs + self.origin_dense_pairs:
param, grad = pair
sparse_pair_map[param.name] = str(param)
sparse_pair_map[grad.name] = str(grad)
self._var_slice_and_distribute()
self._dispatcher()
def get_param_grads(self):
origin_program = self.origin_main_program
def _get_params_grads(sparse_varnames):
block = origin_program.global_block()
dense_param_grads = []
sparse_param_grads = []
optimize_params = set()
origin_var_dict = origin_program.global_block().vars
role_id = int(core.op_proto_and_checker_maker.OpRole.Backward)
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr("op_role", role_id)
continue
if op.attr(OP_ROLE_VAR_ATTR_NAME):
param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
if param_name not in optimize_params:
optimize_params.add(param_name)
param_grad = (origin_var_dict[param_name],
origin_var_dict[grad_name])
if param_name in sparse_varnames:
sparse_param_grads.append(param_grad)
else:
dense_param_grads.append(param_grad)
return sparse_param_grads, dense_param_grads
def _get_sparse_varnames():
varnames = []
for op in origin_program.global_block().ops:
if op.type in SPARSE_OP_TYPE_DICT.keys() \
and op.attr('remote_prefetch') is True:
param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
varnames.append(param_name)
return list(set(varnames))
sparse_varnames = _get_sparse_varnames()
sparse_param_grads, dense_param_grads = _get_params_grads(
sparse_varnames)
return sparse_param_grads, dense_param_grads
def remove_var_pair_by_grad(self, var_name):
for index, pair in enumerate(self.merged_variables_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_variables_pairs[index]
for index, pair in enumerate(self.merged_dense_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_dense_pairs[index]
return
for index, pair in enumerate(self.merged_sparse_pairs):
var = pair[0]
var_grad = pair[1]
if var_grad.merged_var.name == var_name:
del self.merged_sparse_pairs[index]
return
print("Not find {} in self.merge_pairs".format(var_name))
def _is_opt_role_op(op):
    # NOTE: rely on the op role attribute to determine whether this op
    # belongs to the optimize pass
op_maker = core.op_proto_and_checker_maker
optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize
if op_maker.kOpRoleAttrName() in op.attr_names and \
int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):
return True
return False
def _get_optimize_ops(_program):
block = _program.global_block()
opt_ops = []
for op in block.ops:
if _is_opt_role_op(op):
# delete clip op from opt_ops when run in Parameter Server mode
if OP_NAME_SCOPE in op.all_attrs() \
and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
op._set_attr(
"op_role",
int(core.op_proto_and_checker_maker.OpRole.Backward))
continue
opt_ops.append(op)
return opt_ops
def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
if hasattr(compiled_config.origin_main_program, 'lr_sheduler'):
from paddle.optimizer.lr import LRScheduler
assert isinstance(compiled_config.origin_main_program.lr_sheduler,
LRScheduler), "must be LRScheduler"
ops = _get_optimize_ops(compiled_config.origin_main_program)
lr_param_dict = _get_lr_param_dict(ops)
lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program(
compiled_config.origin_main_program.lr_sheduler, lr_param_dict,
lr_decay_steps)
compiled_config.add_tensor_table(
"@LR_DECAY_COUNTER@", lr_name, lr_decay_startup_program,
lr_decay_main_program, "GlobalStepTable")
def _get_lr_param_dict(opt_ops):
lr_param_dict = {}
for op in opt_ops:
lr_name = op.input("LearningRate")[0]
param_name = op.input("Param")[0]
if lr_name not in lr_param_dict:
lr_param_dict[lr_name] = []
lr_param_dict[lr_name].append(param_name)
return lr_param_dict
def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
schedler_decay = [
'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay'
]
from paddle.optimizer.lr import ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay
from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay
decay_main_program = fluid.framework.Program()
decay_startup_program = fluid.framework.Program()
lr_name = ""
if isinstance(lr_sheduler, ExponentialDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, NoamDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0)
lr_name = lr.name
logging.warn("NoamDecay is set, warmup steps is [ %d ]" %
lr_sheduler.warmup_steps)
elif isinstance(lr_sheduler, NaturalExpDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
lr_name = lr.name
logging.warn(
"NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
elif isinstance(lr_sheduler, InverseTimeDecay):
with fluid.program_guard(decay_main_program, decay_startup_program):
lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma,
True)
lr_name = lr.name
logging.warn(
"InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
"\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
"\t strategy.a_sync = True \n"
"\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
% lr_decay_steps)
else:
raise ValueError(
"Not supported current LearningRate strategy, please use follow decay strategy: {}".
format(schedler_decay))
return decay_main_program, decay_startup_program, lr_name
def _get_varname_parts(varname):
# returns origin, blockid, trainerid
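    # e.g. (illustrative) "w@GRAD.block0.trainer_1" -> ("w@GRAD", "block0", "trainer_1")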
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1:]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1:trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0:min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
def _orig_varname(varname):
orig, _, _ = _get_varname_parts(varname)
return orig
| apache-2.0 | -7,821,824,388,392,121,000 | 38.75695 | 145 | 0.537979 | false | 3.973801 | false | false | false |
animekita/selvbetjening | selvbetjening/frontend/userportal/views.py | 1 | 7865 | # coding=UTF-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from django.contrib.formtools.preview import FormPreview
from django.contrib import messages
from django.contrib.auth import login, authenticate
from selvbetjening.core.user.models import SUser
from selvbetjening.businesslogic.members.forms import UserRegistrationForm, ProfileEditForm, UserWebsiteFormSet
from selvbetjening.frontend.userportal.forms import ChangePasswordForm, ChangePictureForm, \
PrivacyForm, ChangeUsernameForm
from selvbetjening.frontend.userportal.processor_handlers import profile_page_processors
from selvbetjening.frontend.userportal.models import UserPrivacy
def profile_redirect(request):
if isinstance(request.user, AnonymousUser):
return HttpResponseRedirect(reverse('members_login'))
else:
return HttpResponseRedirect(reverse('members_profile'))
@login_required
def public_profile_page(request,
username,
template_name='userportal/public_profile.html',
template_no_access_name='userportal/profile_no_access.html'):
user = get_object_or_404(SUser, username=username)
privacy, created = UserPrivacy.objects.get_or_create(user=user)
own_profile = False
if privacy.public_profile:
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'add_to_profile': add_to_profile
})
else:
return render(request,
template_no_access_name,
{
'username': user.username
})
@login_required
def profile(request,
template_name='userportal/profile.html'):
user = request.user
privacy = UserPrivacy.full_access()
own_profile = True
own_privacy, created = UserPrivacy.objects.get_or_create(user=user)
handler = profile_page_processors.get_handler(request, user)
add_to_profile = handler.view(own_profile)
return render(request,
template_name,
{
'viewed_user': user,
'privacy': privacy,
'own_privacy': own_privacy,
'add_to_profile': add_to_profile
})
@login_required
def edit_profile(request,
template_name='userportal/edit_profile.html',
success_page='userportal_profile',
form_class=ProfileEditForm):
user = request.user
if request.method == 'POST':
form = form_class(request.POST, instance=user)
website_form = UserWebsiteFormSet(request.POST, instance=user)
if form.is_valid() and website_form.is_valid():
form.save()
website_form.save()
messages.success(request, _(u'Personal information updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=user)
website_form = UserWebsiteFormSet(instance=user)
return render(request,
template_name,
{
'form': form,
'website_form': website_form
})
@login_required
def edit_privacy(request,
form_class=PrivacyForm,
template_name='userportal/edit_privacy.html',
success_page='userportal_profile'):
privacy, created = UserPrivacy.objects.get_or_create(user=request.user)
if request.method == 'POST':
form = form_class(request.POST, instance=privacy)
        if form.is_valid():
form.save()
messages.success(request, _(u'Privacy settings updated'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class(instance=privacy)
return render(request,
template_name,
{
'form': form
})
@login_required
def edit_picture(request,
form_class=ChangePictureForm,
success_page='userportal_profile',
template_name='userportal/edit_picture.html'):
profile = request.user
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile.picture = form.cleaned_data['picture']
profile.save()
messages.success(request, _(u'Profile picture changed'))
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form,
'user': profile
})
@login_required
def edit_password(request,
template_name='userportal/edit_password.html',
post_change_redirect='userportal_profile',
change_password_form=ChangePasswordForm):
if request.method == 'POST':
form = change_password_form(request.user, request.POST)
if form.is_valid():
form.save()
messages.success(request, _(u'Password changed'))
return HttpResponseRedirect(reverse(post_change_redirect))
else:
form = change_password_form(request.user)
return render(request,
template_name,
{
'form': form,
})
class UsernameChangeView(FormPreview):
preview_template = 'userportal/edit_username_confirmed.html'
form_template = 'userportal/edit_username.html'
def __call__(self, request, *args, **kwargs):
return super(UsernameChangeView, self).__call__(request, *args, **kwargs)
def process_preview(self, request, form, context):
context['new_username'] = form.cleaned_data['new_username']
def done(self, request, cleaned_data):
request.user.username = cleaned_data['new_username']
request.user.save()
messages.success(request, _(u'Username changed'))
return HttpResponseRedirect(reverse('userportal_profile'))
edit_username = login_required(UsernameChangeView(ChangeUsernameForm))
def register(request,
success_page,
form_class=UserRegistrationForm,
login_on_success=False,
template_name='userportal/registration.html'):
""" Allows a new user to register an account.
success_page -- a reversable view name or a function returning
an url. The function takes a request and a user
object as input.
"""
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
user = form.save()
if login_on_success:
user = authenticate(username=user.username, password=request.POST['password'])
login(request, user)
if callable(success_page):
return HttpResponseRedirect(success_page(request, user))
else:
return HttpResponseRedirect(reverse(success_page))
else:
form = form_class()
return render(request,
template_name,
{
'form': form
}) | mit | 2,199,686,237,095,671,600 | 30.464 | 111 | 0.595168 | false | 4.507163 | false | false | false |
choderalab/ensembler | ensembler/pdb.py | 1 | 4246 | import sys
if sys.version_info > (3, 0):
from urllib.request import urlopen
from urllib.error import URLError
from io import StringIO
else:
from urllib2 import urlopen, URLError
from StringIO import StringIO
import gzip
import re
import six
def extract_residues_by_resnum(output_file, pdb_input_file, template):
"""
Parameters
----------
output_file: string or gzip.file_like
pdb_input_file: string or gzip.file_like
"""
if isinstance(pdb_input_file, six.string_types):
with gzip.open(pdb_input_file, 'r') as pdb_file:
pdbtext = pdb_file.readlines()
else:
pdbtext = pdb_input_file.readlines()
# list of resnum strings e.g. ['9', '29', '30B'] must be converted as follows to match the PDB format:
# [' 9 ', ' 29 ', ' 30B']
desired_resnums = ['%4s ' % r if re.match('[0-9]', r[-1]) else '%5s' % r for r in template.resolved_pdbresnums]
if isinstance(output_file, six.string_types):
ofile = open(output_file, 'w')
else:
ofile = output_file
try:
resnums_extracted = {}
model_index = 0
for bytesline in pdbtext:
line = bytesline.decode('UTF-8')
# For PDBs containing multiple MODELs (e.g. NMR structures), extract data only from the first model, ignore others.
if line[0:6] == 'MODEL ':
model_index += 1
if model_index == 2:
break
if line[0:6] in ['ATOM ', 'HETATM']:
resnum = line[22:27]
chainid = line[21]
if chainid == template.chainid:
if resnum in desired_resnums:
ofile.write(line)
resnums_extracted[resnum] = 1
except Exception as e:
print('Exception detected while extracting ATOM/HETATM records:')
print(e)
finally:
if isinstance(output_file, six.string_types):
ofile.close()
if len(resnums_extracted) != len(desired_resnums):
raise Exception(
'Number of residues (%d) extracted from PDB (%s) for template (%s) does not match desired number of residues (%d).' % (
len(resnums_extracted), template.pdbid, template.templateid, len(desired_resnums)
)
)
def retrieve_sifts(pdb_id):
"""Retrieves a SIFTS .xml file, given a PDB ID. Works by modifying the PDBe download URL.
Also removes annoying namespace stuff.
"""
sifts_download_base_url='ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
url = sifts_download_base_url + pdb_id.lower() + '.xml.gz'
try:
response = urlopen(url)
except URLError:
print('ERROR downloading SIFTS file with PDB ID: %s' % pdb_id)
raise
sifts_page = response.read(100000000) # Max 100MB
# Decompress string
sifts_page = gzip.GzipFile(fileobj=StringIO(sifts_page)).read()
# Removing all attribs from the entry tag, and the rdf tag and contents
sifts_page_processed = ''
skip_rdf_tag_flag = False
for line in sifts_page.splitlines():
if line[0:6] == '<entry':
sifts_page_processed += '<entry>' + '\n'
elif line[0:7] == ' <rdf:':
skip_rdf_tag_flag = True
pass
elif line[0:8] == ' </rdf:':
skip_rdf_tag_flag = False
pass
else:
if skip_rdf_tag_flag:
continue
sifts_page_processed += line + '\n'
return sifts_page_processed
def retrieve_pdb(pdb_id, compressed='no'):
"""Retrieves a PDB file, given a PDB ID. Works by modifying the PDB download URL.
"""
pdb_download_base_url='http://www.rcsb.org/pdb/files/'
url = pdb_download_base_url + pdb_id + '.pdb'
if compressed == 'yes':
url += '.gz'
response = urlopen(url)
pdb_file = response.read(10000000) # Max 10MB
return pdb_file
def extract_uniprot_acs_from_sifts_xml(siftsxml):
uniprot_crossrefs = siftsxml.findall('entity/segment/listResidue/residue/crossRefDb[@dbSource="UniProt"]')
uniprot_acs = list(set([uniprot_crossref.get('dbAccessionId') for uniprot_crossref in uniprot_crossrefs]))
return uniprot_acs
| gpl-2.0 | 5,171,825,560,777,637,000 | 35.921739 | 135 | 0.5935 | false | 3.562081 | false | false | false |
sdbondi/Arduino-Talk | Comet/python/ArduinoServer.py | 1 | 6552 | #!/usr/bin/python
import human_curl as requests
import serial
import platform
import sys
import getopt
import socket
import json
import time
_WINDOWS = (platform.system() == 'Windows')
_AJAXURL = 'http://localhost/arduino/comet-router.php?action=%(action)s'
#_AJAXURL = 'http://themousepotatowebsite.co.za/experiments/arduino/comet-router.php?action=%(action)s'
#_AUTH = ('stanb', 'arduino1')
_AUTH=None
_CHAROFFSET = 32
_CMDMAP = {
'ping' : chr(_CHAROFFSET + 0),
'pinMode' : chr(_CHAROFFSET + 1),
'digitalWrite': chr(_CHAROFFSET + 2),
'digitalRead' : chr(_CHAROFFSET + 3),
'analogWrite' : chr(_CHAROFFSET + 4),
'analogRead' : chr(_CHAROFFSET + 5),
'beep' : chr(_CHAROFFSET + 11)
}
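# Illustrative note (inferred from toArduinoCommand below, not an official
# protocol spec): each command is sent as one line of printable characters,
# e.g. {'command': 'digitalWrite', 'pin': 13, 'args': ['1']} becomes
# chr(32 + 2) + chr(13 + 32) + "1" + "\n", i.e. the string '"-1\n'.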
class ArduinoCommandServer(object):
def __init__(self, sc, opts):
if not sc:
raise ValueError('Serial connection required')
self.serial = sc
self.options = opts or {}
def getIncomingCommands(self):
global _AJAXURL, _AUTH
opts = self.options
url = _AJAXURL % { 'action': 'get_web_data'}
while True:
while True:
try:
resp = requests.get(url, timeout=70, auth=_AUTH)
break;
except requests.exceptions.CurlError as ex:
print 'ERROR ', ex.message, ' Retrying...'
#except requests.exceptions.Timeout:
# print 'Get request timed out. Retrying...'
if resp.status_code != 200 or resp.content == False:
print 'ERROR: status_code %d or no content' % resp.status_code
continue
obj = json.loads(resp.content);
if obj == False:
print 'ERROR: content parse error'
print resp.content
continue
if obj['state'] != 'OK':
print 'ERROR: ', obj['message']
continue;
if obj['result'] == 'TMOUT':
continue
return obj['result']
def toArduinoCommand(self, command):
global _CMDMAP, _CHAROFFSET
if not command['command'] in _CMDMAP:
print 'Unrecognised command: ', command['command']
return False
op_chr = _CMDMAP[command['command']]
if 'pin' in command:
pin = str(command['pin'])
if pin[0] == 'A':
pin = 14 + int(pin[1])
pin = int(pin)
result = op_chr+chr(pin + _CHAROFFSET)
if 'mode' in command:
result += 'i' if command['mode'] == 'input' else 'o'
if 'args' in command and isinstance(command['args'], list):
command['args'] = [str(c) for c in command['args']]
result += '-'.join(command['args'])
return result+'\n'
def toWeb(self, ar_cmd):
op_chr = ar_cmd[0]
if op_chr == 'A':
return 'ACK'
if op_chr == 'R':
return int(ar_cmd[1:])
if op_chr == 'F':
return { 'error': ar_cmd[1:] }
return False
def processCommands(self, commands):
results = []
for command in commands:
cmd_str = self.toArduinoCommand(command)
if not cmd_str:
results.append(False)
continue
ar_reply = ''
i = 0
while len(ar_reply) == 0:
if i % 10 == 0:
self.serial.write(cmd_str)
time.sleep(0.1)
ar_reply = self.serial.readline()
i += 1
functionStr = command['command']+'('
if 'pin' in command:
functionStr += str(command['pin'])
if 'args' in command and isinstance(command['args'], list):
if 'pin' in command:
functionStr += ', '
functionStr += ', '.join(command['args'])
print functionStr + ') -> ' + ar_reply.strip()
results.append(self.toWeb(ar_reply))
return results
def sendResponse(self, batch_id, results):
global _AJAXURL, _AUTH
opts = self.options
url = _AJAXURL % { 'action': 'put_ar_data'}
data = { 'object' : json.dumps({ 'id': batch_id, 'object': results })}
while True:
try:
resp = requests.post(url, data, timeout=10, auth=_AUTH)
break;
except requests.exceptions.CurlError as ex:
print 'ERROR ', ex.message, ' Retrying...'
#except requests.exceptions.Timeout:
# print 'Send request timed out. Retrying...'
if resp.status_code != 200 or resp.content == False:
print 'ERROR: status_code %d or no content' % resp.status_code
return False
obj = json.loads(resp.content);
if obj == False:
print 'ERROR: content parse error'
print resp.content
return False
if obj['state'] != 'OK':
print 'ERROR: ', obj['message']
return False
if obj['result'] == 'TMOUT':
return False
if obj['result'] == 'PASS':
return True
print 'Got unknown result: ', obj
return False
def start(self):
opts = self.options
while True:
print 'Waiting for incoming commands...'
results = self.getIncomingCommands()
print '================================'
print 'Got command(s).'
for _object in results:
batch_id = _object['id']
commands = _object['object']
print 'Batch ID: %d. Processing...' % batch_id
results = self.processCommands(commands)
print 'Sending reply...'
self.sendResponse(batch_id, results)
print 'Done'
print '================================\n\n'
def get_opts(args):
global _WINDOWS
try:
opts, args = getopt.getopt(args, '', ['baud=', 'serialPort='])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
optsmap = {
'baud': 9600,
'serialPort': not _WINDOWS and '/dev/ttyACM0'
}
for o, a in opts:
if o == "--baud":
optsmap['baud'] = int(a)
elif o == "--serialPort":
optsmap['serialPort'] = a
else:
assert False, "unhandled option"
if optsmap['serialPort'] == False:
raise ValueError('Argument --serialPort= is mandatory')
return optsmap
def main(args):
opts = get_opts(args)
# Check for arduino serial port
try:
sc = serial.Serial(opts['serialPort'], opts['baud'], timeout=0)
except serial.SerialException, err:
print str(err)
print 'Please ensure your Arduino is connected and the port is correct.'
sys.exit(2)
if not sc.isOpen():
print 'Unable to open serial connection to Arduino.'
sys.exit(1)
print 'Connected to serial on', opts['serialPort']
try:
# Start relay server
while 1:
server = ArduinoCommandServer(sc, opts)
server.start()
finally:
if sc and sc.isOpen():
sc.close()
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 1,833,642,196,712,729,600 | 24.297297 | 103 | 0.573107 | false | 3.6139 | false | false | false |
thorwhalen/ut | ml/sk/transformers.py | 1 | 4610 |
__author__ = 'thor'
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.neighbors import KNeighborsRegressor
from pandas import DataFrame
import numpy as np
from nltk import word_tokenize
from functools import reduce
class HourOfDayTransformer(TransformerMixin):
def __init__(self, date_field='datetime'):
self.date_field = date_field
def transform(self, X, **transform_params):
hours = DataFrame(X[self.date_field].apply(lambda x: x.hour))
return hours
def fit(self, X, y=None, **fit_params):
return self
class ModelTransformer(TransformerMixin):
"""
Sometimes transformers do need to be fitted.
ModelTransformer is used to wrap a scikit-learn model and make it behave like a transformer.
This is useful when you want to use something like a KMeans clustering model to generate features for another model.
It needs to be fitted in order to train the model it wraps.
"""
def __init__(self, model):
self.model = model
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
return self
def transform(self, X, **transform_params):
return DataFrame(self.model.predict(X))
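    # Illustrative usage (assumed, not part of the original module): wrap an
    # unsupervised model so its predictions become pipeline features, e.g.
    #   FeatureUnion([('clusters', ModelTransformer(KMeans(n_clusters=5)))])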
class KVExtractor(TransformerMixin):
"""
Transform multiple key/value columns in a scikit-learn pipeline.
>>> import pandas as pd
>>> D = pd.DataFrame([ ['a', 1, 'b', 2], ['b', 2, 'c', 3]], columns = ['k1', 'v1', 'k2', 'v2'])
>>> kvpairs = [ ['k1', 'v1'], ['k2', 'v2'] ]
>>> KVExtractor( kvpairs ).transform(D)
[{'a': 1, 'b': 2}, {'c': 3, 'b': 2}]
"""
def __init__(self, kvpairs):
self.kpairs = kvpairs
def transform(self, X, *_):
result = []
for index, rowdata in X.iterrows():
rowdict = {}
for kvp in self.kpairs:
rowdict.update({rowdata[kvp[0]]: rowdata[kvp[1]]})
result.append(rowdict)
return result
def fit(self, *_):
return self
class ColumnSelectTransformer(BaseEstimator, TransformerMixin):
def __init__(self, keys):
self.keys = keys
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.keys]
class CategoryTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
D = []
for record in X.values:
D.append({k: 1 for k in record[0]})
return D
class AttributeTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
    def _flatten(self, d, parent_key='', sep='_'):
        """ Flatten a nested dictionary, mapping boolean leaves to 1/0.
        """
        try:
            from collections.abc import MutableMapping  # Python 3.3+
        except ImportError:
            from collections import MutableMapping
        items = []
        for k, v in list(d.items()):
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, MutableMapping):
                items.extend(list(self._flatten(v, new_key, sep=sep).items()))
            else:
                new_v = 1 if v == True else 0
                items.append((new_key, new_v))
        return dict(items)
def fit(self, X, y=None):
return self
def transform(self, X):
D = []
for record in X.values:
D.append(self._flatten(record[0]))
return D
class KNNImputer(TransformerMixin):
"""
Fill missing values using KNN Regressor
"""
def __init__(self, k):
self.k = k
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""
:param X: multidimensional numpy array like.
"""
rows, features = X.shape
mask = list([reduce(lambda h, t: h or t, x) for x in np.isnan(X)])
criteria_for_bad = np.where(mask)[0]
criteria_for_good = np.where(mask == np.zeros(len(mask)))[0]
X_bad = X[criteria_for_bad]
X_good = X[criteria_for_good]
knn = KNeighborsRegressor(n_neighbors=self.k)
        for idx, x_bad in zip(criteria_for_bad.tolist(), X_bad):
            missing = np.isnan(x_bad)
            bad_dim = np.where(missing)[0]
            good_dim = np.where(~missing)[0]
            for d in bad_dim:
                x = X_good[:, good_dim]
                y = X_good[:, d]
                knn.fit(x, y)
                # predict expects a 2D array of samples, so reshape the single row
                X[idx, d] = knn.predict(x_bad[good_dim].reshape(1, -1))[0]
return X
class NLTKBOW(TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
return [{word: True for word in word_tokenize(document)}
for document in X] | mit | 1,656,264,518,869,154,300 | 25.964912 | 120 | 0.569848 | false | 3.682109 | false | false | false |
LucasFerreiraDaSilva/ScrapingINMET | geraBase.py | 1 | 3704 | """
    Author: Lucas Ferreira da Silva
    Email: [email protected]
    Description: Script that downloads the data of each weather station and
               builds a small JSON "database" covering all the stations
    Execution (command): python3 geraBase.py
    Output: JSON file (estacoes.json) containing the data of every INMET
            weather station
"""
import requests
import json
import bs4
import re
# Base URL for scraping the stations
url_map = "http://www.inmet.gov.br/sonabra/maps/pg_mapa.php"
res = requests.get(url_map)
res.raise_for_status()
# Split the response into the individual stations
list_markers = (res.text).split("//************* ESTAÇÃO ")
del list_markers[0]
# Initialize the list of raw station data for later processing
list_stations = []
# Scrape the raw data of each station
for i in list_markers:
st = (i.split("var imagem",maxsplit=1))[0].split("var ")
    # Capture the station id
    station_id = str((st[0].split(maxsplit=1))[0])
    # Capture the station label
    station_label = re.search(r"(?<=')[^']+(?=')", str(st[-1])).group(0)
    # Capture the station HTML
    station_html = str(st[2].split("html = ", maxsplit=1)[1])
    # Build an auxiliary dict with this station's data
station_info = {}
station_info['id'] = station_id
station_info['label'] = station_label
station_info['html'] = station_html
list_stations.append(station_info)
# Initialize the stations dictionary
stations = {}
# Refined scraping of each station's data
for x in list_stations:
    soup = bs4.BeautifulSoup(x['html'], 'html.parser')
    # Capture the link to the data table
link = ""
for a in soup.find_all('a'):
l = a.get('href')
if (l.find("pg_dspDadosCodigo_sim.php?", 32) != -1):
link = l
break
aux = (x['html'].split("<b><b>", maxsplit=1))[1].split("<table ", maxsplit=1)
    # Capture the list of geographic data
    localization = ((aux[1].split("</table>", maxsplit=1))[1].split("</font>", maxsplit=1)[0]).split("<br>")
    # Capture the remaining station data
    data_aux = ((aux[0].replace("<b>", "")).replace("</b>","")).split("<br>")
    data = []
    for d in data_aux:
        if (d.find("<a ", 0, 4) == -1) and (d.find("</a>", 0, 4) == -1) and (len(d) > 0):
            data.append(d)
    # Build the station object for the JSON output
station_data = {}
details = {}
details['estacao'] = data[0].split(": ")[1]
details['codigo_omm'] = data[1].split(": ")[1]
if (len(data) > 2):
details['registro'] = data[2].split(": ")[1]
details['temp_max'] = (data[3].split(": ")[1]).replace("º","")
details['temp_min'] = (data[4].split(": ")[1]).replace("º","")
details['umidade'] = data[5].split(": ")[1]
details['pressao'] = data[6].split(": ")[1]
details['precipitacao'] = data[7].split(": ")[1]
details['vento_dir'] = (data[8].split(": ")[1]).replace("º","graus")
details['vento_vel'] = data[9].split(": ")[1]
station_data['label'] = x['label']
station_data['url'] = link
station_data['latitude'] = (localization[1].split(": ")[1]).replace("º","")
station_data['longitude'] = (localization[2].split(": ")[1]).replace("º","")
station_data['altitude'] = localization[3].split(": ")[1]
station_data['abertura'] = localization[0].split(": ")[1]
station_data['detalhes'] = details
stations[str(x['id'])] = station_data
# Write the data to the JSON output file
with open('estacoes.json', 'w') as fp:
json.dump(stations, fp, indent=4, ensure_ascii=False, sort_keys=True)
print("Database successfully generated!")
| mit | 8,083,539,564,384,112,000 | 31.707965 | 108 | 0.606061 | false | 2.865116 | false | false | false |
BlackHole/enigma2-1 | lib/python/Components/Converter/TemplatedMultiContent.py | 2 | 2918 | from Components.Converter.StringList import StringList
class TemplatedMultiContent(StringList):
"""Turns a python tuple list into a multi-content list which can be used in a listbox renderer."""
def __init__(self, args):
StringList.__init__(self, args)
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_HALIGN_RIGHT, RT_VALIGN_TOP, RT_VALIGN_CENTER, RT_VALIGN_BOTTOM, RT_WRAP, BT_SCALE
from skin import parseFont
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmap, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend, MultiContentTemplateColor, MultiContentEntryProgress
l = locals()
del l["self"] # cleanup locals a bit
del l["args"]
self.active_style = None
self.template = eval(args, {}, l)
assert "fonts" in self.template
assert "itemHeight" in self.template
assert "template" in self.template or "templates" in self.template
assert "template" in self.template or "default" in self.template["templates"] # we need to have a default template
if not "template" in self.template: # default template can be ["template"] or ["templates"]["default"]
self.template["template"] = self.template["templates"]["default"][1]
self.template["itemHeight"] = self.template["template"][0]
def changed(self, what):
if not self.content:
from enigma import eListboxPythonMultiContent
self.content = eListboxPythonMultiContent()
# also setup fonts (also given by source)
index = 0
for f in self.template["fonts"]:
self.content.setFont(index, f)
index += 1
# if only template changed, don't reload list
if what[0] == self.CHANGED_SPECIFIC and what[1] == "style":
pass
elif self.source:
self.content.setList(self.source.list)
self.setTemplate()
self.downstream_elements.changed(what)
def setTemplate(self):
if self.source:
style = self.source.style
if style == self.active_style:
return
# if skin defined "templates", that means that it defines multiple styles in a dict. template should still be a default
templates = self.template.get("templates")
template = self.template.get("template")
itemheight = self.template["itemHeight"]
selectionEnabled = self.template.get("selectionEnabled", True)
scrollbarMode = self.template.get("scrollbarMode", "showOnDemand")
if templates and style and style in templates: # if we have a custom style defined in the source, and different templates in the skin, look it up
template = templates[style][1]
itemheight = templates[style][0]
if len(templates[style]) > 2:
selectionEnabled = templates[style][2]
if len(templates[style]) > 3:
scrollbarMode = templates[style][3]
self.content.setTemplate(template)
self.content.setItemHeight(itemheight)
self.selectionEnabled = selectionEnabled
self.scrollbarMode = scrollbarMode
self.active_style = style
| gpl-2.0 | 1,706,497,243,091,871,200 | 40.098592 | 207 | 0.735778 | false | 3.580368 | false | false | false |
Grumblesaur/quickgen | quickgen.py | 1 | 3966 | #!/usr/local/bin/python -tt
# -*- coding: utf-8 -*-
import os, sys, random
#supply input as raw_input if running Python 3 or higher
if sys.version_info >= (3,0):
raw_input = input
def parse(structure, part, phonemes):
#grab a random phoneme from the relevant category and return it
#structure can be O, N, or C, passed as 0, 1, or 2, respectively
#initialize the segment string as empty
seg = ""
#focus in on relevant O, N, or C possibilities
pattern = part[structure]
#ensure that values fall within the bounds of list
listrange = len(pattern)
#pick an O, N, or C to construct
index = random.randrange(0, listrange)
onc = pattern[index] #obtain an onset, nucleus, or coda pattern
if "," in onc:
onc = onc.split(",") #if it is a cluster, split on commas
#this creates a list of indices to be accessed
#loop to construct O, N, or C
for i in range(0, len(onc)):
pclass = int(onc[i]) #obtain an index for a class of phoneme
phone = random.randrange(0, len(phonemes[pclass]))
#obtain an index for a specific phone
seg += phonemes[pclass][phone] #add phone to segment
return seg #return the segment to the main script
#end parse function definition
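#illustrative example (assumed data): if part[0] is ["0", "0,1"] and the
#pattern "0,1" is chosen, one phone is drawn from phoneme class 0 and one
#from class 1, then concatenated to form the onset cluster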
#ask for name of input file (default = "input.txt")
inn = raw_input("What is the name of your input file? (Leave blank for 'input.txt') ")
if inn == "":
inn = "input.txt"
#ask for name of output file (default = "output.txt")
out = raw_input("What is the name of your output file? (Leave blank for 'output.txt') ")
if out == "":
out = "output.txt"
seed = raw_input("Insert seed for RNG (leave blank for system time) ")
if seed == "":
seed = None
else:
seed = int(seed)
#use system time for seed
random.seed(seed)
#prepare lists
consonants = []
vowels = []
parts = []
structures = []
#prepare the output file
fout = open(out, 'w')
#extract from input file
with open(inn) as fin:
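    #assumed input layout (inferred from the parsing loops below): four
    #sections in order - consonant classes, vowel classes, O/N/C patterns,
    #and syllable structures - each ended by a line starting with '#';
    #lines starting with '/' are skipped as comments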
#get consonants
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
consonants.append(list)
#get vowels
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
vowels.append(list)
#get parts
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
parts.append(list)
#get structures
for line in fin:
if line.strip() == "":
continue
list = line.split()
if list[0][0] == '#':
break
elif list[0][0] != '/':
structures.append(list)
#un-nest the syllable patterns
structures = structures[0]
#ask for number of words (default = 100)
i = raw_input("How many words would you like to build? (Leave blank for 50) ")
if i == "":
i = 50
else:
i = int(i)
low = raw_input("Enter minimum number of syllables per word (Defaults to 1) ")
if low == "":
low = 1
else:
low = int(low)
high = raw_input("Enter maximum number of syllables per word (Defaults to 5) ")
if high == "":
high = 5
else:
high = int(high)
while i > 0:
#working word variable
word = ""
#create word in this loop
for j in range(0, int(random.triangular(low, high + 1, low + 1))):
#working syllable variable
syll = ""
#choose a random syllable pattern to follow
form = structures[random.randrange(0, len(structures))]
for k in range(0, len(form)):
if form[k] == "O":
#retrieve a string that is a valid onset
syll += parse(0, parts, consonants)
elif form[k] == "C":
#retrieve a string that is a valid coda
syll += parse(2, parts, consonants)
elif form[k] == "N":
#retrieve a string that is a valid nucleus
syll += parse(1, parts, vowels)
#add new syllable to the word
word += syll
#print out the word followed by a newline
fout.write(word)
fout.write('\n')
#decrement loop iterator
i -= 1
#close files
fin.close()
fout.close()
sys.stdout.write("Program finished. \n")
#end program
| gpl-2.0 | -7,959,536,635,195,563,000 | 21.793103 | 88 | 0.648512 | false | 2.926937 | false | false | false |
irblsensitivity/irblsensitivity | scripts/features/MethodFeatures.py | 1 | 6286 | #-*- coding: utf-8 -*-
'''
Created on 2016. 11. 19
Updated on 2016. 01. 09
'''
from __future__ import print_function
import os
import re
from utils import PrettyStringBuilder
from utils import Progress
import javalang
class Resource(object):
Stopwords = None
EngDictionary = None
@staticmethod
def init():
if Resource.Stopwords is None:
Resource.Stopwords = Resource.load_base(u'stopwords')
if Resource.EngDictionary is None:
Resource.EngDictionary = Resource.load_base(u'en.dict')
@staticmethod
def load_base(_filename):
listDic = {}
f = open(_filename, 'r')
while True:
word = f.readline()
if word is None or len(word)==0: break
if len(word) <= 2: continue
word = word[:-2]
listDic[word] = 1
return listDic
class MethodWorker(object):
__name__ = u'MethodWithComments'
basepath = u'/var/experiments/BugLocalization/dist/features/'
def run(self, _group, _project, _versionName, _srcBase):
print(u'preparing resources...', end=u'')
Resource.init()
print(u'Done')
workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
filename = os.path.join(workingPath, u'%s.txt' % _versionName)
if os.path.exists(workingPath) is False: os.makedirs(workingPath)
if os.path.exists(filename) is True: return
methods={}
files = self.listing_files(_srcBase)
progress = Progress(u'Calculating method', 2, 10, True)
progress.set_upperbound(len(files))
progress.start()
for fname in files:
text = open(fname, 'r').read()
key = fname[len(_srcBase) + 1:]
names = []
try:
ADT = javalang.parse.parse(text)
cntConstructors, cntConstComments, cntConstInDic = self.count(ADT, javalang.tree.ConstructorDeclaration)
cntMethods, cntComments, cntMethodInDic = self.count(ADT, javalang.tree.MethodDeclaration)
methods[key] = {'methods':cntMethods+ cntConstructors,
'withComments':cntComments + cntConstComments,
'InDicMethods':cntMethodInDic + cntConstInDic}
except javalang.parser.JavaSyntaxError as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0, 'error':'SyntaxError'}
except javalang.tokenizer.LexerError as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'LexerError'}
except Exception as e:
methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods':0,'error':'Exception'}
progress.check()
progress.done()
self.storeData(filename, methods)
pass
def listing_files(self, _path):
results = []
for root, dirs, files in os.walk(_path):
for fname in files:
if fname.endswith('.java') is False:continue
results.append(os.path.join(root, fname))
return results
def count(self, _ADT, _filter):
cntMethods = 0
cntComments = 0
names = set([])
methodDecls = _ADT.filter(_filter)
for path, node in methodDecls:
cntMethods += 1
names.add(node.name)
if node.documentation is None or len(node.documentation) == 0: continue
doc = javalang.javadoc.parse(node.documentation)
if doc.description is None or len(doc.description) == 0: continue
cntComments += 1
cntInDic = 0
for name in names:
tokens = self.splitCamel(name)
tokens = self.removingStopwords(tokens)
if self.checkingEngDic(tokens) > 0:
cntInDic += 1
return cntMethods, cntComments, cntInDic #, list(names)
def splitCamel(self, token):
corpus = []
token = re.sub(r'([A-Z]+)(in|to|for|at|with|on|off|over)([A-Z]+\w+)', r'\1 \2 \3', token) # Lower case between Upper Cases (ex. XMLtoTEXT)
token = re.sub(r'([a-z0-9])([A-Z]\w+)', r'\1 \2', token) # UpperCase after LowerCase
items = token.split(' ')
for item in items:
item = item.strip()
if item == '': continue
if re.sub(r'[A-Z]+', '', item) != '':
item = re.sub(r'([A-Z]+)([A-Z]+\w+)', r'\1 \2', item) # ALLFiles ==> ALL Files
items2 = item.split(' ')
for item2 in items2:
if item.strip() == '': continue
corpus.append(item2)
else:
corpus.append(item)
return corpus
def removingStopwords(self, _tokens):
newer = set([])
for token in _tokens:
if len(token) <= 2: continue
if token.lower() in Resource.Stopwords: continue
newer.add(token)
return list(newer)
def checkingEngDic(self, _tokens):
count = 0
for token in _tokens:
if token in Resource.EngDictionary:
count += 1
continue
if token.lower() in Resource.EngDictionary:
count += 1
continue
nword = token[0].upper() + token[1:].lower()
if nword in Resource.EngDictionary:
count += 1
return count
#####################################
# managing cache
#####################################
def storeData(self, _filename, _data):
pretty = PrettyStringBuilder(_indent_depth=1)
text = pretty.toString(_data)
f = open(_filename, 'w')
f.write(text)
f.close()
def clear(self, _group, _project):
workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
try:
shutil.rmtree(workingPath)
print(u'Removed : %s' % workingPath)
except Exception as e:
print(u'No Path : %s' % workingPath)
###############################################################################################################
###############################################################################################################
###############################################################################################################
import shutil
from commons import Subjects
def clear():
S = Subjects()
for group in S.groups: # ['JBoss']: #
for project in S.projects[group]:
obj = MethodWorker()
obj.clear(group, project)
def work():
S = Subjects()
for group in ['JBoss', 'Wildfly']:#S.groups: # ['JBoss']: #
for project in S.projects[group]:
for versionName in S.bugs[project].keys():
if versionName == 'all' : continue
print(u'MethodWithComments for %s / %s / %s' % (group, project, versionName))
obj = MethodWorker()
obj.run(group, project, versionName, S.getPath_source(group, project, versionName))
if __name__ == "__main__":
#clear()
work()
pass | apache-2.0 | 4,348,889,472,794,515,500 | 30.081633 | 141 | 0.600223 | false | 3.216991 | false | false | false |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.kidsplace/brightcovePlayer.py | 1 | 1587 | import httplib
from pyamf import AMF0, AMF3
from pyamf import remoting
from pyamf.remoting.client import RemotingService
height = 1080
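# Maximum rendition height in pixels; play() picks the best rendition at or below this.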
def build_amf_request(const, playerID, videoPlayer, publisherID):
env = remoting.Envelope(amfVersion=3)
env.bodies.append(
(
"/1",
remoting.Request(
target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById",
body=[const, playerID, videoPlayer, publisherID],
envelope=env
)
)
)
return env
def get_clip_info(const, playerID, videoPlayer, publisherID, playerKey):
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = build_amf_request(const, playerID, videoPlayer, publisherID)
conn.request("POST", "/services/messagebroker/amf?playerKey=" + playerKey, str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
return response
def play(const, playerID, videoPlayer, publisherID, playerKey):
rtmpdata = get_clip_info(const, playerID, videoPlayer, publisherID, playerKey)
streamName = ""
    streamUrl = rtmpdata['FLVFullLengthURL']
for item in sorted(rtmpdata['renditions'], key=lambda item:item['frameHeight'], reverse=False):
streamHeight = item['frameHeight']
if streamHeight <= height:
streamUrl = item['defaultURL']
streamName = streamName + rtmpdata['displayName']
    return [streamName, streamUrl]
| apache-2.0 | -1,561,841,478,157,068,000 | 35.068182 | 156 | 0.669187 | false | 3.742925 | false | false | false |
iamantony/PythonNotes | src/objects/matrix.py | 1 | 5188 | __author__ = 'Antony Cherepanov'
class MatrixException(Exception):
pass
class Matrix(object):
def __init__(self, t_rowNum=0, t_colNum=0, t_values=None):
if not self.__checkDimensionType(t_rowNum) or\
not self.__checkDimensionType(t_colNum):
raise MatrixException("Invalid number of matrix size")
self.__rows = max(t_rowNum, 0)
self.__cols = max(t_colNum, 0)
numOfElements = self.__rows * self.__cols
if t_values is None or \
not isinstance(t_values, list) or \
len(t_values) != numOfElements:
self.__matrix = [0 for i in range(numOfElements)]
else:
self.__matrix = t_values
def __checkDimensionType(self, t_dim):
if isinstance(t_dim, int):
return True
return False
def __str__(self):
return "Matrix of " + str(self.__rows) + " rows and " +\
str(self.__cols) + " cols: " + str(self.__matrix)
def __add__(self, other):
if not isinstance(other, Matrix) or \
(self.__rows != other.rows() and self.__cols != other.cols()):
raise MatrixException("Failed to add matrix")
sumData = list()
for i in range(self.__rows):
for j in range(self.__cols):
value = self.GetValue(i, j) + other.GetValue(i, j)
sumData.append(value)
result = Matrix(self.__rows, self.__cols, sumData)
return result
def __sub__(self, other):
if not isinstance(other, Matrix) or \
(self.__rows != other.rows() and self.__cols != other.cols()):
raise MatrixException("Failed to subtract matrix")
subData = list()
for i in range(self.__rows):
for j in range(self.__cols):
value = self.GetValue(i, j) - other.GetValue(i, j)
subData.append(value)
result = Matrix(self.__rows, self.__cols, subData)
return result
def __mul__(self, other):
if not isinstance(other, Matrix) or \
self.__cols != other.rows():
raise MatrixException("Failed to multiply matrix")
mulData = list()
# Iterate by elements of result matrix
for i in range(self.__rows):
for j in range(other.cols()):
sumValue = 0
for iter in range(self.__cols):
sumValue += self.GetValue(i, iter) * other.GetValue(iter, j)
mulData.append(sumValue)
result = Matrix(self.__rows, other.cols(), mulData)
return result
def rows(self):
return self.__rows
def cols(self):
return self.__cols
def IsSquare(self):
if self.__cols == self.__rows:
return True
return False
def __getIndex(self, t_row, t_col):
if not self.__checkDimensionType(t_row) or\
not self.__checkDimensionType(t_col):
raise MatrixException("Invalid coordinates type")
index = self.__cols * t_row + t_col
if index < 0 or len(self.__matrix) <= index:
return None
return index
def GetValue(self, t_row, t_col):
index = self.__getIndex(t_row, t_col)
if index is None:
raise MatrixException("Invalid index")
return self.__matrix[index]
def SetValue(self, t_row, t_col, t_value):
index = self.__getIndex(t_row, t_col)
if index is None:
raise MatrixException("Invalid index")
self.__matrix[index] = t_value
def GetSlice(self, t_topLeft, t_bottomRight):
# TODO: Definitely there could be a better approach
if 2 != len(t_topLeft) or 2 != len(t_bottomRight):
raise MatrixException("Invalid slice coordinates")
data = list()
startI = t_topLeft[0]
endI = t_bottomRight[0] + 1
startJ = t_topLeft[1]
endJ = t_bottomRight[1] + 1
for i in range(startI, endI):
for j in range(startJ, endJ):
value = self.GetValue(i, j)
data.append(value)
result = Matrix(endI - startI, endJ - startJ, data)
return result
def SetSlice(self, t_topLeft, t_bottomRight, t_slice):
if 2 != len(t_topLeft) or 2 != len(t_bottomRight) or \
not isinstance(t_slice, Matrix):
raise MatrixException("Invalid slice coordinates or slice matrix")
startI = t_topLeft[0]
endI = t_bottomRight[0] + 1
startJ = t_topLeft[1]
endJ = t_bottomRight[1] + 1
        if (endI - startI) != t_slice.rows() or\
                (endJ - startJ) != t_slice.cols():
return False
for i, slI in zip(range(startI, endI), range(t_slice.rows())):
for j, slJ in zip(range(startJ, endJ), range(t_slice.cols())):
value = t_slice.GetValue(slI, slJ)
self.SetValue(i, j, value)
return True | mit | -3,138,855,623,441,978,400 | 31.701299 | 80 | 0.528142 | false | 3.963331 | false | false | false |
jhermann/kunstkopf | src/kunstkopf/__init__.py | 1 | 1258 | # -*- coding: utf-8 -*-
# pylint: disable=bad-whitespace
"""
kunstkopf [ˈkʊnstkɔp͜f] is a set of tools that handle audio (meta-)data and control hi-fi gear.
Copyright © 2015 Jürgen Hermann <[email protected]>
Licensed under the GNU General Public License, Version 3.0
"""
# Copyright © 2015 Jürgen Hermann <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see {http://www.gnu.org/licenses/}.
__url__ = "https://github.com/jhermann/kunstkopf"
__version__ = "0.1.0"
__license__ = "GPLv3"
__author__ = "Jürgen Hermann"
__author_email__ = "[email protected]"
__keywords__ = "python audio tool tagging indexing searching syncing"
__all__ = []
| gpl-3.0 | -7,133,038,733,929,701,000 | 38.03125 | 99 | 0.68775 | false | 3.384824 | false | false | false |
HBNLdev/DataStore | db/sas_tools.py | 1 | 2566 | ''' tools for working with .sas7bdat files '''
import os
from collections import OrderedDict
import pandas as pd
from sas7bdat import SAS7BDAT
from .knowledge.questionnaires import map_ph4, map_ph4_ssaga
map_subject = {'core': {'file_pfixes': []}}
parent_dir = '/processed_data/zork/zork-phase4-69/session/'
n_header_lines = 30
def extract_descriptions(path):
''' given path to .sas7bdat file, returns dictionary mapping column labels
to their verbose descriptions in the SAS header.
dictionary will only contain an entry if there was new information present
(if there was a description, and it was different from the label) '''
f = SAS7BDAT(path)
kmap = OrderedDict()
for line in str(f.header).splitlines()[n_header_lines + 1:]:
line_parts = line.split(maxsplit=4)
label = line_parts[1]
try:
description = line_parts[4].rstrip()
if description == label or description[0] == '$':
continue
else:
kmap[label] = description
except IndexError:
pass
return kmap
def exemplary_files(kdict):
''' given a questionnaire knowledge map,
return a new dictionary mapping questionnaire names to the filepath
of an exemplary .sas7bdat file for each file prefix '''
exemplars = {}
for test, tdict in kdict.items():
for fpx in tdict['file_pfixes']:
fd = parent_dir + test
fn = fpx + '.sas7bdat'
fp = os.path.join(fd, fn)
if os.path.exists(fp):
exemplars[test] = fp
else:
print(fp, 'did not exist')
return exemplars
def build_labelmaps():
''' return a dict in which keys are questionnaires names and values are
dictionaries mapping column labels to descriptions '''
comb_dict = map_ph4.copy()
comb_dict.update(map_ph4_ssaga)
exemplars = exemplary_files(comb_dict)
big_kmap = {}
for test, fp in exemplars.items():
kmap = extract_descriptions(fp)
big_kmap[test] = kmap
return big_kmap
def df_fromsas(fullpath, id_lbl='ind_id'):
''' convert .sas7bdat to dataframe.
unused because fails on incorrectly formatted files. '''
# read csv in as dataframe
df = pd.read_sas(fullpath, format='sas7bdat')
# convert id to str and save as new column
df[id_lbl] = df[id_lbl].apply(int).apply(str)
df['ID'] = df[id_lbl]
return df
| gpl-3.0 | -7,685,914,298,742,378,000 | 30.481013 | 82 | 0.606002 | false | 3.767988 | false | false | false |
OpenTreeOfLife/gcmdr | run_synth_studies_mono.py | 1 | 1437 | import load_synth_extract
from plants import studytreelist as plantslist
from metazoa import studytreelist as metalist
from fungi import studytreelist as fungilist
from microbes import studytreelist as microbelist
studytreelist = []
studytreelist.extend(metalist)
studytreelist.extend(fungilist)
studytreelist.extend(microbelist)
studytreelist.extend(plantslist)
if __name__ == "__main__":
from wopr_conf_TEMP import *
synthottolid="93302" # cellular organisms
# studytreelist = ["420_522"]
# studytreelist = ["2460_5285"] # Pyron Squamata study
# studytreelist = ["2573_5959"] # Sauria
# studytreelist = ["2573_5959"]
# from metazoa import studytreelist as metalist
# studytreelist = []
# studytreelist.extend(metalist)
# studytreelist = [
# "1634_3303", # Chiroptera. Agnarsson et al. 2011. PLoS Currents Tree of Life
# ]
print "loading synthottolid:",synthottolid
print "loading studytreelist:",studytreelist
for i in studytreelist:
tstudy_list = [i]
generallogfileloc = "synth_studies_submission/"+i+".log"
ttfntreefn = "synth_studies_submission/"+i+".tre"
infmonofn = "synth_studies_submission/"+i+".inf_mono"
load_synth_extract.run_load_single_ttfn_inf_mono(dott,dload,studyloc,tstudy_list,javapre,
treemloc,generallogfileloc,dsynth,synthottolid,treefn,ttfntreefn,infmonofn)
| bsd-2-clause | -8,043,959,451,811,851,000 | 33.214286 | 123 | 0.695198 | false | 3.057447 | false | false | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/porn/adultbay.py | 1 | 12372 | # -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2015
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
class adultbayGenreScreen(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.suchString = ''
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.genreData)
def genreData(self):
self.filmliste.append(("--- Search ---", None))
self.filmliste.append(("Newest (Clips)", "http://adultbay.org/category/clips/"))
self.filmliste.append(("Newest (Movies)", "http://adultbay.org/category/movies/"))
self.filmliste.append(("Clips", None))
self.filmliste.append(("Movies", None))
self.filmliste.append(("HDTV", None))
self.filmliste.append(("DVD-R", "http://adultbay.org/category/dvd-r/"))
self.filmliste.append(("Hentai", "http://adultbay.org/category/hentai/"))
self.ml.setList(map(self._defaultlistcenter, self.filmliste))
self.keyLocked = False
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '+')
Link = self.suchString
Name = "--- Search ---"
self.session.open(adultbayListScreen, Link, Name)
def keyOK(self):
if self.keyLocked:
return
if not config.mediaportal.premiumize_use.value:
message = self.session.open(MessageBoxExt, _("The Adult Bay only works with enabled MP premiumize.me option (MP Setup)!"), MessageBoxExt.TYPE_INFO, timeout=10)
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
if Name == "--- Search ---":
self.suchen()
elif Link != None:
self.session.open(adultbayListScreen, Link, Name)
else:
self.session.open(adultbaySubGenreScreen, Name)
class adultbaySubGenreScreen(MPScreen):
def __init__(self, session, Name):
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
url = "http://adultbay.org/"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)
def parseData(self, data):
parse = re.search('class="cat-item.*?>'+self.Name+'</a>(.*?)</ul>', data, re.S)
raw = re.findall('<li\sclass="cat-item.*?a\shref="(.*?)".*?>(.*?)</a>', parse.group(1), re.S)
if raw:
self.filmliste = []
for (Url, Title) in raw:
self.filmliste.append((decodeHtml(Title), Url))
self.filmliste.sort()
self.ml.setList(map(self._defaultlistcenter, self.filmliste))
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
self.session.open(adultbayListScreen, Link, Name)
class adultbayListScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.page = 1
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self.filmliste = []
if re.match(".*?Search", self.Name):
url = "http://adultbay.org/search/%s/page/%s/" % (self.Link, str(self.page))
else:
if self.page == 1:
url = self.Link
else:
url = self.Link + "page/" + str(self.page) + "/"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)
def parseData(self, data):
if re.match('.*?<h2>Not Found</h2>', data, re.S):
self.filmliste.append((_('No movies found!'), None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
elif re.match('.*?<h2>Sorry: No Results</h2>', data, re.S):
self.filmliste.append((_('No movies found!'), None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
elif re.match('.*?Search is temporarily disabled', data, re.S):
self.filmliste.append(("Search is temporarily disabled...", None, None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
else:
parse = re.search('class="wp-pagenavi">(.*?)</div>', data, re.S)
if parse:
lastpage = re.findall('\d{0,1},{0,1}\d+', parse.group(1), re.S)
lastpage = [x.replace(',', '') for x in lastpage]
lastpage = [int(x) for x in lastpage]
lastpage.sort(key=int)
self.lastpage = int(lastpage[-1])
self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage)))
else:
parse = re.search('class="navigation">.*?/page/(.*?)/.*?Older Entries', data, re.S)
if parse:
self.lastpage = int(parse.group(1))
else:
self.lastpage = 1
self['page'].setText("%s / %s" % (str(self.page), str(self.lastpage)))
raw = re.findall('class="post".*?<a\shref="(.*?)".*?img\ssrc="(.*?)".*?(<strong>|<p>)(.*?)(</strong>|<br\s/>|</p>).*?<p>(.*?)(Read\smore|\(more...\))', data, re.S)
if raw:
for (link, image, trash, title, trash, desc, trash) in raw:
title = stripAllTags(title).strip()
desc = stripAllTags(desc).strip()
self.filmliste.append((decodeHtml(title), link, image, desc))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
self['name'].setText(title)
desc = self['liste'].getCurrent()[0][3]
self['handlung'].setText(desc)
coverUrl = self['liste'].getCurrent()[0][2]
CoverHelper(self['coverArt']).getCover(coverUrl)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][0]
if Link == None:
return
Title = self['liste'].getCurrent()[0][1]
Cover = self['liste'].getCurrent()[0][2]
self.session.open(StreamAuswahl, Link, Title, Cover)
class StreamAuswahl(MPScreen):
def __init__(self, session, Title, Link, Cover):
self.Link = Link
self.Title = Title
self.Cover = Cover
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel": self.keyCancel
}, -1)
self['title'] = Label("The Adult Bay")
self['ContentTitle'] = Label("%s" %self.Title)
self.filmliste = []
self.keyLocked = True
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
CoverHelper(self['coverArt']).getCover(self.Cover)
self.keyLocked = True
url = self.Link
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadPageData).addErrback(self.dataError)
def loadPageData(self, data):
parse = re.search('class="post_header">(.*?)Recommends:</h2>', data, re.S)
streams = re.findall('(http://(?!adultbay.org)(.*?)\/.*?)[\'|"|\&|<]', parse.group(1), re.S)
if streams:
for (stream, hostername) in streams:
if isSupportedHoster(hostername, True):
hostername = hostername.replace('www.','')
self.filmliste.append((hostername, stream))
# remove duplicates
self.filmliste = list(set(self.filmliste))
if len(self.filmliste) == 0:
self.filmliste.append((_('No supported streams found!'), None))
self.ml.setList(map(self._defaultlisthoster, self.filmliste))
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
url = self['liste'].getCurrent()[0][1]
if url == None:
return
get_stream_link(self.session).check_link(url, self.got_link)
def got_link(self, stream_url):
if stream_url == None:
message = self.session.open(MessageBoxExt, _("Stream not found, try another Stream Hoster."), MessageBoxExt.TYPE_INFO, timeout=3)
else:
title = self.Title
self.session.open(SimplePlayer, [(self.Title, stream_url, self.Cover)], showPlaylist=False, ltype='adultbay', cover=True) | gpl-2.0 | 2,939,069,052,170,796,000 | 36.374622 | 166 | 0.66637 | false | 2.935453 | false | false | false |
kll334477/NewsScrapy | thepaper/thepaper/spiders/wshang_spider.py | 1 | 5240 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'yinzishao'
"""
手机版没有cookie,更方便
但是pc版的首页是所有分类混在一起的
手机版则是新闻在各个分类,所以爬取的时候需要爬各个分类。
"""
import re
import scrapy
from bs4 import BeautifulSoup
import logging
from thepaper.items import NewsItem
import json
logger = logging.getLogger("WshangSpider")
from thepaper.settings import *
from thepaper.util import judge_news_crawl
#TODO:
class NbdSpider(scrapy.spiders.Spider):
domain = "http://m.iwshang.com/"
name = "wshang"
# allowed_domains = ["i.wshang.com",]
flag = {}
start_urls = [
"http://m.iwshang.com/",
]
#pc端新闻页面url
pc_news_url = "http://i.wshang.com/Post/Default/Index/pid/%s.html"
def parse(self, response):
"""
:param response:
:return:抛出每个类别的post请求
post参数:
inslider
page
pagesize
Content-Type:application/x-www-form-urlencoded
"""
soup = BeautifulSoup(response.body)
menu = soup.find_all("a",class_="ui-more") #所有的类别的链接
if menu:
for topic in menu:
topic_name = topic.text.replace(u"查看","")
topic_url = topic.get("href")
self.flag.setdefault(topic_url,0)
page="1"
#post_data需要字符串
post_data = {
"inslider":"0",
"page":page,
"pagesize":"10"
}
# yield scrapy.Request(topic_url,
# callback=self.parse_topic,
# method="POST",
# headers={"Content-Type":"application/x-www-form-urlencoded"},
# body=json.dumps(post_data)
# )
yield scrapy.FormRequest(
url=topic_url,
formdata=post_data,
callback=self.parse_topic,
meta={"page":page,"topic_name":topic_name}
)
def parse_topic(self,response):
topic_url = response.url
# print topic_url
body = json.loads(response.body)
news_list = body["data"]
page = response.meta.get("page","1")
topic_name = response.meta.get("topic_name",None)
#http://m.iwshang.com/category/20 没有新闻
if not news_list:
self.flag[topic_url]=page
for news in news_list:
news_date_timestamp = news.get("published",None)
struct_date = datetime.datetime.fromtimestamp(int(news_date_timestamp))
news_date = struct_date.strftime("%Y-%m-%d %H:%M:%S")
title = news.get("title",None)
news_no = news.get("contentid",None)
abstract = news.get("description",None)
pic = news.get("thumb",None)
news_url = news.get("url",None) #手机端新闻页面链接
referenceid = news.get("referenceid",None) #pc端的id,手机端的id跟pc端的id不一样
pc_news_url = self.pc_news_url % referenceid #pc端新闻页面链接
item = NewsItem(
news_date=news_date,
title=title,
news_no=news_no,
abstract=abstract,
pic=pic,
news_url=pc_news_url,
topic=topic_name
)
item = judge_news_crawl(item)
if item:
# yield item
yield scrapy.Request(pc_news_url,callback=self.parse_news,meta={"item":item})
else:
self.flag[topic_url]=page
if not self.flag[topic_url]:
page = str(int(page)+1)
post_data = {
"inslider":"0",
"page":page,
"pagesize":"10"
}
yield scrapy.FormRequest(
url=topic_url,
formdata=post_data,
callback=self.parse_topic,
meta={"page":page}
)
def parse_news(self,response):
item = response.meta.get("item",NewsItem())
soup = BeautifulSoup(response.body)
#手机
# content = soup.find("div",id="content-show").get_text(strip=True) if soup.find("div",id="content-show") else None
#pc
content = soup.find("div",class_="article-cont").get_text(strip=True) if soup.find("div",class_="article-cont") else None
article_head = soup.find("div",class_="article-head")
author=None
if article_head:
author = article_head.p.text.split(u"/")[1]
article_tag_list = soup.find("div",class_="article-tag")("a") if soup.find("div",class_="article-tag") else []
tags = [tag.text for tag in article_tag_list]
item["tags"] = tags
item["author"] = author
item["content"] = content
item["crawl_date"] = NOW
yield item
| lgpl-3.0 | 526,355,286,195,562,100 | 34.7 | 129 | 0.503601 | false | 3.532155 | false | false | false |
archesproject/arches | arches/management/commands/card_component.py | 1 | 3937 | """
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import uuid
from arches.management.commands import utils
from arches.app.models import models
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import IntegrityError
class Command(BaseCommand):
"""
Commands for managing Arches functions
"""
def add_arguments(self, parser):
parser.add_argument("operation", nargs="?")
parser.add_argument("-s", "--source", action="store", dest="source", default="", help="Widget json file to be loaded")
parser.add_argument("-n", "--name", action="store", dest="name", default="", help="The name of the widget to unregister")
def handle(self, *args, **options):
if options["operation"] == "register":
self.register(source=options["source"])
if options["operation"] == "unregister":
self.unregister(name=options["name"])
if options["operation"] == "list":
self.list()
if options["operation"] == "update":
self.update(source=options["source"])
def register(self, source):
"""
Inserts a card component into the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
try:
uuid.UUID(details["componentid"])
except:
details["componentid"] = str(uuid.uuid4())
print("Registering card component with componentid: {}".format(details["componentid"]))
instance = models.CardComponent(
componentid=details["componentid"],
name=details["name"],
description=details["description"],
component=details["component"],
componentname=details["componentname"],
defaultconfig=details["defaultconfig"],
)
instance.save()
def update(self, source):
"""
Updates an existing card component in the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
instance = models.CardComponent.objects.get(name=details["name"])
instance.description = details["description"]
instance.component = details["component"]
instance.componentname = details["componentname"]
instance.defaultconfig = details["defaultconfig"]
instance.save()
def unregister(self, name):
"""
Removes a function from the system
"""
try:
instances = models.CardComponent.objects.filter(name=name)
if str(instances[0].componentid) != "f05e4d3a-53c1-11e8-b0ea-784f435179ea":
instances[0].delete()
else:
print("You cannot unregister the default card component.")
except Exception as e:
print(e)
def list(self):
"""
Lists registered card components
"""
try:
instances = models.CardComponent.objects.all()
for instance in instances:
print(instance.name)
except Exception as e:
print(e)
| agpl-3.0 | -963,692,031,037,702,900 | 30.496 | 129 | 0.626365 | false | 4.494292 | false | false | false |
wroersma/volatility | volatility/plugins/overlays/windows/win10.py | 1 | 21936 | # Volatility
# Copyright (c) 2008-2015 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: The Volatility Foundation
@license: GNU General Public License 2.0
@contact: [email protected]
This file provides support for Windows 10.
"""
import volatility.plugins.overlays.windows.windows as windows
import volatility.obj as obj
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.plugins.overlays.windows.win8 as win8
try:
import distorm3
has_distorm = True
except ImportError:
has_distorm = False
class _HMAP_ENTRY(obj.CType):
@property
def BlockAddress(self):
return self.PermanentBinAddress & 0xFFFFFFFFFFF0
class Win10Registry(obj.ProfileModification):
"""The Windows 10 registry HMAP"""
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
profile.object_classes.update({"_HMAP_ENTRY": _HMAP_ENTRY})
class Win10x64DTB(obj.ProfileModification):
"""The Windows 10 64-bit DTB signature"""
before = ['WindowsOverlay', 'Windows64Overlay', 'Win8x64DTB']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'memory_model': lambda x: x == '64bit',
}
def modification(self, profile):
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'DTBSignature' : [ None, ['VolatilityMagic', dict(value = "\x03\x00\xb6\x00")]],
}]})
class Win10x86DTB(obj.ProfileModification):
"""The Windows 10 32-bit DTB signature"""
before = ['WindowsOverlay', 'Win8x86DTB']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'memory_model': lambda x: x == '32bit',
}
def modification(self, profile):
build = profile.metadata.get("build", 0)
if build >= 15063:
signature = "\x03\x00\x2C\x00"
else:
signature = "\x03\x00\x2A\x00"
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'DTBSignature' : [ None, ['VolatilityMagic', dict(value = signature)]],
}]})
class Win10KDBG(windows.AbstractKDBGMod):
"""The Windows 10 KDBG signatures"""
before = ['Win8KDBG']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'build': lambda x: x >= 14393}
kdbgsize = 0x368
class ObHeaderCookieStore(object):
"""A class for finding and storing the nt!ObHeaderCookie value"""
_instance = None
def __init__(self):
self._cookie = None
def cookie(self):
return self._cookie
def findcookie(self, kernel_space):
"""Find and read the nt!ObHeaderCookie value.
On success, return True and save the cookie value in self._cookie.
On Failure, return False.
This method must be called before performing any tasks that require
object header validation including handles, psxview (due to pspcid)
and the object scanning plugins (psscan, etc).
NOTE: this cannot be implemented as a volatility "magic" class,
because it must be persistent across various classes and sources.
We don't want to recalculate the cookie value multiple times.
"""
meta = kernel_space.profile.metadata
vers = (meta.get("major", 0), meta.get("minor", 0))
# this algorithm only applies to Windows 10 or greater
if vers < (6, 4):
return True
# prevent subsequent attempts from recalculating the existing value
if self._cookie:
return True
if not has_distorm:
debug.warning("distorm3 module is not installed")
return False
kdbg = tasks.get_kdbg(kernel_space)
if not kdbg:
debug.warning("Cannot find KDBG")
return False
nt_mod = None
for mod in kdbg.modules():
nt_mod = mod
break
if nt_mod == None:
debug.warning("Cannot find NT module")
return False
addr = nt_mod.getprocaddress("ObGetObjectType")
if addr == None:
debug.warning("Cannot find nt!ObGetObjectType")
return False
# produce an absolute address by adding the DLL base to the RVA
addr += nt_mod.DllBase
if not nt_mod.obj_vm.is_valid_address(addr):
debug.warning("nt!ObGetObjectType at {0} is invalid".format(addr))
return False
# in theory...but so far we haven't tested 32-bits
model = meta.get("memory_model")
if model == "32bit":
mode = distorm3.Decode32Bits
else:
mode = distorm3.Decode64Bits
data = nt_mod.obj_vm.read(addr, 100)
ops = distorm3.Decompose(addr, data, mode, distorm3.DF_STOP_ON_RET)
addr = None
# search backwards from the RET and find the MOVZX
if model == "32bit":
# movzx ecx, byte ptr ds:_ObHeaderCookie
for op in reversed(ops):
if (op.size == 7 and
'FLAG_DST_WR' in op.flags and
len(op.operands) == 2 and
op.operands[0].type == 'Register' and
op.operands[1].type == 'AbsoluteMemoryAddress' and
op.operands[1].size == 8):
addr = op.operands[1].disp & 0xFFFFFFFF
break
else:
# movzx ecx, byte ptr cs:ObHeaderCookie
for op in reversed(ops):
if (op.size == 7 and
'FLAG_RIP_RELATIVE' in op.flags and
len(op.operands) == 2 and
op.operands[0].type == 'Register' and
op.operands[1].type == 'AbsoluteMemory' and
op.operands[1].size == 8):
addr = op.address + op.size + op.operands[1].disp
break
if not addr:
debug.warning("Cannot find nt!ObHeaderCookie")
return False
if not nt_mod.obj_vm.is_valid_address(addr):
debug.warning("nt!ObHeaderCookie at {0} is not valid".format(addr))
return False
cookie = obj.Object("unsigned int", offset = addr, vm = nt_mod.obj_vm)
self._cookie = int(cookie)
return True
@staticmethod
def instance():
if not ObHeaderCookieStore._instance:
ObHeaderCookieStore._instance = ObHeaderCookieStore()
return ObHeaderCookieStore._instance
class VolatilityCookie(obj.VolatilityMagic):
"""The Windows 10 Cookie Finder"""
def v(self):
if self.value is None:
return self.get_best_suggestion()
else:
return self.value
def get_suggestions(self):
if self.value:
yield self.value
for x in self.generate_suggestions():
yield x
def generate_suggestions(self):
store = ObHeaderCookieStore.instance()
store.findcookie(self.obj_vm)
yield store.cookie()
class Win10Cookie(obj.ProfileModification):
"""The Windows 10 Cookie Finder"""
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
}
def modification(self, profile):
profile.merge_overlay({
'VOLATILITY_MAGIC': [ None, {
'ObHeaderCookie' : [ 0x0, ['VolatilityCookie', dict(configname = "COOKIE")]],
}]})
profile.object_classes.update({'VolatilityCookie': VolatilityCookie})
class _OBJECT_HEADER_10(win8._OBJECT_HEADER):
@property
def TypeIndex(self):
"""Wrap the TypeIndex member with a property that decodes it
with the nt!ObHeaderCookie value."""
cook = obj.VolMagic(self.obj_vm).ObHeaderCookie.v()
addr = self.obj_offset
indx = int(self.m("TypeIndex"))
return ((addr >> 8) ^ cook ^ indx) & 0xFF
def is_valid(self):
"""Determine if a given object header is valid"""
if not obj.CType.is_valid(self):
return False
if self.InfoMask > 0x88:
return False
if self.PointerCount > 0x1000000 or self.PointerCount < 0:
return False
return True
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'Silo',
12: 'DebugObject',
13: 'Event',
14: 'Mutant',
15: 'Callback',
16: 'Semaphore',
17: 'Timer',
18: 'IRTimer',
19: 'Profile',
20: 'KeyedEvent',
21: 'WindowStation',
22: 'Desktop',
23: 'Composition',
24: 'RawInputManager',
25: 'TpWorkerFactory',
26: 'Adapter',
27: 'Controller',
28: 'Device',
29: 'Driver',
30: 'IoCompletion',
31: 'WaitCompletionPacket',
32: 'File',
33: 'TmTm',
34: 'TmTx',
35: 'TmRm',
36: 'TmEn',
37: 'Section',
38: 'Session',
39: 'Partition',
40: 'Key',
41: 'ALPC Port',
42: 'PowerRequest',
43: 'WmiGuid',
44: 'EtwRegistration',
45: 'EtwConsumer',
46: 'DmaAdapter',
47: 'DmaDomain',
48: 'PcwObject',
49: 'FilterConnectionPort',
50: 'FilterCommunicationPort',
51: 'NetworkNamespace',
52: 'DxgkSharedResource',
53: 'DxgkSharedSyncObject',
54: 'DxgkSharedSwapChainObject',
}
class _OBJECT_HEADER_10_1AC738FB(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'DebugObject',
12: 'Event',
13: 'Mutant',
14: 'Callback',
15: 'Semaphore',
16: 'Timer',
17: 'IRTimer',
18: 'Profile',
19: 'KeyedEvent',
20: 'WindowStation',
21: 'Desktop',
22: 'Composition',
23: 'RawInputManager',
24: 'TpWorkerFactory',
25: 'Adapter',
26: 'Controller',
27: 'Device',
28: 'Driver',
29: 'IoCompletion',
30: 'WaitCompletionPacket',
31: 'File',
32: 'TmTm',
33: 'TmTx',
34: 'TmRm',
35: 'TmEn',
36: 'Section',
37: 'Session',
38: 'Partition',
39: 'Key',
40: 'ALPC Port',
41: 'PowerRequest',
42: 'WmiGuid',
43: 'EtwRegistration',
44: 'EtwConsumer',
45: 'DmaAdapter',
46: 'DmaDomain',
47: 'PcwObject',
48: 'FilterConnectionPort',
49: 'FilterCommunicationPort',
50: 'NetworkNamespace',
51: 'DxgkSharedResource',
52: 'DxgkSharedSyncObject',
53: 'DxgkSharedSwapChainObject',
}
class _OBJECT_HEADER_10_DD08DD42(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'PsSiloContextPaged',
12: 'PsSiloContextNonPaged',
13: 'DebugObject',
14: 'Event',
15: 'Mutant',
16: 'Callback',
17: 'Semaphore',
18: 'Timer',
19: 'IRTimer',
20: 'Profile',
21: 'KeyedEvent',
22: 'WindowStation',
23: 'Desktop',
24: 'Composition',
25: 'RawInputManager',
26: 'CoreMessaging',
27: 'TpWorkerFactory',
28: 'Adapter',
29: 'Controller',
30: 'Device',
31: 'Driver',
32: 'IoCompletion',
33: 'WaitCompletionPacket',
34: 'File',
35: 'TmTm',
36: 'TmTx',
37: 'TmRm',
38: 'TmEn',
39: 'Section',
40: 'Session',
41: 'Partition',
42: 'Key',
43: 'RegistryTransaction',
44: 'ALPC',
45: 'PowerRequest',
46: 'WmiGuid',
47: 'EtwRegistration',
48: 'EtwConsumer',
49: 'DmaAdapter',
50: 'DmaDomain',
51: 'PcwObject',
52: 'FilterConnectionPort',
53: 'FilterCommunicationPort',
54: 'NdisCmState',
55: 'DxgkSharedResource',
56: 'DxgkSharedSyncObject',
57: 'DxgkSharedSwapChainObject',
58: 'VRegConfigurationContext',
59: 'VirtualKey',
}
class _OBJECT_HEADER_10_15063(_OBJECT_HEADER_10):
type_map = {
2: 'Type',
3: 'Directory',
4: 'SymbolicLink',
5: 'Token',
6: 'Job',
7: 'Process',
8: 'Thread',
9: 'UserApcReserve',
10: 'IoCompletionReserve',
11: 'ActivityReference',
12: 'PsSiloContextPaged',
13: 'PsSiloContextNonPaged',
14: 'DebugObject',
15: 'Event',
16: 'Mutant',
17: 'Callback',
18: 'Semaphore',
19: 'Timer',
20: 'IRTimer',
21: 'Profile',
22: 'KeyedEvent',
23: 'WindowStation',
24: 'Desktop',
25: 'Composition',
26: 'RawInputManager',
27: 'CoreMessaging',
28: 'TpWorkerFactory',
29: 'Adapter',
30: 'Controller',
31: 'Device',
32: 'Driver',
33: 'IoCompletion',
34: 'WaitCompletionPacket',
35: 'File',
36: 'TmTm',
37: 'TmTx',
38: 'TmRm',
39: 'TmEn',
40: 'Section',
41: 'Session',
42: 'Partition',
43: 'Key',
44: 'RegistryTransaction',
45: 'ALPC Port',
46: 'PowerRequest',
47: 'WmiGuid',
48: 'EtwRegistration',
49: 'EtwSessionDemuxEntry',
50: 'EtwConsumer',
51: 'DmaAdapter',
52: 'DmaDomain',
53: 'PcwObject',
54: 'FilterConnectionPort',
55: 'FilterCommunicationPort',
56: 'NdisCmState',
57: 'DxgkSharedResource',
58: 'DxgkSharedSyncObject',
59: 'DxgkSharedSwapChainObject',
60: 'DxgkCurrentDxgProcessObject',
61: 'VRegConfigurationContext'
}
class _HANDLE_TABLE_10_DD08DD42(win8._HANDLE_TABLE_81R264):
def decode_pointer(self, value):
value = value & 0xFFFFFFFFFFFFFFF8
value = value >> self.DECODE_MAGIC
if (value & (1 << 47)):
value = value | 0xFFFF000000000000
return value
class Win10ObjectHeader(obj.ProfileModification):
before = ["Win8ObjectClasses"]
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4}
def modification(self, profile):
metadata = profile.metadata
build = metadata.get("build", 0)
if build >= 15063:
header = _OBJECT_HEADER_10_15063
## update the handle table here as well
if metadata.get("memory_model") == "64bit":
profile.object_classes.update({
"_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42})
elif build >= 14393:
header = _OBJECT_HEADER_10_DD08DD42
## update the handle table here as well
if metadata.get("memory_model") == "64bit":
profile.object_classes.update({
"_HANDLE_TABLE": _HANDLE_TABLE_10_DD08DD42})
elif build >= 10240:
header = _OBJECT_HEADER_10_1AC738FB
else:
header = _OBJECT_HEADER_10
profile.object_classes.update({"_OBJECT_HEADER": header})
class Win10PoolHeader(obj.ProfileModification):
before = ['WindowsOverlay']
conditions = {'os': lambda x: x == 'windows',
'major': lambda x: x == 6,
'minor': lambda x: x == 4,
'build': lambda x: x == 10240}
def modification(self, profile):
meta = profile.metadata
memory_model = meta.get("memory_model", "32bit")
if memory_model == "32bit":
pool_types = {'_POOL_HEADER' : [ 0x8, {
'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]],
'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]],
'BlockSize' : [ 0x2, ['BitField', dict(start_bit = 0, end_bit = 9, native_type='unsigned short')]],
'PoolType' : [ 0x2, ['BitField', dict(start_bit = 9, end_bit = 16, native_type='unsigned short')]],
'Ulong1' : [ 0x0, ['unsigned long']],
'PoolTag' : [ 0x4, ['unsigned long']],
'AllocatorBackTraceIndex' : [ 0x4, ['unsigned short']],
'PoolTagHash' : [ 0x6, ['unsigned short']],
}]}
else:
pool_types = {'_POOL_HEADER' : [ 0x10, {
'PreviousSize' : [ 0x0, ['BitField', dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]],
'PoolIndex' : [ 0x0, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]],
'BlockSize' : [ 0x2, ['BitField', dict(start_bit = 0, end_bit = 8, native_type='unsigned short')]],
'PoolType' : [ 0x2, ['BitField', dict(start_bit = 8, end_bit = 16, native_type='unsigned short')]],
'Ulong1' : [ 0x0, ['unsigned long']],
'PoolTag' : [ 0x4, ['unsigned long']],
'ProcessBilled' : [ 0x8, ['pointer64', ['_EPROCESS']]],
'AllocatorBackTraceIndex' : [ 0x8, ['unsigned short']],
'PoolTagHash' : [ 0xa, ['unsigned short']],
}]}
profile.vtypes.update(pool_types)
class Win10x64(obj.Profile):
""" A Profile for Windows 10 x64 """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 9841
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_10586(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.10586.306 / 2016-04-23) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 10240
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_1AC738FB_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_14393(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86(obj.Profile):
""" A Profile for Windows 10 x86 """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 9841
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86_10586(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.10586.420 / 2016-05-28) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 10240
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_44B89EEA_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x86_14393(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_9619274A_vtypes'
_md_product = ["NtProductWinNt"]
class Win2016x64_14393(Win10x64_14393):
""" A Profile for Windows Server 2016 x64 (10.0.14393.0 / 2016-07-16) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 14393
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_DD08DD42_vtypes'
_md_product = ["NtProductLanManNt", "NtProductServer"]
class Win10x86_15063(obj.Profile):
""" A Profile for Windows 10 x86 (10.0.15063.0 / 2017-04-04) """
_md_memory_model = '32bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 15063
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x86_15063_vtypes'
_md_product = ["NtProductWinNt"]
class Win10x64_15063(obj.Profile):
""" A Profile for Windows 10 x64 (10.0.15063.0 / 2017-04-04) """
_md_memory_model = '64bit'
_md_os = 'windows'
_md_major = 6
_md_minor = 4
_md_build = 15063
_md_vtype_module = 'volatility.plugins.overlays.windows.win10_x64_15063_vtypes'
_md_product = ["NtProductWinNt"]
| gpl-2.0 | -771,911,055,582,781,700 | 30.026874 | 119 | 0.552744 | false | 3.533505 | false | false | false |
StefanWinterfeldt/Buildicator | messageSinks/consoleMessageSink.py | 1 | 2127 | # Copyright 2014 Stefan Winterfeldt <[email protected]>
# <[email protected]
# BITZ GmbH <[email protected]>
#
#This file is part of Buildicator.
#
#Buildicator is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Buildicator is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Buildicator. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the console message sink.
All message sink modules must implement the 'getInstance' method, returning
an instance of the message sink class that has been initialized with the
appropriate args dictionary.
"""
from messageSinks.abstractMessageSink import AbstractMessageSink
import libs.statusEnum as statusEnum
class ConsoleMessageSink(AbstractMessageSink):
"""A message sink that simply displays messages on the console.
This message sink uses the following arguments:
errorMessage - The message to display in case of an error status.
failureMessage - The message to display in case of a failure status.
successMessage - The message to display in case of a success status.
"""
def __init__(self, args):
self.errorMessage = args['errorMessage']
self.failureMessage = args['failureMessage']
self.successMessage = args['successMessage']
def showStatus(self, status):
if status == statusEnum.STATUS_ERROR:
print(self.errorMessage)
elif status == statusEnum.STATUS_FAILURE:
print(self.failureMessage)
elif status == statusEnum.STATUS_SUCCESS:
print(self.successMessage)
def getInstance(args):
return ConsoleMessageSink(args) | gpl-3.0 | 237,320,716,383,971,300 | 38.407407 | 76 | 0.720733 | false | 4.228628 | false | false | false |
IMIO/django-fixmystreet | django_fixmystreet/api/reports/serializers.py | 1 | 1936 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from . import models
class ReportAssignmentAcceptSerializer(serializers.Serializer):
reference_id = serializers.CharField()
comment = serializers.CharField(required=False)
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.reference_id = attrs.get("reference_id", instance.reference_id)
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentAccept(**attrs)
class ReportAssignmentRejectSerializer(serializers.Serializer):
comment = serializers.CharField()
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentReject(**attrs)
class ReportAssignmentCloseSerializer(serializers.Serializer):
reference_id = serializers.CharField()
comment = serializers.CharField(required=False)
created_at = serializers.DateTimeField()
def restore_object(self, attrs, instance=None):
# Update existing instance.
if instance:
instance.reference_id = attrs.get("reference_id", instance.reference_id)
instance.comment = attrs.get("comment", instance.comment)
instance.created_at = attrs.get("created_at", instance.created_at)
return instance
# Create new instance.
return models.ReportAssignmentClose(**attrs)
| agpl-3.0 | -65,268,529,041,024,920 | 33.571429 | 84 | 0.681302 | false | 4.687651 | false | false | false |
nickaugust/pychatkit | clients.py | 1 | 1973 | #!/usr/bin/env python
import asyncio
import logging
logger = logging.getLogger("chatkit:" + __name__)
class WSClientManager:
def __init__(self):
self._clients = []
def all(self):
return self._clients
def add(self, client):
logging.info("+ WSClient {}".format(client))
self._clients.append(client)
def remove(self, client):
logging.info("- WSClient {}".format(client))
self._clients.remove(client)
class WSClient:
objects = WSClientManager()
def __init__(self, server, ws, user=None, token=None):
self.server = server
self._ws = ws
self.user = user
self.token = token
WSClient.objects.add(self)
@asyncio.coroutine
def disconnect(self, message):
self.server.disconnect(self, message)
@asyncio.coroutine
def send(self, data):
if self._ws.state != "OPEN":
logging.info("WS state not OPEN, disconnecting" +
str(self.user))
self.disconnect("WS state not OPEN.")
return
logging.info("> {} {}".format(self.user, data))
yield from self._ws.send(data)
@asyncio.coroutine
def send_one(self, to_client, data):
if to_client._ws.state != "OPEN":
to_client.disconnect("WS state not OPEN.")
yield from to_client._ws.send(data)
logging.info("> {} {}".format(to_client.user, data))
@asyncio.coroutine
def send_all(self, from_client, data):
for c in WSClient.clients:
yield from self.send_one(c, data)
@asyncio.coroutine
def send_others(self, from_client, data):
for c in WSClient.clients:
if c != from_client:
yield from self.send_one(c, data)
@asyncio.coroutine
def get_others(self, client):
for c in WSClient.clients:
resp = "join {}".format(c.user.username)
yield from self.send_one(self, resp)
| mit | -1,395,320,659,051,184,000 | 26.788732 | 61 | 0.581855 | false | 3.838521 | false | false | false |
likit/BioUtils | fetch_entrez_from_geneid.py | 1 | 1631 | '''Selects protein sequences from NCBI that are in a list
from Geisha text file.
Output is written to standard output.
'''
import os
import sys
import time
from Bio import SeqIO, Entrez
def parse(infile):
'''Return a set of gene IDs from an input file.'''
for line in open(infile):
geneid = line.split()[0]
yield geneid
def fetch(geneid):
print >> sys.stderr, 'fetching.. gene ID: %s' % geneid
handle = Entrez.efetch(db='gene', retmode='xml', id=geneid)
xmldata = Entrez.read(handle)
product = xmldata[0]['Entrezgene_locus'][0]\
['Gene-commentary_products'][0]
prodtype = product['Gene-commentary_type'].attributes['value']
print >> sys.stderr, 'product type = %s' % (prodtype)
seq_gi = xmldata[0]['Entrezgene_locus'][0]\
['Gene-commentary_products'][0]\
['Gene-commentary_seqs'][0]\
['Seq-loc_whole']['Seq-id']\
['Seq-id_gi']
handle = Entrez.efetch(db='nucleotide', retmode='text',
rettype='fasta', id=seq_gi)
seq = SeqIO.read(handle, 'fasta')
return seq
def main():
infile = sys.argv[1]
Entrez.email = sys.argv[2]
outfile = os.path.splitext(infile)[0] + ".fa"
records = []
for geneid in parse(infile):
try:
records.append(fetch(geneid))
except:
print >> sys.stderr, 'Cannot retrieve a sequence'
continue
time.sleep(3)
SeqIO.write(records, outfile, 'fasta')
print >> sys.stderr, 'Total sequences = %d' % len(records)
if __name__=='__main__':
main()
| bsd-2-clause | -5,944,984,507,646,516,000 | 24.888889 | 66 | 0.578786 | false | 3.40501 | false | false | false |
forseti-security/forseti-security | tests/services/scanner/scanner_base_db.py | 1 | 4263 | """Helper base class for testing scanners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
from datetime import timedelta
import os
import unittest.mock as mock
from sqlalchemy.orm import sessionmaker
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.scanner import scanner
from google.cloud.forseti.services.inventory import storage
from google.cloud.forseti.services.scanner import dao as scanner_dao
from tests.services.util.db import create_test_engine_with_file
from tests.unittest_utils import ForsetiTestCase
FAKE_INV_INDEX_ID = 'aaa'
FAKE_VIOLATION_HASH = (u'111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111'
'11111111111111111111')
FAKE_VIOLATIONS = [
{'resource_id': 'fake_firewall_111',
'full_name': 'full_name_111',
'rule_name': 'disallow_all_ports_111',
'rule_index': 111,
'violation_data':
{'policy_names': ['fw-tag-match_111'],
'recommended_actions':
{'DELETE_FIREWALL_RULES': ['fw-tag-match_111']}},
'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_111',
'resource_type': 'firewall_rule',
'resource_data': 'inventory_data_111',
'resource_name': 'fw-tag-match_111',
},
{'resource_id': 'fake_firewall_222',
'full_name': 'full_name_222',
'rule_name': 'disallow_all_ports_222',
'rule_index': 222,
'violation_data':
{'policy_names': ['fw-tag-match_222'],
'recommended_actions':
{'DELETE_FIREWALL_RULES': ['fw-tag-match_222']}},
'violation_type': 'FIREWALL_BLACKLIST_VIOLATION_222',
'resource_type': 'firewall_rule',
'resource_data': 'inventory_data_222',
'resource_name': 'fw-tag-match_222',
}
]
# pylint: disable=bad-indentation
class ScannerBaseDbTestCase(ForsetiTestCase):
"""Base class for database centric tests."""
def setUp(self):
"""Setup method."""
ForsetiTestCase.setUp(self)
self.engine, self.dbfile = create_test_engine_with_file()
session_maker = sessionmaker()
self.session = session_maker(bind=self.engine)
storage.initialize(self.engine)
scanner_dao.initialize(self.engine)
self.session.flush()
self.violation_access = scanner_dao.ViolationAccess(self.session)
self.inv_index_id1, self.inv_index_id2, self.inv_index_id3 = (
_setup_inv_indices(self.session))
def tearDown(self):
"""Teardown method."""
os.unlink(self.dbfile)
ForsetiTestCase.tearDown(self)
def populate_db(
self, violations=FAKE_VIOLATIONS, inv_index_id=FAKE_INV_INDEX_ID,
scanner_index_id=None, succeeded=['IamPolicyScanner'], failed=[]):
"""Populate the db with violations.
Args:
violations (dict): the violations to write to the test database
inv_index_id (str): the inventory index to use
scanner_index_id (str): the scanner index to use
succeeded (list): names of scanners that ran successfully
failed (list): names of scanners that failed
"""
if not scanner_index_id:
scanner_index_id = scanner.init_scanner_index(
self.session, inv_index_id)
self.violation_access.create(violations, scanner_index_id)
scanner.mark_scanner_index_complete(
self.session, scanner_index_id, succeeded, failed)
return scanner_index_id
def _setup_inv_indices(session):
"""The method under test returns the newest `ScannerIndex` row."""
with mock.patch.object(date_time, 'get_utc_now_datetime') as mock_date_time:
time1 = datetime.utcnow()
time2 = time1 + timedelta(minutes=5)
time3 = time1 + timedelta(minutes=7)
mock_date_time.side_effect = [time1, time2, time3]
iidx1 = storage.InventoryIndex.create()
iidx2 = storage.InventoryIndex.create()
iidx3 = storage.InventoryIndex.create()
session.add(iidx1)
session.add(iidx2)
session.add(iidx3)
session.flush()
return (iidx1.id, iidx2.id, iidx3.id)
| apache-2.0 | -3,047,828,724,957,816,300 | 36.394737 | 80 | 0.65658 | false | 3.606599 | true | false | false |
vatlab/SOS | src/sos/tasks.py | 1 | 77822 | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import copy
import os
import fasteners
import pickle
import time
import lzma
import math
import struct
from enum import Enum
from collections import namedtuple
from collections.abc import Sequence
from datetime import datetime
from typing import Union, Dict, List
from .utils import (
env,
expand_time,
linecount_of_file,
sample_lines,
short_repr,
tail_of_file,
pretty_size,
expand_size,
format_HHMMSS,
DelayedAction,
format_duration,
)
from .targets import sos_targets
monitor_interval = 5
resource_monitor_interval = 60
class TaskParams(object):
"""A parameter object that encaptulates parameters sending to
task executors. This would makes the output of workers, especially
in the web interface much cleaner (issue #259)"""
def __init__(self, name, global_def, task, sos_dict, tags):
self.name = name
self.global_def = global_def
self.task = task
self.sos_dict = sos_dict
self.tags = tags
# remove builtins that could be saved in a dictionary
if "CONFIG" in self.sos_dict and "__builtins__" in self.sos_dict["CONFIG"]:
self.sos_dict["CONFIG"].pop("__builtins__")
def __repr__(self):
return self.name
class MasterTaskParams(TaskParams):
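    """Parameters of a master task that aggregates multiple subtasks (kept in
    self.task_stack) so that they can be submitted and executed together, with
    runtime resources such as walltime, mem, and cores scaled accordingly."""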
def __init__(self, num_workers=None):
self.ID = "t0"
self.name = self.ID
self.global_def = ""
self.task = ""
self.sos_dict = {
"_runtime": {"num_workers": num_workers},
"_input": sos_targets(),
"_output": sos_targets(),
"_depends": sos_targets(),
"step_input": sos_targets(),
"step_output": sos_targets(),
"step_depends": sos_targets(),
"step_name": "",
"_index": 0,
}
self.tags = []
# a collection of tasks that will be executed by the master task
self.task_stack = []
def _parse_num_workers(self, num_workers):
# return number of nodes and workers
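        # trunk_workers may be an integer, a string (possibly of the form
        # "xxx:count", in which case only the part after the last ':' is used
        # as the worker count), or a sequence of such values with one entry per
        # compute node. Note that a str is itself a Sequence, so it is excluded
        # from the first branch below and handled by the dedicated str branch.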
        if (
            isinstance(num_workers, Sequence)
            and not isinstance(num_workers, str)
            and len(num_workers) >= 1
        ):
val = str(num_workers[0])
n_workers = val.rsplit(":", 1)[-1] if ":" in val else val
n_nodes = len(num_workers)
elif isinstance(num_workers, str):
n_workers = (
num_workers.rsplit(":", 1)[-1] if ":" in num_workers else num_workers
)
n_nodes = 1
elif isinstance(num_workers, int):
n_workers = num_workers
n_nodes = 1
elif num_workers is None:
n_workers = 1
n_nodes = 1
else:
raise RuntimeError(
f"Unacceptable value for parameter trunk_workers {num_workers}"
)
try:
n_workers = int(n_workers)
except Exception:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}"
)
if n_workers <= 0:
raise ValueError(
f"Unacceptable value for option trunk_workers {num_workers}"
)
return n_nodes, n_workers
def num_tasks(self):
return len(self.task_stack)
def push(self, task_id, params):
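        """Add a subtask (task_id, params) to the master task, merging its
        runtime resources, tags, input, output, and depends into those of the
        master task."""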
# update walltime, cores, and mem
        # right now we require all tasks to have the same resource requirement, which
        # is quite natural because they are from the same step
#
# update input, output, and depends
#
# walltime etc
n_nodes, n_workers = self._parse_num_workers(
self.sos_dict["_runtime"]["num_workers"]
)
if not self.task_stack:
for key in (
"walltime",
"max_walltime",
"cores",
"nodes",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
"verbosity",
"sig_mode",
"run_mode",
):
if (
key in params.sos_dict["_runtime"]
and params.sos_dict["_runtime"][key] is not None
):
self.sos_dict["_runtime"][key] = params.sos_dict["_runtime"][key]
self.sos_dict["step_name"] = params.sos_dict["step_name"]
self.tags = params.tags
else:
for key in (
"walltime",
"max_walltime",
"cores",
"max_cores",
"mem",
"max_mem",
"name",
"workdir",
):
val0 = self.task_stack[0][1].sos_dict["_runtime"].get(key, None)
val = params.sos_dict["_runtime"].get(key, None)
if val0 != val:
raise ValueError(f"All tasks should have the same resource {key}")
if val0 is None:
continue
# If there are multiple nodes and multiple workers, there are
# n_workers * n_nodes workers at the same time, so the jobs
# will be completed in n_batches
n_batches = math.ceil(
(len(self.task_stack) + 1) / (n_workers * n_nodes)
)
if key == "walltime":
# the real walltime would be the total time on one node
self.sos_dict["_runtime"]["walltime"] = format_HHMMSS(
n_batches * expand_time(val0)
)
elif key == "mem":
                    # total memory is the per-task memory times the number of
                    # concurrent workers
self.sos_dict["_runtime"]["mem"] = n_workers * expand_size(val0)
elif key == "cores":
self.sos_dict["_runtime"]["cores"] = n_workers * val0
elif key == "name":
self.sos_dict["_runtime"][
"name"
] = f"{val0}_{len(self.task_stack) + 1}"
self.tags.extend(params.tags)
        # if cores is unspecified but there is more than one worker
if (
"cores" not in self.sos_dict["_runtime"]
and n_workers is not None
and n_workers > 1
):
self.sos_dict["_runtime"]["cores"] = n_workers
#
# input, output, preserved vars etc
for key in ["_input", "_output", "_depends"]:
if key in params.sos_dict and isinstance(params.sos_dict[key], sos_targets):
if key == "__builtins__":
continue
# do not extend duplicated input etc
self.sos_dict[key].extend(params.sos_dict[key])
#
self.task_stack.append([task_id, params])
self.tags = sorted(list(set(self.tags)))
#
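        # the master task ID is the prefix 't<n>' (n = number of subtasks)
        # followed by the ID of the first subtask with the same number of
        # trailing characters dropped, so the overall ID length is unchanged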
id_prefix = f't{len(self.task_stack)}'
self.ID = f"{id_prefix}{self.task_stack[0][0][:-(len(id_prefix))]}"
self.name = self.ID
def finalize(self):
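        """Extract dictionary items that are identical across all subtasks
        into self.common_dict so that they are stored only once with the
        master task."""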
if not self.task_stack:
return
common_dict = None
common_keys = set()
for _, params in self.task_stack:
if common_dict is None:
common_dict = params.sos_dict
common_keys = set(params.sos_dict.keys())
else:
common_keys = {
key
for key in common_keys
if key in params.sos_dict
and common_dict[key] == params.sos_dict[key]
}
if not common_keys:
break
        # if there is only one subtask, _output would be moved out of subtasks, which
        # makes the retrieval of outputs difficult.
common_keys.discard("_output")
self.common_dict = {x: common_dict[x] for x in common_keys}
for _, params in self.task_stack:
params.sos_dict = {
k: v for k, v in params.sos_dict.items() if k not in common_keys
}
#
n_nodes = self._parse_num_workers(self.sos_dict["_runtime"]["num_workers"])[0]
# trunk_workers and cores cannot be specified together, so if n_nodes > 1,
# nodes should not have been specified.
if n_nodes is not None and n_nodes > 1:
self.sos_dict["_runtime"]["nodes"] = n_nodes
return self
def combine_results(task_id, results):
# now we collect result
all_res = {
"ret_code": 0,
"output": None,
"subtasks": {},
"shared": {},
"skipped": 0,
"signature": {},
}
for res in results:
tid = res["task"]
all_res["subtasks"][tid] = res
if "exception" in res:
all_res["exception"] = res["exception"]
all_res["ret_code"] += 1
continue
all_res["ret_code"] += res["ret_code"]
if all_res["output"] is None:
all_res["output"] = copy.deepcopy(res["output"])
else:
try:
all_res["output"].extend(res["output"], keep_groups=True)
except Exception:
env.logger.warning(
f"Failed to extend output {all_res['output']} with {res['output']}"
)
all_res["shared"].update(res["shared"])
# does not care if one or all subtasks are executed or skipped.
all_res["skipped"] += res.get("skipped", 0)
if "signature" in res:
all_res["signature"].update(res["signature"])
if all_res["ret_code"] != 0:
if all_res["ret_code"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(f"All {len(results)} tasks in {task_id} ``failed``")
else:
env.logger.debug(f"All {len(results)} tasks in {task_id} ``failed``")
else:
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
else:
env.logger.debug(
f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
)
        # if some tasks failed and some were skipped, do not report the run as skipped
if "skipped" in all_res:
all_res.pop("skipped")
elif all_res["skipped"]:
if all_res["skipped"] == len(results):
if env.config["run_mode"] == "run":
env.logger.info(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
env.logger.debug(
f"All {len(results)} tasks in {task_id} ``ignored`` or skipped"
)
else:
# if only partial skip, we still save signature and result etc
if env.config["run_mode"] == "run":
env.logger.info(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
else:
env.logger.debug(
f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
)
all_res.pop("skipped")
else:
if env.config["run_mode"] == "run":
env.logger.info(f"All {len(results)} tasks in {task_id} ``completed``")
else:
env.logger.debug(f"All {len(results)} tasks in {task_id} ``completed``")
return all_res
class TaskStatus(Enum):
new = 0
pending = 1
submitted = 2
running = 3
aborted = 4
failed = 5
completed = 6
class TaskFile(object):
"""
The task file has the following format:
1. A binary header with the information of the structure of the file
with field defined by TaskHeader
2. compressed pickled param of task
3. compressed pulse file
4. compressed pickled result
5. compressed stdout
6. compressed stderr
7. compressed pickled signatures
"""
TaskHeader_v1 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v2 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader_v3 = namedtuple(
"TaskHeader",
"version status last_modified "
"new_time pending_time submitted_time running_time aborted_time failed_time completed_time "
"params_size runtime_size shell_size pulse_size stdout_size stderr_size result_size signature_size "
"tags",
)
TaskHeader = TaskHeader_v3
header_fmt_v1 = "!2h 8d 6i 128s"
header_fmt_v2 = "!2h 8d 7i 124s"
header_fmt_v3 = "!2h 8d 8i 120s"
header_fmt = header_fmt_v3
header_size = 220 # struct.calcsize(header_fmt)
tags_offset = [92, 96, 100] # struct.calcsize(status_fmt + '6i')
tags_size = [128, 124, 120]
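    # Size check for the v3 layout, worked out for reference: "!2h 8d 8i 120s" is
    # 4 + 64 + 32 + 120 = 220 bytes (header_size above), and the tags field starts
    # right after the numeric fields at offset 4 + 64 + 32 = 100 (tags_offset[2]).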
def __init__(self, task_id: str):
self.task_id = task_id
self.task_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task_id + ".task"
)
def save(self, params):
if os.path.isfile(self.task_file):
if self.status == "running":
env.logger.debug(f"Task {self.task_id} is running and is not updated")
return
# keep original stuff but update params, which could contain
# new runtime info
self.params = params
return
        # updating the job file will not change its timestamp, because only
        # the runtime info is updated
now = time.time()
# we keep in both places because params.tags is the only place to have it for subtasks
tags = params.tags
params_block = lzma.compress(pickle.dumps(params))
# env.logger.error(f'saving {self.task_id} params of size {len(params_block)}')
header = self.TaskHeader(
version=3,
status=TaskStatus.new.value,
last_modified=now,
new_time=now,
pending_time=0,
running_time=0,
submitted_time=0,
aborted_time=0,
failed_time=0,
completed_time=0,
params_size=len(params_block),
runtime_size=0,
shell_size=0,
pulse_size=0,
stdout_size=0,
stderr_size=0,
result_size=0,
signature_size=0,
tags=" ".join(sorted(tags)).ljust(128).encode(),
)
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "wb+") as fh:
self._write_header(fh, header)
fh.write(params_block)
def exists(self):
return os.path.isfile(self.task_file)
def _reset(self, fh):
# remove result, input, output etc and set the status of the task to new
header = self._read_header(fh)
now = time.time()
header = header._replace(
            version=3,
status=TaskStatus.new.value,
last_modified=now,
new_time=now,
pending_time=0,
submitted_time=0,
running_time=0,
aborted_time=0,
failed_time=0,
completed_time=0,
runtime_size=0,
shell_size=0,
pulse_size=0,
stdout_size=0,
stderr_size=0,
result_size=0,
signature_size=0,
)
self._write_header(fh, header)
fh.truncate(self.header_size + header.params_size)
return header
def reset(self):
# remove result, input, output etc and set the status of the task to new
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
self._reset(fh)
def _read_header(self, fh):
fh.seek(0, 0)
data = fh.read(self.header_size)
if struct.unpack("!h", data[:2])[0] == 1:
header = self.TaskHeader_v1._make(struct.unpack(self.header_fmt_v1, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return self.TaskHeader(
runtime_size=0, shell_size=0, **header._asdict()
)._replace(version=3)
if struct.unpack("!h", data[:2])[0] == 2:
header = self.TaskHeader_v2._make(struct.unpack(self.header_fmt_v2, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return self.TaskHeader(runtime_size=0, **header._asdict())._replace(
version=3
)
header = self.TaskHeader._make(struct.unpack(self.header_fmt, data))
if header.version not in (1, 2, 3):
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
return header
def _write_header(self, fh, header):
fh.seek(0, 0)
fh.write(struct.pack(self.header_fmt, *header))
def _get_content(self, exts):
if isinstance(exts, str):
exts = [exts]
content = b""
for ext in exts:
filename = self.task_file[:-5] + ext
if not os.path.isfile(filename):
continue
with open(filename, "rb") as fh:
content += fh.read()
if not content:
return b""
return lzma.compress(content)
def add_outputs(self, keep_result=False):
# get header
shell = self._get_content(".sh")
pulse = self._get_content(".pulse")
stdout = self._get_content([".out", ".sosout"])
stderr = self._get_content([".err", ".soserr"])
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if header.result_size != 0:
if not keep_result:
result_size = 0
signature_size = 0
else:
result_size = header.result_size
signature_size = header.signature_size
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size,
0,
)
result = fh.read(header.result_size)
signature = fh.read(header.signature_size)
else:
result_size = 0
signature_size = 0
header = header._replace(
shell_size=len(shell),
pulse_size=len(pulse),
stdout_size=len(stdout),
stderr_size=len(stderr),
result_size=result_size,
signature_size=signature_size,
)
self._write_header(fh, header)
fh.seek(self.header_size + header.params_size + header.runtime_size, 0)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result_size > 0:
fh.write(result)
if signature_size > 0:
fh.write(signature)
def add_result(self, result: dict = {}):
if not result:
params = self._get_params()
# this is a master task, get all sub task IDs
if hasattr(params, "task_stack"):
missing_tasks = set([x[0] for x in params.task_stack])
#
cache_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", self.task_id + ".cache"
)
results = []
if os.path.isfile(cache_file):
try:
with open(cache_file, "rb") as f:
while True:
res = pickle.load(f)
if not "task" in res:
# something is wrong
break
missing_tasks.remove(res["task"])
results.append(res)
os.remove(cache_file)
except Exception:
# we read until an error occurs
pass
if not results:
# if there is no result at all, do not save result
return
else:
# now, if we have some results, we need to fill the rest of the aborted ones
results.extend(
[
{
"task": t,
"ret_code": 2,
"shared": {},
"exception": RuntimeError(f"Subtask {t} is aborted"),
}
for t in missing_tasks
]
)
result = combine_results(self.task_id, results)
else:
# single task, no result, do not save
return
# add signature if exists
signature = result.get("signature", {})
result.pop("signature", None)
#
result_block = lzma.compress(pickle.dumps(result))
signature_block = lzma.compress(pickle.dumps(signature)) if signature else b""
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
header = header._replace(
result_size=len(result_block),
signature_size=len(signature_block),
)
self._write_header(fh, header)
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
)
fh.write(result_block)
if signature:
fh.write(signature_block)
def _get_info(self):
with open(self.task_file, "rb") as fh:
return self._read_header(fh)
def _set_info(self, info):
with open(self.task_file, "r+b") as fh:
fh.write(struct.pack(self.header_fmt, *info))
info = property(_get_info, _set_info)
def has_shell(self):
return self.info.shell_size > 0
def has_pulse(self):
return self.info.pulse_size > 0
def has_result(self):
return self.info.result_size > 0
def has_stdout(self):
return self.info.stdout_size > 0
def has_stderr(self):
return self.info.stderr_size > 0
def has_signature(self):
return self.info.signature_size > 0
def _get_params(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.params_size == 0 and header.runtime_size == 0:
return {}
fh.seek(self.header_size, 0)
if header.params_size == 0:
return {}
else:
try:
return pickle.loads(lzma.decompress(fh.read(header.params_size)))
except Exception as e:
raise RuntimeError(
f"Failed to obtain params of task {self.task_id}: {e}"
)
def _set_params(self, params):
params_block = lzma.compress(pickle.dumps(params))
# env.logger.error(f'updating {self.task_id} params of size {len(params_block)}')
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if len(params_block) == header.params_size:
fh.seek(self.header_size, 0)
fh.write(params_block)
else:
fh.read(header.params_size)
runtime = fh.read(header.runtime_size)
shell = fh.read(header.shell_size)
pulse = fh.read(header.pulse_size)
stdout = fh.read(header.stdout_size)
stderr = fh.read(header.stderr_size)
result = fh.read(header.result_size)
signature = fh.read(header.signature_size)
header = header._replace(params_size=len(params_block))
self._write_header(fh, header)
fh.write(params_block)
if runtime:
fh.write(runtime)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result:
fh.write(result)
if signature:
fh.write(signature)
fh.truncate(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size
+ header.signature_size
)
params = property(_get_params, _set_params)
def _get_runtime(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.runtime_size == 0:
return {}
fh.seek(self.header_size + header.params_size, 0)
try:
return pickle.loads(lzma.decompress(fh.read(header.runtime_size)))
except Exception as e:
env.logger.error(
f"Failed to obtain runtime of task {self.task_id}: {e}"
)
return {"_runtime": {}}
def _set_runtime(self, runtime):
runtime_block = lzma.compress(pickle.dumps(runtime))
# env.logger.error(f'updating {self.task_id} params of size {len(params_block)}')
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
header = self._read_header(fh)
if len(runtime_block) == header.runtime_size:
fh.seek(self.header_size + header.params_size, 0)
fh.write(runtime_block)
else:
params = fh.read(header.params_size)
fh.seek(
self.header_size + header.params_size + header.runtime_size, 0
)
shell = fh.read(header.shell_size) if header.shell_size else b""
pulse = fh.read(header.pulse_size) if header.pulse_size else b""
stdout = fh.read(header.stdout_size) if header.stdout_size else b""
stderr = fh.read(header.stderr_size) if header.stderr_size else b""
result = fh.read(header.result_size) if header.result_size else b""
signature = (
fh.read(header.signature_size) if header.signature_size else b""
)
header = header._replace(runtime_size=len(runtime_block))
self._write_header(fh, header)
fh.write(params)
fh.write(runtime_block)
if shell:
fh.write(shell)
if pulse:
fh.write(pulse)
if stdout:
fh.write(stdout)
if stderr:
fh.write(stderr)
if result:
fh.write(result)
if signature:
fh.write(signature)
fh.truncate(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size
+ header.signature_size
)
runtime = property(_get_runtime, _set_runtime)
def get_params_and_runtime(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.params_size == 0 and header.runtime_size == 0:
return {}
fh.seek(self.header_size, 0)
if header.params_size == 0:
params = {}
else:
try:
params = pickle.loads(lzma.decompress(fh.read(header.params_size)))
except Exception as e:
env.logger.error(
f"Failed to obtain params with runtime of task {self.task_id}: {e}"
)
params = {}
if "_runtime" not in params.sos_dict:
params.sos_dict["_runtime"] = {}
if header.runtime_size > 0:
try:
runtime = pickle.loads(
lzma.decompress(fh.read(header.runtime_size))
)
except Exception as e:
env.logger.error(
f"Failed to obtain runtime of task {self.task_id}: {e}"
)
runtime = {"_runtime": {}}
else:
runtime = {"_runtime": {}}
return params, runtime
def _get_status(self):
if not os.path.isfile(self.task_file):
return "missing"
try:
with open(self.task_file, "rb") as fh:
fh.seek(2, 0)
return TaskStatus(struct.unpack("!h", fh.read(2))[0]).name
except Exception as e:
env.logger.warning(
f"Incompatible task file {self.task_file} is removed. This might was most likely generated by a previous version of SoS but please report a bug if you can reproduce this warning message: {e}"
)
os.remove(self.task_file)
def _get_version(self):
with open(self.task_file, "rb") as fh:
fh.seek(0, 0)
return struct.unpack("!h", fh.read(2))[0]
version = property(_get_version)
def _get_last_updated(self):
with open(self.task_file, "rb") as fh:
fh.seek(4, 0)
return struct.unpack("!d", fh.read(8))[0]
last_updated = property(_get_last_updated)
def _set_status(self, status):
with fasteners.InterProcessLock(
os.path.join(env.temp_dir, self.task_id + ".lck")
):
with open(self.task_file, "r+b") as fh:
fh.seek(2, 0)
if status == "skipped":
# special status, set completed_time = running_time
# to make sure duration is zero
now = time.time()
sts = TaskStatus["completed"].value
# update status and last modified
fh.write(struct.pack("!hd", sts, now))
                    # also set running_time
fh.seek(3 * 8, 1)
fh.write(struct.pack("!d", now))
                    # skip aborted_time and failed_time to reach completed_time
fh.seek(2 * 8, 1)
fh.write(struct.pack("!d", now))
else:
if status == "running":
# setting to running status ... refresh the pulse file
pulse_file = os.path.join(
os.path.expanduser("~"),
".sos",
"tasks",
self.task_id + ".pulse",
)
with open(pulse_file, "w") as pd:
pd.write(f"#task: {self.task_id}\n")
pd.write(
f'#started at {datetime.now().strftime("%A, %d. %B %Y %I:%M%p")}\n#\n'
)
# wait for the pulse file to be created before updating task status
while True:
if os.path.isfile(pulse_file):
break
else:
time.sleep(0.01)
# if completed, we make sure that the duration will not
# be zero even if the task is completed very rapidly
now = time.time() + (0.01 if status == "completed" else 0)
sts = TaskStatus[status].value
# update status and last modified
fh.write(struct.pack("!hd", sts, now))
# from the current location, move by status
fh.seek(sts * 8, 1)
fh.write(struct.pack("!d", now))
                # when the task reaches a terminal (or pending) state, make sure
                # all files that are no longer relevant are removed
if status in ("aborted", "completed", "failed", "pending"):
# terminal status
remove_task_files(
self.task_id,
[
".sh",
".job_id",
".sosout",
".soserr",
".out",
".err",
".pulse",
".cache",
],
)
status = property(_get_status, _set_status)
def _get_tags(self):
try:
with open(self.task_file, "rb") as fh:
fh.seek(0, 0)
ver = struct.unpack("!h", fh.read(2))[0]
fh.seek(self.tags_offset[ver - 1], 0)
return fh.read(self.tags_size[ver - 1]).decode().strip()
except Exception:
raise RuntimeError(
f"Corrupted task file {self.task_file}. Please report a bug if you can reproduce the generation of this file."
)
def _set_tags(self, tags: list):
with open(self.task_file, "r+b") as fh:
fh.seek(0, 0)
ver = struct.unpack("!h", fh.read(2))[0]
fh.seek(self.tags_offset[ver - 1], 0)
fh.write(" ".join(sorted(tags)).ljust(self.tags_size[ver - 1]).encode())
tags = property(_get_tags, _set_tags)
def _get_shell(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.shell_size == 0:
return ""
fh.seek(self.header_size + header.params_size + header.runtime_size, 0)
try:
return lzma.decompress(fh.read(header.shell_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode shell: {e}")
return ""
shell = property(_get_shell)
def _get_pulse(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.pulse_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size,
0,
)
try:
return lzma.decompress(fh.read(header.pulse_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode pulse: {e}")
return ""
pulse = property(_get_pulse)
def _get_stdout(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.stdout_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.pulse_size
+ header.shell_size,
0,
)
try:
return lzma.decompress(fh.read(header.stdout_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode stdout: {e}")
return ""
stdout = property(_get_stdout)
def _get_stderr(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.stderr_size == 0:
return ""
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size,
0,
)
try:
return lzma.decompress(fh.read(header.stderr_size)).decode()
except Exception as e:
env.logger.warning(f"Failed to decode stderr: {e}")
return ""
stderr = property(_get_stderr)
def _get_result(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.result_size == 0:
return {}
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size,
0,
)
try:
return pickle.loads(lzma.decompress(fh.read(header.result_size)))
except Exception as e:
env.logger.warning(f"Failed to decode result: {e}")
return {"ret_code": 1}
result = property(_get_result)
def _get_signature(self):
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
if header.signature_size == 0:
return {}
fh.seek(
self.header_size
+ header.params_size
+ header.runtime_size
+ header.shell_size
+ header.pulse_size
+ header.stdout_size
+ header.stderr_size
+ header.result_size,
0,
)
try:
return pickle.loads(lzma.decompress(fh.read(header.signature_size)))
except Exception as e:
env.logger.warning(f"Failed to decode signature: {e}")
return {"ret_code": 1}
signature = property(_get_signature)
def tags_created_start_and_duration(self, formatted=False):
try:
with open(self.task_file, "rb") as fh:
header = self._read_header(fh)
try:
tags = header.tags.decode().strip()
except Exception:
raise ValueError(
f"{self.task_file} is in a format that is no longer supported."
)
ct = header.new_time
if header.running_time != 0:
st = header.running_time
if TaskStatus(header.status) == TaskStatus.running:
dr = time.time() - st
else:
dr = header.last_modified - st
else:
return (
tags,
("Created " + format_duration(time.time() - ct, True) + " ago")
if formatted
else ct,
"",
"",
)
if not formatted:
return tags, ct, st, dr
#
return (
tags,
"Created " + format_duration(time.time() - ct, True) + " ago",
"Started " + format_duration(time.time() - st) + " ago",
("Ran for " + format_duration(int(dr)))
if dr > 0
else "Signature checked",
)
except Exception:
# missing tag file or something went wrong
return "", "", "", ""
def taskDuration(task):
filename = os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{task}.task")
return os.path.getatime(filename) - os.path.getmtime(filename)
def remove_task_files(task: str, exts: list):
task_dir = os.path.join(os.path.expanduser("~"), ".sos", "tasks")
for ext in exts:
filename = os.path.join(task_dir, task + ext)
if os.path.isfile(filename):
try:
os.remove(filename)
except Exception:
# if the file cannot be removed now, we use a thread to wait a
# bit and try to remove it later. The function should not
# wait for the thread though
try:
DelayedAction(os.remove, filename)
except Exception:
pass
def check_task(task, hint={}) -> Dict[str, Union[str, Dict[str, float]]]:
    # when checking against the hint: if the recorded timestamp is 0, the file did not
    # exist originally and it should still not exist. Otherwise the file should exist
    # and have the same timestamp.
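    # Illustrative (assumed) shape of a hint, matching the dicts returned below:
    #   {"status": "completed",
    #    "files": {"~/.sos/tasks/<id>.task": 1590000000.0,
    #              "~/.sos/tasks/<id>.pulse": 0}}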
if (
hint
and hint["status"] not in ("pending", "running")
and all(
(os.path.isfile(f) and os.stat(f).st_mtime == v)
if v
else (not os.path.isfile(f))
for f, v in hint["files"].items()
)
):
return {}
# status of the job, please refer to https://github.com/vatlab/SOS/issues/529
# for details.
#
task_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".task")
if not os.path.isfile(task_file):
return dict(status="missing", files={task_file: 0})
mtime = os.stat(task_file).st_mtime
def task_changed():
return os.stat(task_file).st_mtime != mtime
tf = TaskFile(task)
status = tf.status
if status in ["failed", "completed", "aborted"]:
        # these are terminal states. We simply return them;
        # only a change of the task file will trigger a recheck of the status
stdout_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".sosout"
)
stderr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"
)
# 1242
if os.path.isfile(stdout_file) or os.path.isfile(stderr_file):
tf.add_outputs(keep_result=True)
# 1323
tf.add_result()
remove_task_files(task, [".sosout", ".soserr", ".out", ".err"])
# stdout and stderr files should not exist
status_files = {
task_file: os.stat(task_file).st_mtime,
stdout_file: 0,
stderr_file: 0,
}
return dict(status=status, files=status_files)
pulse_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".pulse")
# check the existence and validity of .pulse file
if os.path.isfile(pulse_file):
try:
status_files = {
task_file: os.stat(task_file).st_mtime,
pulse_file: os.stat(pulse_file).st_mtime,
}
# if we have hint, we know the time stamp of last
# status file.
if (
not hint
or pulse_file not in hint["files"]
or status_files[pulse_file] != hint["files"][pulse_file]
):
return dict(status="running", files=status_files)
elapsed = time.time() - status_files[pulse_file]
if elapsed < 60:
return dict(status="running", files=status_files)
syserr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".err"
)
            # if the system does not return any error message, write a sos-specific one
if os.path.isfile(syserr_file) and os.path.getsize(syserr_file) > 0:
try:
with open(syserr_file) as syserr:
env.logger.warning("".join(syserr.readlines()[-5:]))
except Exception as e:
env.logger.warning(
f"{task} is suspected to be killed but {syserr_file} cannot be read: {e}"
)
else:
soserr_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"
)
with open(soserr_file, "a") as err:
err.write(
f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed."
)
env.logger.warning(
f"Task {task} inactive for more than {int(elapsed)} seconds, might have been killed."
)
tf.add_outputs()
# 1323
tf.add_result()
# assume aborted
tf.status = "aborted"
return dict(
status="aborted",
files={task_file: os.stat(task_file).st_mtime, pulse_file: 0},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
raise
elif status == "running":
        # the start of a task creates a pulse file. If the pulse file is gone
        # while the status still shows as running, something is wrong and the
        # task is treated as aborted.
tf.status = "aborted"
with open(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"),
"a",
) as err:
err.write(f"Task {task} considered as aborted due to missing pulse file.")
env.logger.warning(
f"Task {task} considered as aborted due to missing pulse file."
)
tf.add_outputs()
# 1323
tf.add_result()
return dict(
status="aborted",
files={task_file: os.stat(task_file).st_mtime, pulse_file: 0},
)
# if there is no pulse file
job_file = os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".sh")
def has_job():
job_id_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ".job_id"
)
return (
os.path.isfile(job_file)
and os.stat(job_file).st_mtime >= os.stat(task_file).st_mtime
and os.path.isfile(job_id_file)
and os.stat(job_id_file).st_mtime >= os.stat(job_file).st_mtime
)
if has_job():
try:
if status != "submitted":
tf.status = "submitted"
return dict(
status="submitted",
files={
task_file: os.stat(task_file).st_mtime,
job_file: os.stat(job_file).st_mtime,
pulse_file: 0,
},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
else:
raise
else:
# status not changed
try:
if (
hint
and hint["status"] in ("new", "pending")
and hint["files"][task_file] == os.stat(task_file).st_mtime
):
return {}
else:
return dict(
status=status,
files={task_file: os.stat(task_file).st_mtime, job_file: 0},
)
except Exception:
# the pulse file could disappear when the job is completed.
if task_changed():
return check_task(task)
else:
raise
def check_tasks(tasks, is_all: bool):
if not tasks:
return {}
cache_file: str = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle"
)
#
status_cache: Dict = {}
if os.path.isfile(cache_file):
try:
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "rb") as cache:
status_cache = pickle.load(cache)
except Exception:
# if the cache file is corrupted, remove it. #1275
os.remove(cache_file)
# at most 20 threads
from multiprocessing.pool import ThreadPool as Pool
p = Pool(min(20, len(tasks)))
# the result can be {} for unchanged, or real results
raw_status = p.starmap(check_task, [(x, status_cache.get(x, {})) for x in tasks])
# if check all, we clear the cache and record all existing tasks
has_changes: bool = any(x for x in raw_status)
if has_changes:
if is_all:
status_cache = {
k: v if v else status_cache[k] for k, v in zip(tasks, raw_status)
}
else:
status_cache.update({k: v for k, v in zip(tasks, raw_status) if v})
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "wb") as cache:
pickle.dump(status_cache, cache)
return status_cache
def print_task_status(
tasks,
check_all=False,
verbosity: int = 1,
html: bool = False,
numeric_times=False,
age=None,
tags=None,
status=None,
):
# # verbose is ignored for now
# if not check_all and not tasks:
# from .signatures import WorkflowSignatures
# workflow_signatures = WorkflowSignatures()
# tasks = [
# x for x in workflow_signatures.tasks() if os.path.isfile(
# os.path.join(
# os.path.expanduser('~'), '.sos', 'tasks', x + '.task'))
# ]
import glob
all_tasks: List = []
if check_all:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks]
if not all_tasks:
return
else:
for t in tasks:
matched_names = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [
(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched_names
]
if not matched:
all_tasks.append((t, None))
else:
all_tasks.extend(matched)
if age is not None:
age = expand_time(age, default_unit="d")
if age > 0:
all_tasks = [x for x in all_tasks if time.time() - x[1] >= age]
else:
all_tasks = [x for x in all_tasks if time.time() - x[1] <= -age]
all_tasks = sorted(list(set(all_tasks)), key=lambda x: 0 if x[1] is None else x[1])
if tags:
all_tasks = [
x
for x in all_tasks
if TaskFile(x[0]).exists()
and any(y in tags for y in TaskFile(x[0]).tags.split())
]
if not all_tasks:
env.logger.debug("No matching tasks are identified.")
return
raw_status = check_tasks([x[0] for x in all_tasks], check_all)
obtained_status = [raw_status[x[0]]["status"] for x in all_tasks]
#
# automatically remove non-running tasks that are more than 30 days old
to_be_removed = [
t
for s, (t, d) in zip(obtained_status, all_tasks)
if d is not None and time.time() - d > 30 * 24 * 60 * 60 and s != "running"
]
if status:
all_tasks = [x for x, s in zip(all_tasks, obtained_status) if s in status]
obtained_status = [x for x in obtained_status if x in status]
#
from .monitor import summarizeExecution
if html:
# HTML output
from .utils import isPrimitive
import pprint
print('<table width="100%" class="resource_table">')
def row(th=None, td=None):
if td is None:
print(f'<tr><th align="right" width="30%">{th}</th><td></td></tr>')
elif th is None:
print(f'<tr><td colspan="2" align="left" width="30%">{td}</td></tr>')
else:
print(
f'<tr><th align="right" width="30%">{th}</th><td align="left"><div class="one_liner">{td}</div></td></tr>'
)
for s, (t, d) in zip(obtained_status, all_tasks):
tf = TaskFile(t)
ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True)
row("ID", t)
row("Status", s)
row("Created", ct)
if st:
row("Started", st)
if dr:
row("Duration", dr)
params = tf.params
row("Task")
if hasattr(params, "task_stack"):
row(
td=f'<pre style="text-align:left">{params.task_stack[0][1].task}</pre>'
)
else:
row(td=f'<pre style="text-align:left">{params.task}</pre>')
row("Tags")
row(td=f'<pre style="text-align:left">{tf.tags}</pre>')
if params.global_def:
row("Global")
row(td=f'<pre style="text-align:left">{params.global_def}</pre>')
# row('Environment')
global_runtime = tf.runtime["_runtime"]
job_vars = params.sos_dict
job_vars["_runtime"].update(global_runtime)
for k in sorted(job_vars.keys()):
v = job_vars[k]
if not k.startswith("__") and not k == "CONFIG":
if k == "_runtime":
for _k, _v in v.items():
if isPrimitive(_v) and _v not in (None, "", [], (), {}):
row(_k, _v)
elif isPrimitive(v) and v not in (None, "", [], (), {}):
row(
k, f'<pre style="text-align:left">{pprint.pformat(v)}</pre>'
)
pulse_content = ""
if tf.has_result():
if s not in ("pending", "submitted", "running"):
res = tf.result
if "start_time" in res and "end_time" in res:
row(
"Duration",
format_duration(res["end_time"] - res["start_time"]),
)
if "peak_cpu" in res:
row("Peak CPU", f'{res["peak_cpu"]*100} %')
if "peak_mem" in res:
row("Peak mem", pretty_size(res["peak_mem"]))
# this is a placeholder for the frontend to draw figure
row(td=f'<div id="res_{t}"></div>')
elif s == "running":
pulse_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", t + ".pulse"
)
if os.path.isfile(pulse_file):
with open(pulse_file) as pulse:
pulse_content = pulse.read()
summary = summarizeExecution(t, pulse_content, status=s)
if summary:
# row('Execution')
for line in summary.split("\n"):
fields = line.split(None, 1)
if fields[0] == "task":
continue
row(fields[0], "" if fields[1] is None else fields[1])
# this is a placeholder for the frontend to draw figure
row(td=f'<div id="res_{t}"></div>')
if s not in ("pending", "submitted", "running"):
#
if tf.has_shell():
shell = tf.shell
numLines = shell.count("\n")
row("shell", f"{numLines} lines")
row(td=f'<small><pre style="text-align:left">{shell}</pre></small>')
if tf.has_stdout():
stdout = tf.stdout
numLines = stdout.count("\n")
row(
"stdout",
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
if numLines > 200:
stdout = "\n".join(stdout.splitlines()[-200:])
row(
td=f'<small><pre style="text-align:left">{stdout}</pre></small>'
)
if tf.has_stderr():
stderr = tf.stderr
numLines = stderr.count("\n")
row(
"stderr",
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
if numLines > 200:
stderr = "\n".join(stderr.splitlines()[-200:])
row(
td=f'<small><pre style="text-align:left">{stderr}</pre></small>'
)
elif s == "running":
files = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", t + ".*")
)
for f in sorted(
[
x
for x in files
if os.path.splitext(x)[-1] not in (".task", ".pulse")
]
):
numLines = linecount_of_file(f)
rhead = os.path.splitext(f)[-1]
if rhead == ".sh":
rhead = "shell"
elif rhead == ".job_id":
rhead = "job ID"
elif rhead == ".err":
rhead = "stderr"
elif rhead == ".out":
rhead = "stdout"
elif rhead == ".soserr":
rhead = "sos error"
elif rhead == ".sosout":
rhead = "sos output"
row(
rhead,
"(empty)"
if numLines == 0
else f'{numLines} lines{"" if numLines < 200 else " (showing last 200)"}',
)
try:
row(
td=f'<small><pre style="text-align:left">{tail_of_file(f, 200, ansi2html=True)}</pre></small>'
)
except Exception:
row(
td='<small><pre style="text-align:left">ignored.</pre><small>'
)
print("</table>")
#
if not pulse_content:
return
# A sample of 400 point should be enough to show the change of resources
lines = sample_lines(pulse_content, 400).splitlines()
if len(lines) <= 2:
return
# read the pulse file and plot it
# time proc_cpu proc_mem children children_cpu children_mem
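            # An illustrative (made-up) pulse line with those columns:
            #   "10.5 85.0 120000000 2 30.0 45000000"
            # i.e. elapsed seconds, process CPU %, process memory in bytes,
            # number of children, children CPU %, children memory in bytes;
            # the parsing below sums CPU over columns 2 and 5 and memory over 3 and 6.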
try:
etime = []
cpu = []
mem = []
for line in lines:
if line.startswith("#") or not line.strip():
continue
fields = line.split()
etime.append(float(fields[0]))
cpu.append(float(fields[1]) + float(fields[4]))
mem.append(float(fields[2]) / 1e6 + float(fields[5]) / 1e6)
if not etime:
return
except Exception:
return
#
print(
"""
<script>
function loadFiles(files, fn) {
if (!files.length) {
files = [];
}
var head = document.head || document.getElementsByTagName('head')[0];
function loadFile(index) {
if (files.length > index) {
if (files[index].endsWith('.css')) {
var fileref = document.createElement('link');
fileref.setAttribute("rel", "stylesheet");
fileref.setAttribute("type", "text/css");
fileref.setAttribute("href", files[index]);
} else {
var fileref = document.createElement('script');
fileref.setAttribute("type", "text/javascript");
fileref.setAttribute("src", files[index]);
}
console.log('Load ' + files[index]);
head.appendChild(fileref);
index = index + 1;
// Used to call a callback function
fileref.onload = function() {
loadFile(index);
}
} else if (fn) {
fn();
}
}
loadFile(0);
}
function plotResourcePlot_"""
+ t
+ """() {
// get the item
// parent element is a table cell, needs enlarge
document.getElementById(
"res_"""
+ t
+ """").parentElement.setAttribute("height", "300px;");
$("#res_"""
+ t
+ """").css("height", "300px");
$("#res_"""
+ t
+ """").css("width", "100%");
$("#res_"""
+ t
+ """").css("min-height", "300px");
var cpu = ["""
+ ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, cpu)])
+ """];
var mem = ["""
+ ",".join([f"[{x*1000},{y}]" for x, y in zip(etime, mem)])
+ """];
$.plot('#res_"""
+ t
+ """', [{
data: cpu,
label: "CPU (%)"
},
{
data: mem,
label: "mem (M)",
yaxis: 2
}
], {
xaxes: [{
mode: "time"
}],
yaxes: [{
min: 0
}, {
position: "right",
tickFormatter: function(v, axis) {
return v.toFixed(1) + 'M';
}
}],
legend: {
position: "nw"
}
});
}
var dt = 100;
// the frontend might be notified before the table is inserted as results.
function showResourceFigure_"""
+ t
+ """() {
if ( $("#res_"""
+ t
+ """").length === 0) {
dt = dt * 1.5; // slow-down checks for datatable as time goes on;
setTimeout(showResourceFigure_"""
+ t
+ """, dt);
return;
} else {
$("#res_"""
+ t
+ """").css('width', "100%").css('height', "300px");
loadFiles(["http://www.flotcharts.org/flot/jquery.flot.js",
"http://www.flotcharts.org/flot/jquery.flot.time.js"
], plotResourcePlot_"""
+ t
+ """);
}
}
showResourceFigure_"""
+ t
+ """()
</script>
"""
)
elif verbosity == 0:
print("\n".join(obtained_status))
elif verbosity == 1:
for s, (t, d) in zip(obtained_status, all_tasks):
print(f"{t}\t{s}")
elif verbosity == 2:
tsize = 20
for s, (t, d) in zip(obtained_status, all_tasks):
ts, _, _, dr = TaskFile(t).tags_created_start_and_duration(
formatted=not numeric_times
)
tsize = max(tsize, len(ts))
print(f"{t}\t{ts.ljust(tsize)}\t{dr:<14}\t{s}")
elif verbosity == 3:
tsize = 20
for s, (t, d) in zip(obtained_status, all_tasks):
ts, ct, st, dr = TaskFile(t).tags_created_start_and_duration(
formatted=not numeric_times
)
tsize = max(tsize, len(ts))
print(f"{t}\t{ts.ljust(tsize)}\t{ct:<14}\t{st:<14}\t{dr:<14}\t{s}")
elif verbosity == 4:
import pprint
for s, (t, d) in zip(obtained_status, all_tasks):
tf = TaskFile(t)
if s == "missing":
print(f"{t}\t{s}\n")
continue
ts, ct, st, dr = tf.tags_created_start_and_duration(formatted=True)
print(f"{t}\t{s}\n")
print(f"{ct}")
if st:
print(f"{st}")
if dr:
print(f"{dr}")
params = tf.params
print("TASK:\n=====")
if hasattr(params, "task_stack"):
# show task of subtask
print(f"#1 of {len(params.task_stack)} subtasks:")
print(params.task_stack[0][1].task)
else:
print(params.task)
print("TAGS:\n=====")
print(tf.tags)
print()
if params.global_def:
print("GLOBAL:\n=======")
print(params.global_def)
print()
print("ENVIRONMENT:\n============")
global_runtime = tf.runtime["_runtime"]
job_vars = params.sos_dict
job_vars["_runtime"].update(global_runtime)
for k in sorted(job_vars.keys()):
v = job_vars[k]
print(f"{k:22}{short_repr(v) if verbosity == 3 else pprint.pformat(v)}")
print()
if tf.has_result():
if s not in ("pending", "submitted", "running"):
res = tf.result
print("EXECUTION STATS:\n================")
if "start_time" in res and "end_time" in res:
print(
f'Duration:\t{format_duration(res["end_time"] - res["start_time"])}'
)
if "peak_cpu" in res:
print(f'Peak CPU:\t{res["peak_cpu"]*100} %')
if "peak_mem" in res:
print(f'Peak mem:\t{pretty_size(res["peak_mem"])}')
elif s == "running":
# we have separate pulse, out and err files
pulse_file = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", t + ".pulse"
)
if os.path.isfile(pulse_file):
print("EXECUTION STATS:\n================")
with open(pulse_file) as pulse:
print(summarizeExecution(t, pulse.read(), status=s))
# if there are other files such as job file, print them.
def show_file(task, exts):
if isinstance(exts, str):
exts = [exts]
for ext in exts:
f = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", task + ext
)
if not os.path.isfile(f) or os.path.getsize(f) == 0:
return
print(
f'\n{os.path.basename(f)}:\n{"="*(len(os.path.basename(f))+1)}'
)
try:
with open(f) as fc:
print(fc.read())
except Exception:
print("Binary file")
if s == "running":
show_file(t, ".sh")
show_file(t, ".job_id")
show_file(t, [".sosout", ".out"])
show_file(t, [".soserr", ".err"])
elif s == "submitted":
show_file(t, ".sh")
show_file(t, ".job_id")
elif s != "pending":
if tf.has_shell():
print("\nexecution script:\n================\n" + tf.shell)
else:
show_file(t, ".sh")
if tf.has_stdout():
print("\nstandard output:\n================\n" + tf.stdout)
else:
show_file(t, [".sosout", ".out"])
if tf.has_stderr():
print("\nstandard error:\n================\n" + tf.stderr)
else:
show_file(t, [".soserr", ".err"])
# remove jobs that are older than 1 month
if to_be_removed:
purge_tasks(to_be_removed, verbosity=0)
def kill_tasks(tasks, tags=None):
#
import glob
from multiprocessing.pool import ThreadPool as Pool
if not tasks:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [os.path.basename(x)[:-5] for x in tasks]
else:
all_tasks = []
for t in tasks:
matched = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [os.path.basename(x)[:-5] for x in matched]
if not matched:
env.logger.warning(f"{t} does not match any existing task")
else:
all_tasks.extend(matched)
if tags:
all_tasks = [
            x for x in all_tasks if any(y in tags for y in TaskFile(x).tags.split())
]
if not all_tasks:
env.logger.debug("No task to kill")
return
all_tasks = sorted(list(set(all_tasks)))
# at most 20 threads
p = Pool(min(20, len(all_tasks)))
killed = p.map(kill_task, all_tasks)
for s, t in zip(killed, all_tasks):
print(f"{t}\t{s}")
def kill_task(task):
tf = TaskFile(task)
status = tf.status
if status == "completed":
return "completed"
with open(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", task + ".soserr"), "a"
) as err:
err.write(f"Task {task} killed by sos kill command or task engine.")
tf.add_outputs()
# 1323
tf.add_result()
TaskFile(task).status = "aborted"
remove_task_files(
task, [".sosout", ".soserr", ".out", ".err", ".pulse", ".sh", ".job_id"]
)
return "aborted"
def purge_tasks(tasks, purge_all=None, age=None, status=None, tags=None, verbosity=2):
# verbose is ignored for now
# if not tasks and not purge_all:
# # if not --all and no task is specified, find all tasks in the current directory
# from .signatures import WorkflowSignatures
# workflow_signatures = WorkflowSignatures()
# tasks = [
# x for x in workflow_signatures.tasks() if os.path.isfile(
# os.path.join(
# os.path.expanduser('~'), '.sos', 'tasks', x + '.task'))
# ]
import glob
if tasks:
all_tasks = []
for t in tasks:
matched = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", f"{t}*.task")
)
matched = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in matched]
if not matched:
print(f"{t}\tmissing")
all_tasks.extend(matched)
elif purge_all or age or status or tags:
tasks = glob.glob(
os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*.task")
)
all_tasks = [(os.path.basename(x)[:-5], os.path.getmtime(x)) for x in tasks]
else:
raise ValueError(
"Please specify either tasks or one or more of --all, --status, --tags--age"
)
#
if age is not None:
age = expand_time(age, default_unit="d")
if age > 0:
all_tasks = [x for x in all_tasks if time.time() - x[1] >= age]
else:
all_tasks = [x for x in all_tasks if time.time() - x[1] <= -age]
if status:
# at most 20 threads
task_status = check_tasks([x[0] for x in all_tasks], not tasks)
all_tasks = [x for x in all_tasks if task_status[x[0]]["status"] in status]
if tags:
all_tasks = [
            x for x in all_tasks if any(y in tags for y in TaskFile(x[0]).tags.split())
]
#
    # remove all task files
all_tasks = set([x[0] for x in all_tasks])
if all_tasks:
#
# find all related files, including those in nested directories
from collections import defaultdict
to_be_removed = defaultdict(list)
for dirname, _, filelist in os.walk(
os.path.join(os.path.expanduser("~"), ".sos", "tasks")
):
for f in filelist:
ID = os.path.basename(f).split(".", 1)[0]
if ID in all_tasks:
to_be_removed[ID].append(os.path.join(dirname, f))
#
cache_file: str = os.path.join(
os.path.expanduser("~"), ".sos", "tasks", "status_cache.pickle"
)
if os.path.isfile(cache_file):
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "rb") as cache:
status_cache = pickle.load(cache)
else:
status_cache = {}
for task in all_tasks:
removed = True
for f in to_be_removed[task]:
try:
if verbosity > 3:
if (
"TASK" in env.config["SOS_DEBUG"]
or "ALL" in env.config["SOS_DEBUG"]
):
env.log_to_file("TASK", f"Remove {f}")
os.remove(f)
except Exception as e:
removed = False
if verbosity > 0:
env.logger.warning(f"Failed to purge task {task[0]}: {e}")
status_cache.pop(task, None)
if removed and verbosity > 1:
print(f"{task}\tpurged")
with fasteners.InterProcessLock(cache_file + "_"):
with open(cache_file, "wb") as cache:
pickle.dump(status_cache, cache)
elif verbosity > 1:
env.logger.debug("No matching tasks to purge")
if purge_all and age is None and status is None and tags is None:
matched = glob.glob(os.path.join(os.path.expanduser("~"), ".sos", "tasks", "*"))
count = 0
for f in matched:
if os.path.isdir(f):
import shutil
try:
shutil.rmtree(f)
count += 1
except Exception as e:
if verbosity > 0:
env.logger.warning(f"Failed to remove {f}: {e}")
else:
try:
os.remove(f)
count += 1
except Exception as e:
if verbosity > 0:
env.logger.warning(f"Failed to remove {e}")
if count > 0 and verbosity > 1:
env.logger.info(f"{count} other files and directories are removed.")
return ""
| gpl-3.0 | -1,311,812,755,292,229,400 | 36.3247 | 207 | 0.464547 | false | 4.217995 | false | false | false |
bjodah/PyLaTeX | pylatex/base_classes/command.py | 1 | 10139 | # -*- coding: utf-8 -*-
"""
This module implements a class that implements a latex command.
This can be used directly or it can be inherited to make an easier interface
to it.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from .latex_object import LatexObject
from ..utils import dumps_list
class CommandBase(LatexObject):
"""A class that represents a LaTeX command.
The name of this class (when lowercased) will be the name of this command.
To supply a different name set the ``_latex_name`` attribute.
"""
def __init__(self, arguments=None, options=None, *,
extra_arguments=None):
r"""
Args
----
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
"""
self._set_parameters(arguments, 'arguments')
self._set_parameters(options, 'options')
if extra_arguments is None:
self.extra_arguments = None
else:
self._set_parameters(extra_arguments, 'extra_arguments')
super().__init__()
def _set_parameters(self, parameters, argument_type):
parameter_cls = Options if argument_type == 'options' else Arguments
if parameters is None:
parameters = parameter_cls()
elif not isinstance(parameters, parameter_cls):
parameters = parameter_cls(parameters)
# Pass on escaping to generated parameters
parameters._default_escape = self._default_escape
setattr(self, argument_type, parameters)
def __key(self):
"""Return a hashable key, representing the command.
Returns
-------
tuple
"""
return (self.latex_name, self.arguments, self.options,
self.extra_arguments)
def __eq__(self, other):
"""Compare two commands.
Args
----
other: `~.Command` instance
The command to compare this command to
Returns
-------
bool:
If the two instances are equal
"""
if isinstance(other, Command):
return self.__key() == other.__key()
return False
def __hash__(self):
"""Calculate the hash of a command.
Returns
-------
int:
The hash of the command
"""
return hash(self.__key())
def dumps(self):
"""Represent the command as a string in LaTeX syntax.
Returns
-------
str
The LaTeX formatted command
"""
options = self.options.dumps()
arguments = self.arguments.dumps()
if self.extra_arguments is None:
return r'\{command}{options}{arguments}'\
.format(command=self.latex_name, options=options,
arguments=arguments)
extra_arguments = self.extra_arguments.dumps()
return r'\{command}{arguments}{options}{extra_arguments}'\
.format(command=self.latex_name, arguments=arguments,
options=options, extra_arguments=extra_arguments)
class Command(CommandBase):
"""A class that represents a LaTeX command.
This class is meant for one-off commands. When a command of the same type
is used multiple times it is better to subclass `.CommandBase`.
"""
_repr_attributes_mapping = {'command': 'latex_name'}
def __init__(self, command=None, arguments=None, options=None, *,
extra_arguments=None, packages=None):
r"""
Args
----
command: str
Name of the command
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
packages: list of `~.Package` instances
A list of the packages that this command requires
Examples
--------
>>> Command('documentclass',
>>> options=Options('12pt', 'a4paper', 'twoside'),
>>> arguments='article').dumps()
'\\documentclass[12pt,a4paper,twoside]{article}'
>>> Command('com')
'\\com'
>>> Command('com', 'first')
'\\com{first}'
>>> Command('com', 'first', 'option')
'\\com[option]{first}'
        >>> Command('com', 'first', 'option', extra_arguments='second')
'\\com{first}[option]{second}'
"""
self.latex_name = command
if packages is not None:
self.packages |= packages
super().__init__(arguments, options, extra_arguments=extra_arguments)
class UnsafeCommand(Command):
"""An unsafe version of the `Command` class.
This class is meant for one-off commands that should not escape their
arguments and options. Use this command with care and only use this when
the arguments are hardcoded.
When an unsafe command of the same type is used multiple times it is better
to subclass `.CommandBase` and set the ``_default_escape`` attribute to
false.
"""
_default_escape = False
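    # Illustrative usage (an assumption): arguments pass through unescaped, e.g.
    #   UnsafeCommand('textbf', r'\emph{text}').dumps()
    # keeps the backslash and braces intact, whereas Command would escape them.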
class Parameters(LatexObject):
"""The base class used by `~Options` and `~Arguments`.
    This class should probably never be used on its own and inheriting from it
is only useful if a class like `~Options` or `~Arguments` is needed again.
"""
def __init__(self, *args, **kwargs):
r"""
Args
----
\*args:
Positional parameters
\*\*kwargs:
Keyword parameters
"""
if len(args) == 1 and not isinstance(args[0], str):
if hasattr(args[0], 'items') and len(kwargs) == 0:
kwargs = args[0] # do not just iterate over the dict keys
args = ()
elif hasattr(args[0], '__iter__'):
args = args[0]
self._positional_args = list(args)
self._key_value_args = dict(kwargs)
super().__init__()
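        # Illustrative call forms handled above (assumed equivalents):
        #   Parameters('a', 'b')        -> positional parameters 'a', 'b'
        #   Parameters(['a', 'b'])      -> same, taken from the single iterable
        #   Parameters({'width': 50})   -> key-value parameter width=50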
def __key(self):
"""Generate a unique hashable key representing the parameter object.
Returns
-------
tuple
"""
return tuple(self._list_args_kwargs())
def __eq__(self, other):
"""Compare two parameters.
Returns
-------
bool
"""
return type(self) == type(other) and self.__key() == other.__key()
def __hash__(self):
"""Generate a hash of the parameters.
Returns
-------
int
"""
return hash(self.__key())
def _format_contents(self, prefix, separator, suffix):
"""Format the parameters.
        The formatting is done using the three arguments supplied to this
function.
Arguments
---------
prefix: str
separator: str
suffix: str
Returns
-------
str
"""
params = self._list_args_kwargs()
if len(params) <= 0:
return ''
string = prefix + dumps_list(params, escape=self.escape,
token=separator) + suffix
return string
def _list_args_kwargs(self):
"""Make a list of strings representing al parameters.
Returns
-------
list
"""
params = []
params.extend(self._positional_args)
params.extend(['{k}={v}'.format(k=k, v=v) for k, v in
self._key_value_args.items()])
return params
class Options(Parameters):
"""A class implementing LaTex options for a command.
It supports normal positional parameters, as well as key-value pairs.
Options are the part of a command located between the square brackets
    (``[]``). The positional parameters will be output in order and will
    appear before the key-value pairs. The key-value pairs won't be output
    in the order in which they were entered.
Examples
--------
    >>> Options('a', 'b', 'c').dumps()
'[a,b,c]'
>>> Options('clip', width=50, height='25em', trim='1 2 3 4').dumps()
'[clip,trim=1 2 3 4,width=50,height=25em]'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('[', ',', ']')
class Arguments(Parameters):
"""A class implementing LaTex arguments for a command.
It supports normal positional parameters, as well as key-value pairs.
Arguments are the part of a command located between the curly braces
    (``{}``). The positional parameters will be output in order and will
    appear before the key-value pairs. The key-value pairs won't be output
    in the order in which they were entered.
Examples
--------
    >>> Arguments('a', 'b', 'c').dumps()
    '{a}{b}{c}'
    >>> args = Arguments('clip', width=50, height='25em')
>>> args.dumps()
'{clip}{width=50}{height=25em}'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('{', '}{', '}')
| mit | -7,111,534,106,801,539,000 | 27.085873 | 79 | 0.56909 | false | 4.579494 | false | false | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_ischildoffiltermatch.py | 1 | 2492 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from _matchesfilter import MatchesFilter
#-------------------------------------------------------------------------
#
# IsChildOfFilterMatch
#
#-------------------------------------------------------------------------
class IsChildOfFilterMatch(Rule):
"""Rule that checks for a person that is a child
of someone matched by a filter"""
labels = [ _('Filter name:') ]
name = _('Children of <filter> match')
category = _('Family filters')
description = _("Matches children of anybody matched by a filter")
def prepare(self,db):
self.db = db
self.map = set()
filt = MatchesFilter(self.list)
filt.requestprepare(db)
for person in db.iter_people():
if filt.apply(db, person):
self.init_list(person)
filt.requestreset()
def reset(self):
self.map.clear()
def apply(self,db,person):
return person.handle in self.map
def init_list(self,person):
if not person:
return
for fam_id in person.get_family_handle_list():
fam = self.db.get_family_from_handle(fam_id)
if fam:
self.map.update(child_ref.ref
for child_ref in fam.get_child_ref_list())
| gpl-2.0 | -4,666,658,151,900,319,000 | 32.226667 | 75 | 0.536116 | false | 4.597786 | false | false | false |
cheery/spirthon | annotator.py | 1 | 5192 | # Annotator needs to find the least generic type for everything.
# To do that, it needs to hold a model of our types.
class Annotator(object):
def __init__(self, unit):
self.unit = unit
self.stack = []
def update(self, func):
for block in func:
for op in block.ops:
if not op.queued:
self.stack.append(op)
op.queued = True
def run(self):
while len(self.stack) > 0:
op = self.stack.pop()
op.queued = False
if op.name == 'call':
print 'annotate', op
elif op.name == 'return':
a = union(op.block.func.annotation.restype, op.args[0].annotation)
op.args[0].annotation = a
op.block.func.annotation.restype = a
op.annotation = a
print 'return update', op, a
# bit incorrect, should push uses of argument in too.
else:
assert False
# Should annotate here, if some of the fields change,
# should reschedule the used fields.
# SPIR-V annotation may need much simpler rules than specified here.
# Anything -annotation in translation unit most likely means
# that the translation failed.
class Anything(object):
specificity = 0
parametric = False
def __repr__(self):
return 'anything'
# The next most specific type after 'Unbound'.
class Constant(object):
def __init__(self, type, value):
self.type = type
self.value = value
def __repr__(self):
return 'Constant({}, {})'.format(self.type, self.value)
class FuncType(object):
def __init__(self, restype, argtypes):
self.restype = restype
self.argtypes = argtypes
def __getitem__(self, index):
return self.argtypes[index]
def __len__(self):
return len(self.argtypes)
def __repr__(self):
return '({}) ->'.format(', '.join(map(repr, self.argtypes)), self.restype)
class Type(object):
def __call__(self, parameter):
assert self.parametric
return Parametric(self, parameter)
def __init__(self, name, generic, parametric=False):
self.name = name
self.generic = generic
self.parametric = parametric
self.specificity = generic.specificity+1
def __repr__(self):
return self.name
class Parametric(object):
def __init__(self, func, parameter):
self.func = func
self.parameter = parameter
def __repr__(self):
return "{}({})".format(self.func, self.parameter)
# Types are treated as notation. They should be uniquely identified.
anything = Anything()
# not sure whether these belong here.
t_int = Type('int', anything)
t_uint = Type('uint', t_int)
t_bool = Type('bool', t_uint)
t_float = Type('float', anything)
t_vec2 = Type('vec2', anything, parametric=True)
t_vec3 = Type('vec3', anything, parametric=True)
t_vec4 = Type('vec4', anything, parametric=True)
# Thought about doing them this way, but realized types
# would require unification by their type hierarchies.
# # nullable = Type('nullable', anything, parametric=True)
# # instance = Type('instance', nullable, parametric=True)
# # t_null = Type('null', nullable)
# I don't want parametric types to leak from
# their parametric container.
def union(a, b):
c = union_raw(a, b)
while isinstance(c, Type) and c.parametric:
c = c.generic
return c
# But we still may use unification results which
# return parametric types.
def union_raw(a, b):
if a is b:
return a
if a is None:
return b
if b is None:
return a
if isinstance(a, Constant) and isinstance(b, Constant):
if a.value == b.value:
return a
else:
return union_raw(a.type, b.type)
elif isinstance(a, Constant):
return union_raw(a.type, b)
elif isinstance(b, Constant):
return union_raw(a, b.type)
if isinstance(a, Type) and isinstance(b, Type):
specificity = min(a.specificity, b.specificity)
while a.specificity > specificity:
a = a.generic
while b.specificity > specificity:
b = b.generic
while a is not b:
a = a.generic
b = b.generic
assert a is not None
return a
elif isinstance(a, Parametric) and isinstance(b, Parametric):
tp = union_raw(a.func, b.func)
if tp.parametric:
return Parametric(tp, union(a.parameter, b.parameter))
else:
return tp
elif isinstance(a, Parametric):
tp = union_raw(a.func, b)
if tp.parametric:
return Parametric(tp, a.parameter)
else:
return tp
elif isinstance(b, Parametric):
tp = union_raw(b.func, a)
if tp.parametric:
return Parametric(tp, b.parameter)
else:
return tp
elif isinstance(a, FuncType) and isinstance(b, FuncType) and len(a) == len(b):
return FuncType(
union(a.restype, b.restype),
[union(c, d) for c, d in zip(a, b)])
return anything
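# Illustrative examples (added comment, not in the original module) of how the
# type lattice above collapses under union():
#
#   union(t_bool, t_uint)                    # -> uint
#   union(t_int, t_float)                    # -> anything
#   union(t_vec2(t_float), t_vec2(t_int))    # -> vec2(anything)
#   union(t_vec2(t_float), t_vec3(t_float))  # -> anything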
| mit | 7,691,338,691,917,519,000 | 30.08982 | 82 | 0.589176 | false | 3.7815 | false | false | false |
tibor95/phatch-python2.7 | build/lib.linux-i686-2.7/phatch/core/pil.py | 1 | 26512 | # Phatch - Photo Batch Processor
# Copyright (C) 2007-2009 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Phatch recommends SPE (http://pythonide.stani.be) for editing python files.
# Follows PEP8
"""All PIL related issues."""
#FIXME:
# - info should be defined on layer level
# -> move call afterwards also to layer level
# -> adapt image inspector
import datetime
import os
import re
import types
from PIL import Image
#todo make this lazy
from lib import formField
from lib import imtools
from lib import metadata
from lib import openImage
from lib import system
from lib import thumbnail
from lib import unicoding
from lib.reverse_translation import _t
from lib.formField import RE_FILE_IN, RE_FILE_OUT
from ct import TITLE
from config import USER_BIN_PATH
#from other import EXIF
system.set_bin_paths([USER_BIN_PATH])
try:
import pyexiv2
from lib import _pyexiv2 as exif
except:
pyexiv2 = None
exif = False
WWW_PYEXIV2 = 'http://tilloy.net/dev/pyexiv2/'
NEEDS_PYEXIV2 = _('pyexiv2 needs to be installed') + ' (%s)' % WWW_PYEXIV2
CONVERTED_MODE = \
_('%(mode)s has been converted to %(mode_copy)s to save as %(format)s.')
DYNAMIC_VARS = set(('width', 'height', 'size', 'mode', 'transparency'))
IMAGE_DEFAULT_DPI = 72
SEPARATOR = '_' # should be same as in core.translations
MONTHS = (_t('January'), _t('February'), _t('March'), _t('April'),
_t('May'), _t('June'), _t('July'), _t('August'), _t('September'),
_t('October'), _t('November'), _t('December'))
WEEKDAYS = (_t('Monday'), _t('Tuesday'), _t('Wednesday'), _t('Thursday'),
_t('Friday'), _t('Saturday'), _t('Sunday'))
DATETIME_KEYS = ['year', 'month', 'day', 'hour', 'minute', 'second']
re_DATETIME = re.compile(
'(?P<year>\d{4})[-:](?P<month>\d{2})[-:](?P<day>\d{2}) '
'(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})')
re_TAG = re.compile('(Pil|Exif|Iptc|Pexif|Zexif)([.]\w+)+')
re_KEY = re.compile('(#*)((\w|[.])*$|[$])')
TRANSPARENCY_ERROR = _('Only palette images have transparency.')
IMAGE_READ_EXTENSIONS = set(formField.IMAGE_READ_EXTENSIONS)\
.union(openImage.WITHOUT_PIL.extensions)
IMAGE_READ_EXTENSIONS = list(IMAGE_READ_EXTENSIONS)
IMAGE_READ_EXTENSIONS.sort()
IMAGE_EXTENSIONS = [ext for ext in IMAGE_READ_EXTENSIONS
if ext in formField.IMAGE_WRITE_EXTENSIONS]
BASE_VARS = ['dpi', 'compression', 'filename', 'format',
'orientation', 'path', 'transparency', 'type']
def split_data(d):
"""Provide attribute access to the variables.
:param d: a dumped metadata dictionary
:type d: dict
>>> d = {'date': '2008-11-27 13:54:33', 'tuple': (1, 2)}
"""
value = d.values()[0]
#tuples or list
if type(value) in (types.ListType, types.TupleType):
if len(value) > 1:
for k, v in d.items():
for i, x in enumerate(v):
d['%s.%d' % (k, i)] = v[i]
return
#datetime strings
done = False
for k, v in d.items():
if type(v) in types.StringTypes:
dt = re_DATETIME.match(v)
if dt:
for key in DATETIME_KEYS:
d['%s.%s' % (k, key)] = dt.group(key)
done = True
if done:
return
#date time values
if type(value) == datetime.datetime:
for k, v in d.items():
for key in DATETIME_KEYS:
d['%s.%s' % (k, key)] = getattr(v, key)
def fix_EXIF(tag):
if not tag.startswith('EXIF'):
tag = 'EXIF.' + tag
return tag.replace(' ', SEPARATOR)
def image_to_dict(filename, im=None):
folder, name = os.path.split(filename)
d = {'path': filename, 'filename': name}
if im:
width, height = im.size
d['width'] = width
d['height'] = height
d['mode'] = im.mode
return d
def get_photo(filename):
return Photo(metadata.InfoExtract(filename, vars=BASE_VARS).dump())
def split_vars_static_dynamic(vars):
vars = set(vars)
static = vars.difference(DYNAMIC_VARS)
dynamic = vars.intersection(DYNAMIC_VARS)
return list(static), list(dynamic)
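# Illustrative example (comment only, not in the original source):
#
#   static, dynamic = split_vars_static_dynamic(['path', 'width', 'mode'])
#   # 'path' lands in static; 'width' and 'mode' in dynamic. Both lists are
#   # built from sets, so their ordering is not guaranteed.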
class NotWritableTagError(Exception):
pass
class InfoPhoto(dict):
def __init__(self, info, info_to_dump, get_pil, image=None):
"""The ``get_pil`` parameter is necessary for tags as width,
height, size and mode.
:param info: pil, pyexiv2, ... tag, value info
:type info: dict
:param get_pil: method to retrieve the pil image
:type get_pil: callable
"""
#parameters
self.get_pil = get_pil
path = info['path']
#sources
if image == None:
image = get_pil()
sources = {
metadata.InfoPil: image,
metadata.InfoPexif: image,
metadata.InfoZexif: image}
#check format -> readable/writable metadata with pyexiv2
if exif and exif.is_readable_format(image.format):
self.pyexiv2 = pyexiv2.ImageMetadata(path)
self.pyexiv2.read()
self.writable_exif = exif.is_writable_format_exif(image.format)
self.writable_iptc = exif.is_writable_format_exif(image.format)
self.writable = self.writable_exif or self.writable_iptc
if self.writable_exif:
self.pyexiv2['Exif.Image.Software'] = TITLE
sources[metadata.InfoExif] = sources[metadata.InfoIptc] =\
self.pyexiv2
else:
self.pyexiv2 = None
self.writable = self.writable_exif = self.writable_iptc = False
#retrieve dump info
try:
info_dumped = info_to_dump.open(path, sources).dump(free=True)
except Exception, details:
reason = unicoding.exception_to_unicode(details)
#log error details
message = u'%s:%s:\n%s' % (_('Unable extract variables from file'),
path, reason)
raise Exception(message)
self.update(info, explicit=False)
self.update(info_dumped, explicit=False)
#private vars
self._original_size = image.size # to compare if changed later
self._dirty = False
self._log = ''
self._flushed = True
def close(self):
"""Remove circular reference."""
del self.get_pil
def is_dirty(self):
"""The photo can become dirty in two ways:
* new metadata has been set
* the image has changes size
In case the image size has changed it will update the
``Exif.Photo.PixelXDimension`` and ``Exif.Photo.PixelYimension``
accordingly.
:returns: True, if dirty
:rtype: boolean
"""
if self._dirty:
return True
self.update_size()
return self._dirty
def set(self, tag, value):
super(InfoPhoto, self).__setitem__(tag, value)
def update(self, d, explicit=True):
"""Do this explicitly so __setitem__ gets called."""
if explicit:
for key, value in d.items():
self[key] = value
else:
super(InfoPhoto, self).update(d)
def update_size(self):
"""If the image is exif writable and if the size has changed,
it will update ``Exif.Photo.PixelXDimension`` and
``Exif.Photo.PixelYimension``.
"""
if not self.writable_exif:
return
size = width, height = self.get_pil().size
if self._original_size != size:
self.pyexiv2['Exif.Photo.PixelXDimension'] = width
self.pyexiv2['Exif.Photo.PixelYDimension'] = height
self._dirty = True
def __getitem__(self, tag):
"""If a dynamic tag (size, mode) is requested, it will
extract it from the image. Otherwise get it normally.
:param tag: metadata tag
:type tag: string
:returns: value
"""
if tag in DYNAMIC_VARS:
#this can maybe be optimized if necessary
if tag == 'size':
return self.get_pil().size
elif tag in ('width', 'Exif_Photo_PixelXDimension'):
return self.get_pil().size[0]
elif tag in ('height', 'Exif_Photo_PixelYDimension'):
return self.get_pil().size[1]
elif tag == 'mode':
return self.get_pil().mode
elif tag == 'transparency':
self.assert_transparency()
return self.get_pil().info['transparency']
else:
raise KeyError('Fatal Error: tag "%s" is not dynamic?!' % tag)
elif tag in metadata.ORIENTATION_TAGS:
#give priority to writable tag
if 'Exif_Image_Orientation' in self:
return super(InfoPhoto, self).\
__getitem__('Exif_Image_Orientation')
else:
return super(InfoPhoto, self).__getitem__(tag)
else:
return super(InfoPhoto, self).__getitem__(tag)
def __contains__(self, tag):
"""
"""
if super(InfoPhoto, self).__contains__(tag):
return True
if tag == 'transparency' and tag in self.get_pil().info:
return self['mode'] == 'P'
return tag in DYNAMIC_VARS
def __delitem__(self, tag):
"""Delete a tag after :method:`InfoPhoto.assert_writable`.
:param tag: metadata tag
:type tag: string
"""
self.assert_writable(tag)
if tag == 'transparency':
self.assert_transparency()
del self.get_pil().info[tag]
return
pyexiv2_tag = self._fix(tag) # pexiv2 demands str
# a bit clumsy but pyexiv2 does not support get or in
try:
pyexiv2_tag_value = self.pyexiv2[pyexiv2_tag]
except KeyError:
pyexiv2_tag_value = None
if self.pyexiv2 and pyexiv2_tag_value != None:
self.pyexiv2[pyexiv2_tag] = None
if tag in self:
super(InfoPhoto, self).__delitem__(tag)
def __setitem__(self, tag, value):
"""Delete a tag after :method:`InfoPhoto.assert_writable`.
:param tag: metadata tag
:type tag: string
:param value: new value
"""
self.assert_writable(tag)
if tag in metadata.ORIENTATION_TAGS:
if self.pyexiv2 is None and value == 1:
#allow to ignore this (e.g. transpose method)
return
#redirect to writable tag
tag = 'Exif_Image_Orientation'
if tag in DYNAMIC_VARS:
if tag == 'transparency':
self.assert_transparency()
self.get_pil().info['transparency'] = value
else:
raise KeyError(_('Tag "%s" is read only.') % tag)
else:
super(InfoPhoto, self).__setitem__(tag, value)
if metadata.RE_PYEXIV2_TAG_EDITABLE.match(tag):
try:
self.pyexiv2[self._fix(tag)] = value
except Exception, message:
raise KeyError('%s:\n%s'
% (_('Impossible to write tag "%s"') % tag, message))
self._dirty = True
self._flushed = False
def assert_transparency(self):
"""Raise a ``KeyError`` for ``'transparency'`` when ``image.mode``
is not ``'P'``.
"""
if self['mode'] != 'P':
raise KeyError(TRANSPARENCY_ERROR)
def log(self, message):
"""Log a message
:param message: message
:type message: string
"""
self._log += message + '\n'
def clear_log(self):
"""Clears the log."""
self._log = ''
def get_log(self):
"""Get the log contents.
:returns: the log
:rtype: string
"""
return self._log
@classmethod
def _fix(cls, tag):
"""Phatch uses ``_`` as a separator while pyexiv2 uses a
dot (``.``). Moreover pyexiv2 demands str.
>>> InfoPhoto._fix('Exif_Photo_PixelXDimension')
'Exif.Photo.PixelXDimension'
:param tag: tag in info notation
:type tag: string
:returns: tag in pyexiv2 notation
:rtype: string
"""
return str(tag.replace('_', '.'))
def assert_writable(self, tag):
"""Assert that the tag is writable. This can raise an
``NotWritableTagError`` because of several reasons:
* Tag might be read-only (e.g. Exif_Photo_PixelXDimension)
* Tag might be not Exif or Iptc
* Image file format might not allow writing of this tag
:param tag: tag name
:type tag: string
:returns: True, if writable
:rtype: bool
"""
if not metadata.is_writable_tag(tag):
raise NotWritableTagError(_('Tag "%s" is not writable.') % tag)
if not ((self.writable_exif and tag.startswith('Exif'))
or (self.writable_iptc and tag.startswith('Iptc'))
or metadata.is_writeable_not_exif_tag(tag, self['mode'])):
raise NotWritableTagError(
_('Format %(format)s does not support overwriting "%(tag)s".')\
% {'format': self['format'], 'tag': tag})
def save(self, target, target_format=None, thumbdata=None):
"""
:param target: target filename
:type target: string
:param target_format: target format e.g. obtained by PIL
:type target_format: string
:param thumbdata: new thumbnail (eg with StringIO, see :mod:`imtools`)
:type thumbdata: string
"""
if not exif:
raise ImportError(NEEDS_PYEXIV2)
if not pyexiv2:
#FIXME: when starting with a not exif image png
#but save as exif jpg
return
if target == self['path']:
if self.is_dirty() and not self._flushed: # includes update_size
warnings = exif.flush(self.pyexiv2, thumbdata)
self._flushed = True
else:
self.update_size()
warnings = exif.write_metadata(self.pyexiv2, target,
self['format'], target_format, thumbdata)
return warnings
class Photo:
"""Use :func:`get_photo` to obtain a photo from a filename."""
def __init__(self, info, info_to_dump=None):
self.modify_date = None # for time shift action
self.report_files = [] # for reports
self._exif_transposition_reverse = None
#layer
path = info['path']
name = self.current_layer_name = _t('background')
layer = Layer(path, load=True)
self.layers = {name: layer}
#info
self.info = InfoPhoto(info, info_to_dump, self.get_flattened_image,
layer.image)
self.rotate_exif()
def close(self):
"""Remove circular references."""
self.info.close()
del self.info
def log(self, message):
self.info.log(message)
def clear_log(self):
self.info.clear_log()
def get_log(self):
return self.info.get_log()
def get_filename(self, folder, filename, typ):
return os.path.join(folder, '%s.%s' % (filename, typ))\
.replace('<', '%(').replace('>', ')s') % self.__dict__
#---layers
def get_flattened_image(self):
return self.get_layer().image.copy()
def get_layer(self, name=None):
if name is None:
name = self.current_layer_name
return self.layers[name]
def get_thumb(self, size=thumbnail.SIZE):
return thumbnail.thumbnail(self.get_flattened_image(),
size=size, checkboard=True)
def set_layer(self, layer, name=None):
if name is None:
name = self.current_layer_name
self.layers[name] = layer
#---image operations affecting all layers
def save(self, filename, format=None, save_metadata=True, **options):
"""Saves a flattened image"""
#todo: flatten layers
if format is None:
format = imtools.get_format_filename(filename)
image = self.get_flattened_image()
image_copy = imtools.convert_save_mode_by_format(image, format)
if image_copy.mode == 'P' and 'transparency' in image_copy.info:
options['transparency'] = image_copy.info['transparency']
if image_copy.mode != image.mode:
self.log(CONVERTED_MODE % {'mode': image.mode,
'mode_copy': image_copy.mode, 'format': format} + '\n')
#reverse exif previously applied exif orientation
#exif thumbnails are usually within 160x160
#desktop thumbnails size is defined by thumbnail.py and is
#probably 128x128
save_metadata = save_metadata and exif \
and exif.is_writable_format(format)
if save_metadata:
# Exif thumbnails are stored in their own format (eg JPG)
thumb = thumbnail.thumbnail(image_copy, (160, 160))
thumbdata = imtools.get_format_data(thumb, format)
image_copy = imtools.transpose(image_copy,
self._exif_transposition_reverse)
#thumb = thumbnail.thumbnail(thumb, copy=False)
else:
thumbdata = None
#postpone thumbnail production to see later if it is needed
thumb = None
if 'compression.tif' in options:
compression = options['compression.tif']
del options['compression.tif']
else:
compression = 'none'
try:
if compression.lower() in ['raw', 'none']:
#save image with pil
file_mode = imtools.save_check_mode(image_copy, filename,
**options)
#did PIL silently change the image mode?
if file_mode:
#PIL did change the image mode without throwing
# an exception.
#Do not save thumbnails in this case
# as they won't be reliable.
if image_copy.mode.endswith('A') and \
not file_mode.endswith('A'):
#force RGBA when transparency gets lost
#eg saving TIFF format with LA mode
mode = image_copy.mode
image_copy = image_copy.convert('RGBA')
file_mode = imtools.save_check_mode(image_copy,
filename, **options)
if file_mode:
# RGBA failed
self.log(CONVERTED_MODE % {'mode': mode,
'mode_copy': file_mode, 'format': format} \
+ '\n')
else:
# RGBA succeeded
self.log(CONVERTED_MODE % {'mode': mode,
'mode_copy': 'RGBA', 'format': format} + '\n')
else:
self.log(CONVERTED_MODE % {'mode': image_copy.mode,
'mode_copy': file_mode, 'format': format} + '\n')
elif thumbnail.is_needed(image_copy, format):
# save thumbnail in system cache if needed
if thumb is None:
thumb = image_copy
thumb_info = {
'width': image.size[0],
'height': image.size[1]}
thumbnail.save_to_cache(filename, thumb,
thumb_info=thumb_info, **options)
# copy metadata if needed (problematic for tiff)
# FIXME: if metdata corrupts the image, there should be
# no thumbnail
if save_metadata:
self.info.save(filename, thumbdata=thumbdata)
else:
# save with pil>libtiff
openImage.check_libtiff(compression)
self.log(openImage.save_libtiff(image_copy, filename,
compression=compression, **options))
if self.modify_date:
# Update file access and modification date
os.utime(filename, (self.modify_date, self.modify_date))
self.append_to_report(filename, image_copy)
except IOError, message:
# clean up corrupted drawing
if os.path.exists(filename):
os.remove(filename)
raise IOError(message)
#update info
        if 'dpi' in options:
self.info['dpi'] = options['dpi'][0]
def append_to_report(self, filename, image=None):
report = image_to_dict(filename, image)
report[_t('source')] = self.info['path']
self.report_files.append(report)
def convert(self, mode, *args, **keyw):
"""Converts all layers to a different mode."""
for layer in self.layers.values():
if layer.image.mode == mode:
continue
if mode == 'P' and imtools.has_alpha(layer.image):
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
self.info['transparency'] = 255
elif mode == 'P':
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
self.info['transparency'] = None
else:
layer.image = imtools.convert(layer.image, mode, *args, **keyw)
def safe_mode(self, format):
"""Convert the photo into a safe mode for this specific format"""
layer = self.get_layer()
layer.image = imtools.convert_save_mode_by_format(layer.image, format)
def resize(self, size, method):
"""Resizes all layers to a different size"""
size = (max(1, size[0]), max(1, size[1]))
for layer in self.layers.values():
layer.image = layer.image.resize(size, method)
def rotate_exif(self, reverse=False):
layers = self.layers.values()
if reverse:
transposition = self._exif_transposition_reverse
self._exif_transposition_reverse = ()
else:
transposition, self._exif_transposition_reverse = \
imtools.get_exif_transposition(self.info['orientation'])
if transposition:
for layer in layers:
layer.image = imtools.transpose(layer.image, transposition)
#---pil
def apply_pil(self, function, *arg, **keyw):
for layer in self.layers.values():
layer.apply_pil(function, *arg, **keyw)
#---external
def call(self, command, check_exe=True, shell=None, size=None,
unlock=False, output_filename=None, mode=None):
if shell is None:
shell = not system.WINDOWS
#get command line
info = self.info
layer = self.get_layer()
image = layer.image
if mode != image.mode:
image = imtools.convert(image, mode)
if size != None and size[0] < image.size[0]:
image = image.copy()
image.thumbnail(size, Image.ANTIALIAS)
#loop over input -> save to temp files
temp_files = []
done = []
error = None
for match in RE_FILE_IN.finditer(command):
source = match.group()
if not(source in done):
ext = match.group(1)
target = system.TempFile(ext)
try:
imtools.save_safely(image, target.path)
except Exception, error:
pass
temp_files.append((source, target))
done.append(source)
if error:
break
# check if we have a file_in
# clean up in case of error
if error:
for source, target in temp_files:
target.close() # os.remove(target)
raise error
# loop over output
output = None
for index, match in \
enumerate(RE_FILE_OUT.finditer(command)):
if index > 0:
# only 1 is allowed
raise Exception('Only one file_out.* is allowed.')
source = match.group()
ext = match.group(1)
output = system.TempFile(ext, output_filename)
command = command.replace(source, system.fix_quotes(output.path))
# tweak command line
for source, target in temp_files:
command = command.replace(source, system.fix_quotes(target.path))
# execute
system.call(command, shell=shell)
# give back filename
if output and not os.path.exists(output.path):
error = True
else:
error = False
for source, target in temp_files:
target.close() # os.remove(target)
if error:
raise Exception(
_('Command did not produce an output image:\n%s')\
% command)
if output:
layer.open(output.path)
# DO NOT REMOVE image.load() or output.close will fail on windows
layer.image.load()
output.close()
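# Illustrative sketch (not part of the original module): the typical round trip
# performed by actions, using the module-level helper defined earlier. The file
# paths are hypothetical.
#
#   photo = get_photo('/tmp/input.png')
#   photo.resize((800, 600), Image.ANTIALIAS)
#   photo.save('/tmp/output.jpg')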
class Layer:
def __init__(self, filename, position=(0, 0), load=True):
self.open(filename)
self.position = position
# VERY IMPORTANT
# do not remove load option, otherwise openImage.py won't work
# correctly with group4 tiff compression
if load:
self.image.load()
def open(self, uri):
self.image = openImage.open(uri)
if self.image.mode in ['F', 'I']:
# Phatch doesn't support F and I
# FIXME: It will better to add some sort of warning here
self.image = self.image.convert('L')
def apply_pil(self, function, *arg, **keyw):
self.image = function(self.image, *arg, **keyw)
| gpl-3.0 | 3,443,809,914,918,861,000 | 35.021739 | 79 | 0.55688 | false | 3.961153 | false | false | false |
klpdotorg/dubdubdub | apps/ivrs/migrations/0026_auto_20170101_2313.py | 1 | 1139 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
State = apps.get_model("ivrs", "State")
User = apps.get_model("users", "User")
states = State.objects.all()
for state in states:
# Trimming the starting 0. Have checked to make sure
# all telephones on the State table have 11 digits
# including the 0 at the beginning.
telephone = state.telephone[1:]
try:
user = User.objects.get(mobile_no=telephone)
state.user = user
except:
pass
state.telephone = telephone
state.save()
def reverse_func(apps, schema_editor):
State = apps.get_model("ivrs", "State")
states = State.objects.all()
for state in states:
telephone = "0" + state.telephone
state.telephone = telephone
state.user = None
state.save()
class Migration(migrations.Migration):
dependencies = [
('ivrs', '0025_state_user'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| mit | 7,417,438,785,882,545,000 | 24.886364 | 60 | 0.604917 | false | 3.87415 | false | false | false |
google/starthinker | dags/test_dag.py | 1 | 4439 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Test Script
Used by tests.
- This should be called by the tests scripts only.
- When run will generate a say hello log.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {}
RECIPE = {
'setup': {
'day': [
'Mon',
'Tue',
'Wed',
'Thu',
'Fri',
'Sat',
'Sun'
],
'hour': [
1,
3,
23
]
},
'tasks': [
{
'hello': {
'auth': 'user',
'hour': [
1
],
'say': 'Hello At 1',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
3
],
'say': 'Hello At 3',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
],
'say': 'Hello Manual',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
23
],
'say': 'Hello At 23 Sleep',
'sleep': 30
}
},
{
'hello': {
'auth': 'user',
'say': 'Hello At Anytime',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
1,
3,
23
],
'say': 'Hello At 1, 3, 23',
'sleep': 0
}
},
{
'hello': {
'auth': 'user',
'hour': [
3
],
'say': 'Hello At 3 Reordered',
'sleep': 0
}
}
]
}
dag_maker = DAG_Factory('test', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| apache-2.0 | -252,957,889,067,501,380 | 24.079096 | 145 | 0.514305 | false | 4.035455 | false | false | false |
DongjunLee/kino-bot | kino/slack/plot.py | 1 | 2684 | from matplotlib import pyplot as plt
import matplotlib.dates as dt
import seaborn
seaborn.set()
import datetime
class Plot(object):
def __init__(self):
pass
    @staticmethod
    def make_bar(
x,
y,
f_name,
title=None,
legend=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
if x_ticks is not None:
plt.xticks(x, x_ticks)
if y_ticks is not None:
plt.yticks(y_ticks)
plt.bar(x, y, align="center")
if legend is not None:
plt.legend(legend)
plt.savefig(f_name)
plt.close(fig)
    @staticmethod
    def make_line(
x,
y,
f_name,
title=None,
legend=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
if x_ticks is not None:
plt.xticks(x, x_ticks)
if y_ticks is not None:
plt.yticks(y_ticks)
if isinstance(y[0], list):
for data in y:
plt.plot(x, data)
else:
plt.plot(x, y)
if legend is not None:
plt.legend(legend)
plt.savefig(f_name)
plt.close(fig)
    @staticmethod
    def make_efficiency_date(
total_data,
avg_data,
f_name,
title=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None,
):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
if x_label is not None:
plt.ylabel(x_label)
if y_label is not None:
plt.xlabel(y_label)
v_date = []
v_val = []
for data in total_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
to_int = round(float(data[1]))
plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))
for data in avg_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
v_date.append(dates)
v_val.append(data[1])
plt.plot_date(v_date, v_val, "^y-", label="Average")
plt.legend()
plt.savefig(f_name)
plt.close(fig)
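# Illustrative usage sketch (not part of the original module); the data and
# file names below are made up.
#
#   Plot.make_bar([0, 1, 2], [3, 5, 2], "bar.png",
#                 title="Example", x_ticks=["a", "b", "c"])
#   Plot.make_line([0, 1, 2], [[1, 2, 3], [3, 2, 1]], "line.png",
#                  legend=["up", "down"])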
| mit | 2,999,971,998,501,694,500 | 22.137931 | 77 | 0.492921 | false | 3.494792 | false | false | false |
sylvchev/mdla | examples/example_benchmark_performance.py | 1 | 6309 | """Benchmarking dictionary learning algorithms on random dataset"""
from multiprocessing import cpu_count
from time import time
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from numpy.linalg import norm
from numpy.random import permutation, rand, randint, randn
from mdla import MiniBatchMultivariateDictLearning, MultivariateDictLearning
# TODO:
# investigate perf break from pydico
def benchmarking_plot(figname, pst, plot_sep, minibatchRange, mprocessRange):
_ = plt.figure(figsize=(15, 10))
bar_width = 0.35
_ = plt.bar(
np.array([0]),
pst[0],
bar_width,
color="b",
label="Online, no multiprocessing (baseline)",
)
index = [0]
for i in range(1, plot_sep[1]):
if i == 1:
_ = plt.bar(
np.array([i + 1]),
pst[i],
bar_width,
color="r",
label="Online with minibatch",
)
else:
_ = plt.bar(np.array([i + 1]), pst[i], bar_width, color="r")
index.append(i + 1)
    for i in range(plot_sep[1], plot_sep[2]):
if i == plot_sep[1]:
_ = plt.bar(
np.array([i + 2]),
pst[i],
bar_width,
label="Batch with multiprocessing",
color="magenta",
)
else:
_ = plt.bar(np.array([i + 2]), pst[i], bar_width, color="magenta")
index.append(i + 2)
plt.ylabel("Time per iteration (s)")
plt.title("Processing time for online and batch processing")
tick = [""]
tick.extend(map(str, minibatchRange))
tick.extend(map(str, mprocessRange))
plt.xticks(index, tuple(tick))
plt.legend()
plt.savefig(figname + ".png")
def _generate_testbed(
kernel_init_len,
n_nonzero_coefs,
n_kernels,
n_samples=10,
n_features=5,
n_dims=3,
snr=1000,
):
"""Generate a dataset from a random dictionary
    Generate a random dictionary and a dataset where samples are combinations of
    n_nonzero_coefs dictionary atoms. Noise is added based on the SNR value, with
    1000 indicating that no noise should be added.
    Return the dictionary, the dataset and an array indicating how atoms are
    combined to obtain each sample.
"""
print("Dictionary sampled from uniform distribution")
dico = [rand(kernel_init_len, n_dims) for i in range(n_kernels)]
for i in range(len(dico)):
dico[i] /= norm(dico[i], "fro")
signals = list()
decomposition = list()
for _ in range(n_samples):
s = np.zeros(shape=(n_features, n_dims))
d = np.zeros(shape=(n_nonzero_coefs, 3))
rk = permutation(range(n_kernels))
for j in range(n_nonzero_coefs):
k_idx = rk[j]
k_amplitude = 3.0 * rand() + 1.0
k_offset = randint(n_features - kernel_init_len + 1)
s[k_offset : k_offset + kernel_init_len, :] += k_amplitude * dico[k_idx]
d[j, :] = array([k_amplitude, k_offset, k_idx])
decomposition.append(d)
noise = randn(n_features, n_dims)
if snr == 1000:
alpha = 0
else:
ps = norm(s, "fro")
pn = norm(noise, "fro")
alpha = ps / (pn * 10 ** (snr / 20.0))
signals.append(s + alpha * noise)
signals = np.array(signals)
return dico, signals, decomposition
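# Illustrative call (comment only, not executed by the benchmark):
#
#   dico, signals, decomposition = _generate_testbed(
#       kernel_init_len=5, n_nonzero_coefs=3, n_kernels=8,
#       n_samples=20, n_features=12, n_dims=2, snr=20)
#   # signals.shape == (20, 12, 2); each row of a decomposition entry stores
#   # (amplitude, offset, kernel index) for one active atom.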
rng_global = np.random.RandomState(1)
n_samples, n_dims = 1500, 1
n_features = kernel_init_len = 5
n_nonzero_coefs = 3
n_kernels, max_iter, learning_rate = 50, 10, 1.5
n_jobs, batch_size = -1, None
iter_time, plot_separator, it_separator = list(), list(), 0
generating_dict, X, code = _generate_testbed(
kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims
)
# Online without mini-batch
print(
"Processing ",
max_iter,
"iterations in online mode, " "without multiprocessing:",
end="",
)
batch_size, n_jobs = n_samples, 1
learned_dict = MiniBatchMultivariateDictLearning(
n_kernels=n_kernels,
batch_size=batch_size,
n_iter=max_iter,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
verbose=1,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
# Online with mini-batch
minibatch_range = [cpu_count()]
minibatch_range.extend([cpu_count() * i for i in range(3, 10, 2)])
n_jobs = -1
for mb in minibatch_range:
print(
"\nProcessing ",
max_iter,
"iterations in online mode, with ",
"minibatch size",
mb,
"and",
cpu_count(),
"processes:",
end="",
)
batch_size = mb
learned_dict = MiniBatchMultivariateDictLearning(
n_kernels=n_kernels,
batch_size=batch_size,
n_iter=max_iter,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
verbose=1,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
# Batch learning
mp_range = range(1, cpu_count() + 1)
for p in mp_range:
print(
"\nProcessing ",
max_iter,
"iterations in batch mode, with",
p,
"processes:",
end="",
)
n_jobs = p
learned_dict = MultivariateDictLearning(
n_kernels=n_kernels,
max_iter=max_iter,
verbose=1,
n_nonzero_coefs=n_nonzero_coefs,
n_jobs=n_jobs,
learning_rate=learning_rate,
kernel_init_len=kernel_init_len,
dict_init=None,
random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)
print("Done benchmarking")
figname = "minibatch-performance"
print("Plotting results in", figname)
benchmarking_plot(figname, iter_time, plot_separator, minibatch_range, mp_range)
print("Exiting.")
| gpl-3.0 | -6,148,818,530,008,922,000 | 27.547511 | 84 | 0.592645 | false | 3.379218 | false | false | false |
tferr/ASA | scripting-examples/3D_Analysis_ImageStack.py | 1 | 2179 | #@ImagePlus imp
#@LogService log
'''
This script uses an outdated API. For a modern replacement, have a look at
https://github.com/morphonets/SNT/tree/master/src/main/resources/script_templates/Neuroanatomy
'''
from sholl import Sholl_Analysis
from sholl import Options
from os.path import expanduser
def spacedDistances(start, end, step):
"""Retrieves a list of Sholl sampling distances"""
leng = (end - start) / step + 1
return [start + i * step for i in range(leng)]
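# Illustrative example (comment only): spacedDistances(10, 100, 10) yields
# [10, 20, ..., 100]. This relies on Python 2 / Jython integer division for
# 'leng'; on Python 3 an explicit int() cast would be needed.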
# x,y,z coordinates of center of analysis
xc, yc, zc = 100, 100, 10
# Threshold values for segmentation
lower_t, upper_t = 88, 255
# Definitions for sampling distances
start_radius, end_radius, step_size, = 10, 100, 10
# Destination directory for saving plots and tables
export_path = expanduser("~")
sa = Sholl_Analysis()
if sa.validateImage(imp):
# Specify plugin settings
sa.setDescription(imp.getTitle(), True)
sa.setExportPath(export_path, True)
sa.setInteractiveMode(False)
# Customize output options
so = Options()
so.setMetric(Options.MEDIAN_INTERS, False) # "Sholl Results" table
so.setPlotOutput(Options.NO_PLOTS) # Which plots should be generated?
so.setPromptChoice(Options.HIDE_SAVED_FILES, True) # Main prompt option
so.setPromptChoice(Options.OVERLAY_SHELLS, True) # Main prompt option
sa.setOptions(so)
# Specify analysis settings
sa.setCenter(xc, yc, zc)
sa.setThreshold(lower_t, upper_t)
# Retrieve intersection counts
distances = spacedDistances(start_radius, end_radius, step_size)
counts = sa.analyze3D(xc, yc, zc, distances, imp)
if all(c == 0 for c in counts):
log.warn("All intersection counts were zero")
else:
# Do something with sampled data if analysis was successful
for idx, inters in enumerate(counts):
log.info("r=%s: %s inters." % (distances[idx],inters))
# Retrieve metrics
sa.analyzeProfile(distances, counts, True)
log.info("Analysis finished. Files saved to %s" % export_path)
log.info("Sholl Results Table has not been saved")
else:
log.error(imp.getTitle() + " is not a valid image")
| gpl-3.0 | -3,595,713,342,764,342,300 | 28.849315 | 98 | 0.693437 | false | 3.480831 | false | false | false |
jschaul/ComplexNetworkSim | examples/getting started code/first_visualisation.py | 1 | 1471 | '''
Complete code file only from ComplexNetworkSim's "getting started" documentation section, for visualising a simulation. For explanations refer to the documentation page.
Current link: http://complexnetworksim.0sites.net/start.html (documentation hosting may change place - see the PyPi index page.)
@author: Joe Schaul <[email protected]>
'''
from ComplexNetworkSim import PlotCreator, AnimationCreator
directory = 'test' #location of simulation result files
myName = "SIR" #name that you wish to give your image output files
title = "Simulation of agent-based simple SIR"
#define three simulation-specific constants:
SUSCEPTIBLE = 0
INFECTED = 1
RECOVERED = 2
statesToMonitor = [INFECTED, SUSCEPTIBLE] #even if we have states 0,1,2,3,... plot only 1 and 0
colours = ["r", "g"] #state 1 in red, state 0 in green
labels = ["Infected", "Susceptible"] #state 1 named 'Infected', 0 named 'Susceptible'
mapping = {SUSCEPTIBLE:"w", INFECTED:"r", RECOVERED:"0.4"}
trialToVisualise = 0
p = PlotCreator(directory, myName, title, statesToMonitor, colours, labels)
p.plotSimulation(show=False)
#show=True shows the graph directly,
#otherwise only a png file is created in the directory defined above.
visualiser = AnimationCreator(directory, myName, title, mapping, trial=trialToVisualise)
#gif speed can be changed by giving a parameter 'delay' (default=100) to AnimationCreator
visualiser.create_gif(verbose=True) | bsd-2-clause | 2,120,398,871,978,929,200 | 44.03125 | 170 | 0.743712 | false | 3.358447 | false | false | false |
jonfoster/pyxb1 | pyxb/__init__.py | 1 | 10123 | """PyXB stands for Python U{W3C XML
Schema<http://www.w3.org/XML/Schema>} Bindings, and is pronounced
"pixbee". It enables translation between XML instance documents and
Python objects following rules specified by an XML Schema document.
This is the top-level entrypoint to the PyXB system. Importing this
gets you all the L{exceptions<pyxb.exceptions_.PyXBException>}, and
L{pyxb.namespace}. For more functionality, delve into these
submodules:
- L{pyxb.xmlschema} Module holding the
L{structures<pyxb.xmlschema.structures>} that convert XMLSchema
from a DOM model to a Python class model based on the XMLSchema
components. Use this when you need to operate on the component
model.
- L{pyxb.binding} Module used to generate the bindings and at runtime
to support the generated bindings. Use this if you need to use the
binding model or content model.
- L{pyxb.utils} Common utilities used in parsing, generating, and
executing. The submodules must be imported separately.
"""
import logging
_log = logging.getLogger(__name__)
class cscRoot (object):
"""This little bundle of joy exists because in Python 2.6 it
became an error to invoke C{object.__init__} with parameters (unless
you also override C{__new__}, in which case it's only a warning.
Whatever.). Since I'm bloody not going to check in every class
whether C{super(Myclass,self)} refers to C{object} (even if I could
figure out how to do that, 'cuz the obvious solutions don't work),
we'll just make this thing the root of all U{cooperative super
calling<http://www.geocities.com/foetsch/python/new_style_classes.htm#super>}
hierarchies. The standard syntax in PyXB for this pattern is::
def method_csc (self, *args, **kw):
super_fn = getattr(super(ThisClass, self), 'method_csc', lambda *a,**kw: self)
return super_fn(*args, **kw)
"""
def __init__ (self, *args, **kw):
# Oh gross. If this class descends from list (and probably dict), we
# get here when object is *not* our direct superclass. In that case,
# we have to pass the arguments on up, or the strings don't get
# created right. Below is the only way I've figured out to detect the
# situation.
#
# Note that we might also get here if you mix-in a class that used
# object as a parent instead of cscRoot. Don't do that. Printing the
# mro() is a decent way of identifying the problem.
if issubclass(self.__class__.mro()[-2], ( list, dict )):
super(cscRoot, self).__init__(*args)
__version__ = '1.1.5-DEV'
"""The version of PyXB"""
__url__ = 'http://pyxb.sourceforge.net'
"""The URL for PyXB's homepage"""
__license__ = 'Apache License 2.0'
# Bring in the exception hierarchy
from exceptions_ import *
# Bring in namespace stuff
import namespace
class BIND (object):
"""Bundle data for automated binding generation.
Instances of this class capture positional and keyword arguments that are
used to create binding instances based on context. For example, if C{w}
is an instance of a complex type whose C{option} element is declared to be
an anonymous class with simple content of type integer and an attribute of
C{units}, a correct assignment to that element could be achieved with::
w.option = BIND(54, units="m")
"""
__args = None
__kw = None
def __init__ (self, *args, **kw):
"""Cache parameters for subsequent binding creation.
Invoke just as you would the factory for a binding class."""
self.__args = args
self.__kw = kw
def createInstance (self, factory, **kw):
"""Invoke the given factory method.
Position arguments to the factory are those cached in this instance.
Keyword arguments are the ones on the command line, updated from the
ones in this instance."""
kw.update(self.__kw)
return factory(*self.__args, **kw)
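# Illustrative sketch (added comment, not part of the original source): the
# binding infrastructure resolves a BIND placeholder roughly like this, where
# ``element_factory`` stands for the binding class of the target element (a
# hypothetical name used only in this example).
#
#   deferred = BIND(54, units="m")
#   instance = deferred.createInstance(element_factory)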
XMLStyle_minidom = 0
"""Use xml.dom.minidom for XML processing. This is the fastest, but does not
provide location information. It produces DOM instances."""
XMLStyle_saxdom = 1
"""Use pyxb.utils.saxdom for XML processing. This is the slowest, but both
provides location information and generates a DOM instance."""
XMLStyle_saxer = 2
"""Use pyxb.binding.saxer when converting documents to binding instances.
This style supports location information in the bindings. It produces binding
instances directly, without going through a DOM stage, so is faster than
XMLStyle_saxdom. However, since the pyxb.xmlschema.structures classes require
a DOM model, XMLStyle_saxdom will be used for pyxb.utils.domutils.StringToDOM
if this style is selected."""
_XMLStyle = XMLStyle_saxer
"""The current XML processing style."""
_XMLStyleMap = { 'minidom' : XMLStyle_minidom,
'saxdom' : XMLStyle_saxdom,
'saxer' : XMLStyle_saxer }
_XMLStyleMapReverse = dict([ (_v, _k) for (_k, _v) in _XMLStyleMap.items() ])
_XMLStyle_envvar = 'PYXB_XML_STYLE'
def _SetXMLStyle (style=None):
"""Set the interface used to parse XML content.
This can be invoked within code. The system default of L{XMLStyle_saxer}
can also be overridden at runtime by setting the environment variable
C{PYXB_XML_STYLE} to one of C{minidom}, C{saxdom}, or C{saxer}.
@param style: One of L{XMLStyle_minidom}, L{XMLStyle_saxdom},
L{XMLStyle_saxer}. If not provided, the system default is used.
"""
global _XMLStyle
if style is None:
import os
style_name = os.environ.get(_XMLStyle_envvar)
if style_name is None:
style_name = 'saxer'
style = _XMLStyleMap.get(style_name)
if style is None:
raise PyXBException('Bad value "%s" for %s' % (style_name, _XMLStyle_envvar))
if _XMLStyleMapReverse.get(style) is None:
raise PyXBException('Bad value %s for _SetXMLStyle' % (style,))
_XMLStyle = style
#_log.debug("XML style %s", _XMLStyleMapReverse.get(_XMLStyle))
_SetXMLStyle()
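# Illustrative usage (added comment): the parser backend can be selected from
# code or from the environment, e.g.
#
#   _SetXMLStyle(XMLStyle_saxdom)       # explicit, from code
#   # or, before starting Python:   export PYXB_XML_STYLE=minidom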
# Global flag that we can use to determine whether optimization is active in
# this session. There may be cases where we can bypass methods that just
# check for things we don't care about in an optimized context
_OptimizationActive = False
try:
assert False
_OptimizationActive = True
except:
pass
_CorruptionDetectionEnabled = not _OptimizationActive
"""If C{True}, blocks attempts to assign to attributes that are reserved for
PyXB methods.
Applies only at compilation time; dynamic changes are ignored.
"""
_GenerationRequiresValid = True
def RequireValidWhenGenerating (value=None):
"""Query or set a flag that controls validation checking in XML generation.
Normally any attempts to convert a binding instance to a DOM or XML
representation requires that the binding validate against the content
model, since only in this way can the content be generated in the correct
order. In some cases it may be necessary or useful to generate a document
from a binding that is incomplete. If validation is not required, the
generated documents may not validate even if the content validates,
because ordering constraints will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that instances
validate before being converted to XML.
@type value: C{bool}
@return: C{True} iff attempts to generate XML for a binding that does not
validate should raise an exception. """
global _GenerationRequiresValid
if value is None:
return _GenerationRequiresValid
if not isinstance(value, bool):
raise TypeError(value)
_GenerationRequiresValid = value
return _GenerationRequiresValid
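# Illustrative usage (added comment): temporarily disable validation while
# serializing an intentionally incomplete binding instance, then restore it.
#
#   pyxb.RequireValidWhenGenerating(False)
#   xml = instance.toxml()    # 'instance' is any generated binding instance
#   pyxb.RequireValidWhenGenerating(True)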
_ParsingRequiresValid = True
def RequireValidWhenParsing (value=None):
"""Query or set a flag that controls validation checking in XML parsing.
Normally any attempts to convert XML to a binding instance to a binding
instance requires that the document validate against the content model.
In some cases it may be necessary or useful to process a document that is
incomplete. If validation is not required, the generated documents may
not validate even if the content validates, because ordering constraints
will be ignored.
@keyword value: If absent or C{None}, no change is made; otherwise, this
enables (C{True}) or disables (C{False}) the requirement that documents
validate when being converted to bindings.
@type value: C{bool}
@return: C{True} iff attempts to generate bindings for a document that
does not validate should raise an exception."""
global _ParsingRequiresValid
if value is None:
return _ParsingRequiresValid
if not isinstance(value, bool):
raise TypeError(value)
_ParsingRequiresValid = value
return _ParsingRequiresValid
_PreserveInputTimeZone = False
def PreserveInputTimeZone (value=None):
"""Control whether time values are converted to UTC during input.
The U{specification <http://www.w3.org/TR/xmlschema-2/#dateTime>} makes
clear that timezoned times are in UTC and that times in other timezones
are to be translated to UTC when converted from literal to value form.
Provide an option to bypass this step, so the input timezone is preserved.
@note: Naive processing of unnormalized times--i.e., ignoring the
C{tzinfo} field--may result in errors."""
global _PreserveInputTimeZone
if value is None:
return _PreserveInputTimeZone
if not isinstance(value, bool):
raise TypeError(value)
_PreserveInputTimeZone = value
return _PreserveInputTimeZone
_OutputEncoding = 'utf-8'
"""Default unicode encoding to use when creating output.
Material being written to an XML parser is not output."""
_InputEncoding = 'utf-8'
"""Default unicode encoding to assume when decoding input.
Material being written to an XML parser is treated as input."""
## Local Variables:
## fill-column:78
## End:
| apache-2.0 | -4,845,504,037,243,957,000 | 38.236434 | 89 | 0.708683 | false | 4.039505 | false | false | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/PySide/examples/widgets/analogclock.py | 1 | 3131 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PySide import QtCore, QtGui
class AnalogClock(QtGui.QWidget):
hourHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -40)
])
minuteHand = QtGui.QPolygon([
QtCore.QPoint(7, 8),
QtCore.QPoint(-7, 8),
QtCore.QPoint(0, -70)
])
hourColor = QtGui.QColor(127, 0, 127)
minuteColor = QtGui.QColor(0, 127, 127, 191)
def __init__(self, parent=None):
super(AnalogClock, self).__init__(parent)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update)
timer.start(1000)
self.setWindowTitle("Analog Clock")
self.resize(200, 200)
def paintEvent(self, event):
side = min(self.width(), self.height())
time = QtCore.QTime.currentTime()
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
painter.scale(side / 200.0, side / 200.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(AnalogClock.hourColor)
painter.save()
painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
painter.drawConvexPolygon(AnalogClock.hourHand)
painter.restore()
painter.setPen(AnalogClock.hourColor)
for i in range(12):
painter.drawLine(88, 0, 96, 0)
painter.rotate(30.0)
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(AnalogClock.minuteColor)
painter.save()
painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
painter.drawConvexPolygon(AnalogClock.minuteHand)
painter.restore()
painter.setPen(AnalogClock.minuteColor)
for j in range(60):
if (j % 5) != 0:
painter.drawLine(92, 0, 96, 0)
painter.rotate(6.0)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
clock = AnalogClock()
clock.show()
sys.exit(app.exec_())
| epl-1.0 | -2,085,368,576,522,494,700 | 30 | 77 | 0.608432 | false | 3.749701 | false | false | false |
google/clusterfuzz | src/local/butler/reproduce_tool/android.py | 1 | 3980 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android emulator installation and management."""
import os
import time
from local.butler.reproduce_tool import errors
from local.butler.reproduce_tool import prompts
from platforms.android import adb
from platforms.android import device
from system import environment
from system import new_process
ADB_DEVICES_SEPARATOR_STRING = 'List of devices attached'
EMULATOR_RELATIVE_PATH = os.path.join('local', 'bin', 'android-sdk', 'emulator',
'emulator')
def start_emulator():
"""Return a ProcessRunner configured to start the Android emulator."""
root_dir = environment.get_value('ROOT_DIR')
runner = new_process.ProcessRunner(
os.path.join(root_dir, EMULATOR_RELATIVE_PATH),
['-avd', 'TestImage', '-writable-system', '-partition-size', '2048'])
emulator_process = runner.run()
# If we run adb commands too soon after the emulator starts, we may see
# flake or errors. Delay a short while to account for this.
# TODO(mbarbella): This is slow and flaky, but wait-for-device isn't usable if
# another device is connected (as we don't know the serial yet). Find a better
# solution.
time.sleep(30)
return emulator_process
def get_devices():
"""Get a list of all connected Android devices."""
adb_runner = new_process.ProcessRunner(adb.get_adb_path())
result = adb_runner.run_and_wait(additional_args=['devices'])
if result.return_code:
raise errors.ReproduceToolUnrecoverableError('Unable to run adb.')
# Ignore non-device lines (those before "List of devices attached").
store_devices = False
devices = []
for line in result.output.splitlines():
if line == ADB_DEVICES_SEPARATOR_STRING:
store_devices = True
continue
if not store_devices or not line:
continue
devices.append(line.split()[0])
return devices
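# Illustrative sketch (comment only): selecting a target device when exactly
# one is attached, mirroring what prepare_environment does below.
#
#   devices = get_devices()
#   if len(devices) == 1:
#     environment.set_value('ANDROID_SERIAL', devices[0])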
def prepare_environment(disable_android_setup):
"""Additional environment overrides needed to run on an Android device."""
environment.set_value('OS_OVERRIDE', 'ANDROID')
# Bail out if we can't determine which Android device to use.
serial = environment.get_value('ANDROID_SERIAL')
if not serial:
devices = get_devices()
if len(devices) == 1:
serial = devices[0]
environment.set_value('ANDROID_SERIAL', serial)
elif not devices:
raise errors.ReproduceToolUnrecoverableError(
'No connected Android devices were detected. Run with the -e '
'argument to use an emulator.')
else:
raise errors.ReproduceToolUnrecoverableError(
'You have multiple Android devices or emulators connected. Please '
'set the ANDROID_SERIAL environment variable and try again.\n\n'
'Attached devices: ' + ', '.join(devices))
print('Warning: this tool will make changes to settings on the connected '
'Android device with serial {serial} that could result in data '
'loss.'.format(serial=serial))
willing_to_continue = prompts.get_boolean(
'Are you sure you want to continue?')
if not willing_to_continue:
raise errors.ReproduceToolUnrecoverableError(
'Bailing out to avoid changing settings on the connected device.')
# Push the test case and build APK to the device.
apk_path = environment.get_value('APP_PATH')
device.update_build(
apk_path, should_initialize_device=not disable_android_setup)
device.push_testcases_to_device()
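# Hypothetical driver sketch (not part of this module): a reproduce run would
# typically start the emulator first, then prepare the environment, e.g.
#   emulator_process = start_emulator()
#   prepare_environment(disable_android_setup=False)
#   ...  # reproduce the test case, then clean up emulator_process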
| apache-2.0 | -1,405,921,316,545,967,000 | 35.851852 | 80 | 0.711809 | false | 3.972056 | false | false | false |
lostinplace/filtered-intervaltree | setup.py | 1 | 1509 | from setuptools import setup, find_packages
from codecs import open
from os import path
import os
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'readme.md'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, '.library-version'), encoding='utf-8') as f:
existing_version = f.read()
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
requirements = f.read().split('\n')
env_version = os.environ.get('LIBVER')
version = env_version or existing_version
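# Example: `LIBVER=1.2.3 python setup.py sdist` builds version 1.2.3; without
# LIBVER, the version recorded in .library-version is used.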
setup(
name='filtered-intervaltree',
version=version,
description='an intervaltree with early exit bloom filters',
long_description=long_description,
url='https://github.com/lostinplace/filtered-intervaltree',
author='cwheeler',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
requires=[],
keywords='rbtree intervaltree bloomfilter',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['test']),
install_requires=requirements,
extras_require={
'test': ['coverage'],
}
)
 | mit | 3,768,461,388,915,031,600 | 26.962963 | 72 | 0.666667 | false | 3.859335 | false | true | false |
Kpler/scrapy-logentries-extension | scrapylogentries/extension.py | 1 | 1989 | import logging
import os
from scrapy import signals
from scrapy.exceptions import NotConfigured
from logentries import LogentriesHandler
from logentriesadapter import LogentriesAdapter, ScrapingHubFilter
logger = logging.getLogger(__name__)
class LogentriesExtension(object):
def __init__(self, token):
self.token = token
root = logging.getLogger()
self.handler = LogentriesHandler(token)
spider_id = os.environ.get('SCRAPY_SPIDER_ID')
project_id = os.environ.get('SCRAPY_PROJECT_ID')
job_id = os.environ.get('SCRAPY_JOB_ID')
formatted = False
if job_id is not None:
formatted = True
filter = ScrapingHubFilter({
'project_id': project_id,
'spider_id': spider_id,
'job_id': job_id,
})
format = "%(name)s - %(levelname)s - [project_id=%(project_id)s spider_id=%(spider_id)s job_id=%(job_id)s] %(message)s"
formatter = logging.Formatter(format)
self.handler.addFilter(filter)
self.handler.setFormatter(formatter)
root.addHandler(self.handler)
# NCA: not sure we want sensitive information like the token in the logs
# Maybe use debug log level instead
if formatted:
logger.info('Logentries activated with token {} and custom SH format'.format(token))
else:
logger.info('Logentries activated with token {} and no custom SH format'.format(token))
@classmethod
def from_crawler(cls, crawler):
# first check if the extension should be enabled and raise
# NotConfigured otherwise
token = crawler.settings.get('LOGENTRIES_TOKEN')
if not token:
raise NotConfigured
# instantiate the extension object
ext = cls(token)
# return the extension object
return ext
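# Sketch of enabling this extension in a Scrapy project; the dotted path is
# assumed from this file's location in the scrapylogentries package:
#
#   # settings.py
#   EXTENSIONS = {
#       'scrapylogentries.extension.LogentriesExtension': 500,
#   }
#   LOGENTRIES_TOKEN = 'your-logentries-token'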
# vim: syntax=python:sws=4:sw=4:et:
| mit | 7,340,216,088,831,452,000 | 30.078125 | 131 | 0.612871 | false | 4.240938 | false | false | false |
teracyhq/flask-classy | test_classful/test_decorators.py | 2 | 7323 | from flask import Flask
from .view_classes import DecoratedView
from .view_classes import DecoratedBoldListView
from .view_classes import DecoratedBoldItalicsListView
from .view_classes import DecoratedListMemberView
from .view_classes import DecoratedListFunctionAttributesView
from .view_classes import DecoratedListMemberFunctionAttributesView
from .view_classes import DecoratedAppendClassAttributeView
from nose.tools import eq_
app = Flask("decorated")
DecoratedView.register(app)
DecoratedBoldListView.register(app)
DecoratedBoldItalicsListView.register(app)
DecoratedListMemberView.register(app)
DecoratedListFunctionAttributesView.register(app)
DecoratedListMemberFunctionAttributesView.register(app)
DecoratedAppendClassAttributeView.register(app)
client = app.test_client()
def test_func_decorator_index():
resp = client.get('/decorated/')
eq_(b"Index", resp.data)
resp = client.get('/decorated')
eq_(resp.status_code, 308)
def test_func_decorator_get():
resp = client.get('/decorated/1234/')
eq_(b"Get 1234", resp.data)
resp = client.get('/decorated/1234')
eq_(resp.status_code, 308)
def test_recursive_decorator_post():
resp = client.post('/decorated/')
eq_(b"Post", resp.data)
resp = client.post('/decorated')
eq_(resp.status_code, 308)
def test_more_recursive_decorator_get():
resp = client.get('/decorated/get_some/')
eq_(b"Get Some", resp.data)
resp = client.get('/decorated/get_some')
eq_(resp.status_code, 308)
def test_multiple_recursive_decorators_get():
resp = client.get('/decorated/get_this/')
eq_(b"Get This", resp.data)
resp = client.get('/decorated/get_this')
eq_(resp.status_code, 308)
def test_routes_with_recursive_decorators():
resp = client.get('/decorated/mixitup/')
eq_(b"Mix It Up", resp.data)
resp = client.get('/decorated/mixitup')
eq_(resp.status_code, 308)
def test_recursive_with_parameter():
resp = client.get('/decorated/someval/1234/')
eq_(b"Someval 1234", resp.data)
def test_recursive_with_route_with_parameter():
resp = client.get('/decorated/anotherval/1234/')
eq_(b"Anotherval 1234", resp.data)
def test_params_decorator():
resp = client.get('/decorated/params_decorator_method/')
eq_(b"Params Decorator", resp.data)
def test_params_decorator_delete():
resp = client.delete('/decorated/1234/')
eq_(b"Params Decorator Delete 1234", resp.data)
resp = client.delete('/decorated/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_list_get():
"""Tests that the get route is wrapped in bold"""
resp = client.get('/decorated_bold_list_view/1234/')
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b>Get 1234</b>', resp.data)
resp = client.get('/decorated_bold_list_view/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_list_index():
"""Tests that the index route is wrapped in bold"""
resp = client.get('/decorated_bold_list_view/')
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b>Index</b>', resp.data)
def test_decorator_bold_italics_list_get():
"""Tests that the get route is wrapped in bold and italics"""
resp = client.get('/decorated_bold_italics_list_view/1234/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b><i>Get 1234</i></b>', resp.data)
resp = client.get('/decorated_bold_italics_list_view/1234')
eq_(resp.status_code, 308)
def test_decorator_bold_italics_list_index():
"""Tests that the index route is wrapped in bold and italics"""
resp = client.get('/decorated_bold_italics_list_view/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<b><i>Index</i></b>', resp.data)
def test_decorator_list_member_index():
"""
    Tests that the index route is wrapped in bold and italics,
    but not in paragraph tags
"""
resp = client.get('/decorated_list_member_view/')
eq_(b'<i>' in resp.data, True)
eq_(b'</i>' in resp.data, True)
eq_(b'<b>' in resp.data, True)
eq_(b'</b>' in resp.data, True)
eq_(b'<p>' not in resp.data, True)
eq_(b'</p>' not in resp.data, True)
eq_(b'<b><i>Index</i></b>', resp.data)
def test_decorator_list_member_get():
"""Tests the ordering of decorators"""
resp = client.get('/decorated_list_member_view/1234/')
eq_(b'<b>', resp.data[:3])
eq_(b'<i>', resp.data[3:6])
eq_(b'<p>', resp.data[6:9])
eq_(b'</p>', resp.data[-12:-8])
eq_(b'</i>', resp.data[-8:-4])
eq_(b'</b>', resp.data[-4:])
eq_(b'<b><i><p>Get 1234</p></i></b>', resp.data)
resp = client.get('/decorated_list_member_view/1234')
eq_(resp.status_code, 308)
def test_decorator_list_function_attributes_get():
"""
    Verify that a list of decorators with attributes modifies all functions
    in the FlaskView
"""
resp = client.get('/decorated_list_function_attributes_view/1234/')
eq_(b'Get 1234' in resp.data, True)
eq_(b'<i><b>Get 1234</b></i>', resp.data)
eq_(hasattr(
app.view_functions['DecoratedListFunctionAttributesView:get'],
'eggs'),
True)
eq_('scrambled',
app.view_functions['DecoratedListFunctionAttributesView:get'].eggs)
resp = client.get('/decorated_list_function_attributes_view/1234')
eq_(resp.status_code, 308)
def test_decorator_list_function_attributes_index():
"""
    Verify that a list of decorators with attributes modifies all functions
    in the FlaskView
"""
resp = client.get('/decorated_list_function_attributes_view/')
eq_(b'Index' in resp.data, True)
eq_(b'<i>Index</i>', resp.data)
eq_(hasattr(
app.view_functions['DecoratedListFunctionAttributesView:index'],
'eggs'),
True)
eq_('scrambled',
app.view_functions['DecoratedListFunctionAttributesView:index'].eggs)
def test_decorator_list_member_function_attributes_get():
"""Verify decorator with attributes does not modify other members"""
resp = client.get('/decorated_list_member_function_attributes_view/4321/')
eq_(b'Get 4321' in resp.data, True)
eq_(b'<i><b>Get 4321</b></i>', resp.data)
eq_(
hasattr(
app.view_functions[
'DecoratedListMemberFunctionAttributesView:get'
], 'eggs'),
False)
resp = client.get('/decorated_list_member_function_attributes_view/4321')
eq_(resp.status_code, 308)
def test_decorator_list_member_function_attributes_index():
"""Verify decorator with attributes modify decorated memeber functions"""
resp = client.get('/decorated_list_member_function_attributes_view/')
eq_(b'Index' in resp.data, True)
eq_(b'<i>Index</i>', resp.data)
eq_(hasattr(
app.view_functions[
'DecoratedListMemberFunctionAttributesView:index'
], 'eggs'),
True)
eq_('scrambled',
app.view_functions[
'DecoratedListMemberFunctionAttributesView:index'
].eggs)
def test_decorator_append_class_attribute_index():
resp = client.get('/decorated_append_class_attribute_view/')
eq_(b'Index (this is a test)', resp.data)
| bsd-3-clause | 495,681,920,520,354,400 | 32.286364 | 79 | 0.654377 | false | 3.319583 | true | false | false |
iwm911/plaso | plaso/formatters/mcafeeav.py | 1 | 1238 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for the McAfee AV Logs files."""
from plaso.lib import eventdata
class McafeeAccessProtectionLogEventFormatter(eventdata.EventFormatter):
"""Class that formats the McAfee Access Protection Log events."""
DATA_TYPE = 'av:mcafee:accessprotectionlog'
# The format string.
FORMAT_STRING = (u'File Name: {filename} User: {username} {trigger_location} '
u'{status} {rule} {action}')
FORMAT_STRING_SHORT = u'{filename} {action}'
SOURCE_LONG = 'McAfee Access Protection Log'
SOURCE_SHORT = 'LOG'
| apache-2.0 | -6,182,131,290,445,977,000 | 35.411765 | 80 | 0.731018 | false | 3.77439 | false | false | false |
jarrodmcc/OpenFermion | src/openfermion/utils/_sparse_tools_test.py | 1 | 55877 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sparse_tools.py."""
from __future__ import absolute_import, division
import numpy
import unittest
from numpy.linalg import multi_dot
from scipy.linalg import eigh, norm
from scipy.sparse import csc_matrix
from scipy.special import comb
from openfermion.hamiltonians import (fermi_hubbard, jellium_model,
wigner_seitz_length_scale)
from openfermion.ops import FermionOperator, up_index, down_index
from openfermion.transforms import (get_fermion_operator, get_sparse_operator,
jordan_wigner)
from openfermion.utils import (
Grid, fourier_transform, normal_ordered, number_operator)
from openfermion.utils._jellium_hf_state import (
lowest_single_particle_energy_states)
from openfermion.utils._linear_qubit_operator import LinearQubitOperator
from openfermion.utils._slater_determinants_test import (
random_quadratic_hamiltonian)
from openfermion.utils._sparse_tools import *
class SparseOperatorTest(unittest.TestCase):
def test_kronecker_operators(self):
self.assertAlmostEqual(
0, numpy.amax(numpy.absolute(
kronecker_operators(3 * [identity_csc]) -
kronecker_operators(3 * [pauli_x_csc]) ** 2)))
def test_qubit_jw_fermion_integration(self):
# Initialize a random fermionic operator.
fermion_operator = FermionOperator(((3, 1), (2, 1), (1, 0), (0, 0)),
-4.3)
fermion_operator += FermionOperator(((3, 1), (1, 0)), 8.17)
fermion_operator += 3.2 * FermionOperator()
# Map to qubits and compare matrix versions.
qubit_operator = jordan_wigner(fermion_operator)
qubit_sparse = get_sparse_operator(qubit_operator)
qubit_spectrum = sparse_eigenspectrum(qubit_sparse)
fermion_sparse = jordan_wigner_sparse(fermion_operator)
fermion_spectrum = sparse_eigenspectrum(fermion_sparse)
self.assertAlmostEqual(0., numpy.amax(
numpy.absolute(fermion_spectrum - qubit_spectrum)))
class JordanWignerSparseTest(unittest.TestCase):
def test_jw_sparse_0create(self):
expected = csc_matrix(([1], ([1], [0])), shape=(2, 2))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^')).A,
expected.A))
def test_jw_sparse_1annihilate(self):
expected = csc_matrix(([1, -1], ([0, 2], [1, 3])), shape=(4, 4))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('1')).A,
expected.A))
def test_jw_sparse_0create_2annihilate(self):
expected = csc_matrix(([-1j, 1j],
([4, 6], [1, 3])),
shape=(8, 8))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 2', -1j)).A,
expected.A))
def test_jw_sparse_0create_3annihilate(self):
expected = csc_matrix(([-1j, 1j, 1j, -1j],
([8, 10, 12, 14], [1, 3, 5, 7])),
shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('0^ 3', -1j)).A,
expected.A))
def test_jw_sparse_twobody(self):
expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16))
self.assertTrue(numpy.allclose(
jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A,
expected.A))
def test_qubit_operator_sparse_n_qubits_too_small(self):
with self.assertRaises(ValueError):
qubit_operator_sparse(QubitOperator('X3'), 1)
def test_qubit_operator_sparse_n_qubits_not_specified(self):
expected = csc_matrix(([1, 1, 1, 1], ([1, 0, 3, 2], [0, 1, 2, 3])),
shape=(4, 4))
self.assertTrue(numpy.allclose(
qubit_operator_sparse(QubitOperator('X1')).A,
expected.A))
def test_get_linear_qubit_operator_diagonal_wrong_n(self):
"""Testing with wrong n_qubits."""
with self.assertRaises(ValueError):
get_linear_qubit_operator_diagonal(QubitOperator('X3'), 1)
def test_get_linear_qubit_operator_diagonal_0(self):
"""Testing with zero term."""
qubit_operator = QubitOperator.zero()
vec_expected = numpy.zeros(8)
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator, 3), vec_expected))
def test_get_linear_qubit_operator_diagonal_zero(self):
"""Get zero diagonals from get_linear_qubit_operator_diagonal."""
qubit_operator = QubitOperator('X0 Y1')
vec_expected = numpy.zeros(4)
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_non_zero(self):
"""Get non zero diagonals from get_linear_qubit_operator_diagonal."""
qubit_operator = QubitOperator('Z0 Z2')
vec_expected = numpy.array([1, -1, 1, -1, -1, 1, -1, 1])
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_cmp_zero(self):
"""Compare get_linear_qubit_operator_diagonal with
get_linear_qubit_operator."""
qubit_operator = QubitOperator('Z1 X2 Y5')
vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) *
numpy.eye(2 ** 6))
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
def test_get_linear_qubit_operator_diagonal_cmp_non_zero(self):
"""Compare get_linear_qubit_operator_diagonal with
get_linear_qubit_operator."""
qubit_operator = QubitOperator('Z1 Z2 Z5')
vec_expected = numpy.diag(LinearQubitOperator(qubit_operator) *
numpy.eye(2 ** 6))
self.assertTrue(numpy.allclose(
get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
class ComputationalBasisStateTest(unittest.TestCase):
def test_computational_basis_state(self):
comp_basis_state = jw_configuration_state([0, 2, 5], 7)
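        # With 7 qubits, orbital i maps to bit (6 - i), so occupying orbitals
        # {0, 2, 5} selects basis index 2**6 + 2**4 + 2**1 = 82.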
self.assertAlmostEqual(comp_basis_state[82], 1.)
self.assertAlmostEqual(sum(comp_basis_state), 1.)
class JWHartreeFockStateTest(unittest.TestCase):
def test_jw_hartree_fock_state(self):
hartree_fock_state = jw_hartree_fock_state(3, 7)
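        # Filling the lowest 3 of 7 orbitals selects basis index
        # 2**6 + 2**5 + 2**4 = 112, which the assertion below checks.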
self.assertAlmostEqual(hartree_fock_state[112], 1.)
self.assertAlmostEqual(sum(hartree_fock_state), 1.)
class JWNumberIndicesTest(unittest.TestCase):
def test_jw_sparse_index(self):
"""Test the indexing scheme for selecting specific particle numbers"""
expected = [1, 2]
calculated_indices = jw_number_indices(1, 2)
self.assertEqual(expected, calculated_indices)
expected = [3]
calculated_indices = jw_number_indices(2, 2)
self.assertEqual(expected, calculated_indices)
def test_jw_number_indices(self):
n_qubits = numpy.random.randint(1, 12)
n_particles = numpy.random.randint(n_qubits + 1)
number_indices = jw_number_indices(n_particles, n_qubits)
subspace_dimension = len(number_indices)
self.assertEqual(subspace_dimension, comb(n_qubits, n_particles))
for index in number_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
n_ones = binary_string.count('1')
self.assertEqual(n_ones, n_particles)
class JWSzIndicesTest(unittest.TestCase):
def test_jw_sz_indices(self):
"""Test the indexing scheme for selecting specific sz value"""
def sz_integer(bitstring):
"""Computes the total number of occupied up sites
minus the total number of occupied down sites."""
n_sites = len(bitstring) // 2
n_up = len([site for site in range(n_sites)
if bitstring[up_index(site)] == '1'])
n_down = len([site for site in range(n_sites)
if bitstring[down_index(site)] == '1'])
return n_up - n_down
def jw_sz_indices_brute_force(sz_value, n_qubits):
"""Computes the correct indices by brute force."""
indices = []
for bitstring in itertools.product(['0', '1'], repeat=n_qubits):
if (sz_integer(bitstring) ==
int(2 * sz_value)):
indices.append(int(''.join(bitstring), 2))
return indices
# General test
n_sites = numpy.random.randint(1, 10)
n_qubits = 2 * n_sites
sz_int = ((-1) ** numpy.random.randint(2) *
numpy.random.randint(n_sites + 1))
sz_value = sz_int / 2.
correct_indices = jw_sz_indices_brute_force(sz_value, n_qubits)
subspace_dimension = len(correct_indices)
calculated_indices = jw_sz_indices(sz_value, n_qubits)
self.assertEqual(len(calculated_indices), subspace_dimension)
for index in calculated_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
self.assertEqual(sz_integer(binary_string), sz_int)
# Test fixing particle number
n_particles = abs(sz_int)
correct_indices = [index for index in correct_indices
if bin(index)[2:].count('1') == n_particles]
subspace_dimension = len(correct_indices)
calculated_indices = jw_sz_indices(sz_value, n_qubits,
n_electrons=n_particles)
self.assertEqual(len(calculated_indices), subspace_dimension)
for index in calculated_indices:
binary_string = bin(index)[2:].zfill(n_qubits)
self.assertEqual(sz_integer(binary_string), sz_int)
self.assertEqual(binary_string.count('1'), n_particles)
# Test exceptions
with self.assertRaises(ValueError):
indices = jw_sz_indices(3, 3)
with self.assertRaises(ValueError):
indices = jw_sz_indices(3.1, 4)
with self.assertRaises(ValueError):
indices = jw_sz_indices(1.5, 8, n_electrons=6)
with self.assertRaises(ValueError):
indices = jw_sz_indices(1.5, 8, n_electrons=1)
class JWNumberRestrictOperatorTest(unittest.TestCase):
def test_jw_restrict_operator(self):
"""Test the scheme for restricting JW encoded operators to number"""
# Make a Hamiltonian that cares mostly about number of electrons
n_qubits = 6
target_electrons = 3
penalty_const = 100.
number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
bias_sparse = jordan_wigner_sparse(
sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i
in range(n_qubits)], FermionOperator()))
hamiltonian_sparse = penalty_const * (
number_sparse - target_electrons *
scipy.sparse.identity(2**n_qubits)).dot(
number_sparse - target_electrons *
scipy.sparse.identity(2**n_qubits)) + bias_sparse
restricted_hamiltonian = jw_number_restrict_operator(
hamiltonian_sparse, target_electrons, n_qubits)
true_eigvals, _ = eigh(hamiltonian_sparse.A)
test_eigvals, _ = eigh(restricted_hamiltonian.A)
self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]),
0.0)
def test_jw_restrict_operator_hopping_to_1_particle(self):
hop = FermionOperator('3^ 1') + FermionOperator('1^ 3')
hop_sparse = jordan_wigner_sparse(hop, n_qubits=4)
hop_restrict = jw_number_restrict_operator(hop_sparse, 1, n_qubits=4)
expected = csc_matrix(([1, 1], ([0, 2], [2, 0])), shape=(4, 4))
self.assertTrue(numpy.allclose(hop_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_1_particle(self):
interaction = FermionOperator('3^ 2^ 4 1')
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 1, n_qubits=6)
expected = csc_matrix(([], ([], [])), shape=(6, 6))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_2_particles(self):
interaction = (FermionOperator('3^ 2^ 4 1') +
FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 2, n_qubits=6)
dim = 6 * 5 // 2 # shape of new sparse array
# 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa;
# in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th.
expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_hopping_to_1_particle_default_nqubits(self):
interaction = (FermionOperator('3^ 2^ 4 1') +
FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
# n_qubits should default to 6
interaction_restrict = jw_number_restrict_operator(
interaction_sparse, 2)
dim = 6 * 5 // 2 # shape of new sparse array
# 3^ 2^ 4 1 maps 2**4 + 2 = 18 to 2**3 + 2**2 = 12 and vice versa;
# in the 2-particle subspace (1, 4) and (2, 3) are 7th and 9th.
expected = csc_matrix(([-1, -1], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_jellium_ground_state_integration(self):
n_qubits = 4
grid = Grid(dimensions=1, length=n_qubits, scale=1.0)
jellium_hamiltonian = jordan_wigner_sparse(
jellium_model(grid, spinless=False))
# 2 * n_qubits because of spin
number_sparse = jordan_wigner_sparse(number_operator(2 * n_qubits))
restricted_number = jw_number_restrict_operator(number_sparse, 2)
restricted_jellium_hamiltonian = jw_number_restrict_operator(
jellium_hamiltonian, 2)
energy, ground_state = get_ground_state(restricted_jellium_hamiltonian)
number_expectation = expectation(restricted_number, ground_state)
self.assertAlmostEqual(number_expectation, 2)
class JWSzRestrictOperatorTest(unittest.TestCase):
def test_restrict_interaction_hamiltonian(self):
"""Test restricting a coulomb repulsion Hamiltonian to a specified
Sz manifold."""
x_dim = 3
y_dim = 2
interaction_term = fermi_hubbard(x_dim, y_dim, 0., 1.)
interaction_sparse = get_sparse_operator(interaction_term)
sz_value = 2
interaction_restricted = jw_sz_restrict_operator(interaction_sparse,
sz_value)
restricted_interaction_values = set([
int(value.real) for value in interaction_restricted.diagonal()])
# Originally the eigenvalues run from 0 to 6 but after restricting,
# they should run from 0 to 2
self.assertEqual(restricted_interaction_values, {0, 1, 2})
class JWNumberRestrictStateTest(unittest.TestCase):
def test_jw_number_restrict_state(self):
n_qubits = numpy.random.randint(1, 12)
n_particles = numpy.random.randint(0, n_qubits)
number_indices = jw_number_indices(n_particles, n_qubits)
subspace_dimension = len(number_indices)
# Create a vector that has entry 1 for every coordinate with
# the specified particle number, and 0 everywhere else
vector = numpy.zeros(2**n_qubits, dtype=float)
vector[number_indices] = 1
# Restrict the vector
restricted_vector = jw_number_restrict_state(vector, n_particles)
# Check that it has the correct shape
self.assertEqual(restricted_vector.shape[0], subspace_dimension)
# Check that it has the same norm as the original vector
self.assertAlmostEqual(inner_product(vector, vector),
inner_product(restricted_vector,
restricted_vector))
class JWSzRestrictStateTest(unittest.TestCase):
def test_jw_sz_restrict_state(self):
n_sites = numpy.random.randint(1, 10)
n_qubits = 2 * n_sites
sz_int = ((-1) ** numpy.random.randint(2) *
numpy.random.randint(n_sites + 1))
sz_value = sz_int / 2
sz_indices = jw_sz_indices(sz_value, n_qubits)
subspace_dimension = len(sz_indices)
# Create a vector that has entry 1 for every coordinate in
# the specified subspace, and 0 everywhere else
vector = numpy.zeros(2**n_qubits, dtype=float)
vector[sz_indices] = 1
# Restrict the vector
restricted_vector = jw_sz_restrict_state(vector, sz_value)
# Check that it has the correct shape
self.assertEqual(restricted_vector.shape[0], subspace_dimension)
# Check that it has the same norm as the original vector
self.assertAlmostEqual(inner_product(vector, vector),
inner_product(restricted_vector,
restricted_vector))
class JWGetGroundStatesByParticleNumberTest(unittest.TestCase):
def test_jw_get_ground_state_at_particle_number_herm_conserving(self):
# Initialize a particle-number-conserving Hermitian operator
ferm_op = FermionOperator('0^ 1') + FermionOperator('1^ 0') + \
FermionOperator('1^ 2') + FermionOperator('2^ 1') + \
FermionOperator('1^ 3', -.4) + FermionOperator('3^ 1', -.4)
jw_hamiltonian = jordan_wigner(ferm_op)
sparse_operator = get_sparse_operator(jw_hamiltonian)
n_qubits = 4
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
def test_jw_get_ground_state_at_particle_number_hubbard(self):
model = fermi_hubbard(2, 2, 1.0, 4.0)
sparse_operator = get_sparse_operator(model)
n_qubits = count_qubits(model)
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
def test_jw_get_ground_state_at_particle_number_jellium(self):
grid = Grid(2, 2, 1.0)
model = jellium_model(grid, spinless=True, plane_wave=False)
sparse_operator = get_sparse_operator(model)
n_qubits = count_qubits(model)
num_op = get_sparse_operator(number_operator(n_qubits))
# Test each possible particle number
for particle_number in range(n_qubits):
# Get the ground energy and ground state at this particle number
energy, state = jw_get_ground_state_at_particle_number(
sparse_operator, particle_number)
# Check that it's an eigenvector with the correct eigenvalue
self.assertTrue(
numpy.allclose(sparse_operator.dot(state), energy * state))
# Check that it has the correct particle number
num = expectation(num_op, state)
self.assertAlmostEqual(num, particle_number)
class JWGetGaussianStateTest(unittest.TestCase):
def setUp(self):
self.n_qubits_range = range(2, 10)
def test_ground_state_particle_conserving(self):
"""Test getting the ground state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, True)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, ground_state = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = jw_get_gaussian_state(
quadratic_hamiltonian)
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_ground_state_particle_nonconserving(self):
"""Test getting the ground state of a Hamiltonian that does not
conserve particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Compute the true ground state
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
ground_energy, ground_state = get_ground_state(sparse_operator)
# Compute the ground state using the circuit
circuit_energy, circuit_state = (
jw_get_gaussian_state(quadratic_hamiltonian))
# Check that the energies match
self.assertAlmostEqual(ground_energy, circuit_energy)
# Check that the state obtained using the circuit is a ground state
difference = (sparse_operator * circuit_state -
ground_energy * circuit_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_conserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, True)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(
range(n_qubits), num_occupied_orbitals, False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_excited_state_particle_nonconserving(self):
"""Test getting an excited state of a Hamiltonian that conserves
particle number."""
for n_qubits in self.n_qubits_range:
# Initialize a non-particle-number-conserving Hamiltonian
quadratic_hamiltonian = random_quadratic_hamiltonian(
n_qubits, False)
# Pick some orbitals to occupy
num_occupied_orbitals = numpy.random.randint(1, n_qubits + 1)
occupied_orbitals = numpy.random.choice(
range(n_qubits), num_occupied_orbitals, False)
# Compute the Gaussian state
circuit_energy, gaussian_state = jw_get_gaussian_state(
quadratic_hamiltonian, occupied_orbitals)
# Compute the true energy
orbital_energies, constant = (
quadratic_hamiltonian.orbital_energies())
energy = numpy.sum(orbital_energies[occupied_orbitals]) + constant
# Check that the energies match
self.assertAlmostEqual(energy, circuit_energy)
# Check that the state obtained using the circuit is an eigenstate
# with the correct eigenvalue
sparse_operator = get_sparse_operator(quadratic_hamiltonian)
difference = (sparse_operator * gaussian_state -
energy * gaussian_state)
discrepancy = numpy.amax(numpy.abs(difference))
self.assertAlmostEqual(discrepancy, 0)
def test_bad_input(self):
"""Test bad input."""
with self.assertRaises(ValueError):
energy, state = jw_get_gaussian_state('a')
class JWSparseGivensRotationTest(unittest.TestCase):
def test_bad_input(self):
with self.assertRaises(ValueError):
givens_matrix = jw_sparse_givens_rotation(0, 2, 1., 1., 5)
with self.assertRaises(ValueError):
givens_matrix = jw_sparse_givens_rotation(4, 5, 1., 1., 5)
class JWSlaterDeterminantTest(unittest.TestCase):
def test_hadamard_transform(self):
r"""Test creating the states
1 / sqrt(2) (a^\dagger_0 + a^\dagger_1) |vac>
and
1 / sqrt(2) (a^\dagger_0 - a^\dagger_1) |vac>.
"""
slater_determinant_matrix = numpy.array([[1., 1.]]) / numpy.sqrt(2.)
slater_determinant = jw_slater_determinant(slater_determinant_matrix)
self.assertAlmostEqual(slater_determinant[1],
slater_determinant[2])
self.assertAlmostEqual(abs(slater_determinant[1]),
1. / numpy.sqrt(2.))
self.assertAlmostEqual(abs(slater_determinant[0]), 0.)
self.assertAlmostEqual(abs(slater_determinant[3]), 0.)
slater_determinant_matrix = numpy.array([[1., -1.]]) / numpy.sqrt(2.)
slater_determinant = jw_slater_determinant(slater_determinant_matrix)
self.assertAlmostEqual(slater_determinant[1],
-slater_determinant[2])
self.assertAlmostEqual(abs(slater_determinant[1]),
1. / numpy.sqrt(2.))
self.assertAlmostEqual(abs(slater_determinant[0]), 0.)
self.assertAlmostEqual(abs(slater_determinant[3]), 0.)
class GroundStateTest(unittest.TestCase):
def test_get_ground_state_hermitian(self):
ground = get_ground_state(get_sparse_operator(
QubitOperator('Y0 X1') + QubitOperator('Z0 Z1')))
expected_state = csc_matrix(([1j, 1], ([1, 2], [0, 0])),
shape=(4, 1)).A
expected_state /= numpy.sqrt(2.0)
self.assertAlmostEqual(ground[0], -2)
self.assertAlmostEqual(
numpy.absolute(
expected_state.T.conj().dot(ground[1]))[0], 1.)
class ExpectationTest(unittest.TestCase):
def test_expectation_correct_sparse_matrix(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([0., 1.j, 0., 1.j])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
density_matrix = scipy.sparse.csc_matrix(
numpy.outer(vector, numpy.conjugate(vector)))
self.assertAlmostEqual(expectation(operator, density_matrix), 2.0)
def test_expectation_correct_linear_operator(self):
operator = LinearQubitOperator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([0., 1.j, 0., 1.j])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
def test_expectation_handles_column_vector(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([[0.], [1.j], [0.], [1.j]])
self.assertAlmostEqual(expectation(operator, vector), 2.0)
def test_expectation_correct_zero(self):
operator = get_sparse_operator(QubitOperator('X0'), n_qubits=2)
vector = numpy.array([1j, -1j, -1j, -1j])
self.assertAlmostEqual(expectation(operator, vector), 0.0)
class VarianceTest(unittest.TestCase):
def test_variance_row_vector(self):
X = pauli_matrix_map['X']
Z = pauli_matrix_map['Z']
zero = numpy.array([1., 0.])
plus = numpy.array([1., 1.]) / numpy.sqrt(2)
minus = numpy.array([1., -1.]) / numpy.sqrt(2)
self.assertAlmostEqual(variance(Z, zero), 0.)
self.assertAlmostEqual(variance(X, zero), 1.)
self.assertAlmostEqual(variance(Z, plus), 1.)
self.assertAlmostEqual(variance(X, plus), 0.)
self.assertAlmostEqual(variance(Z, minus), 1.)
self.assertAlmostEqual(variance(X, minus), 0.)
def test_variance_column_vector(self):
X = pauli_matrix_map['X']
Z = pauli_matrix_map['Z']
zero = numpy.array([[1.], [0.]])
plus = numpy.array([[1.], [1.]]) / numpy.sqrt(2)
minus = numpy.array([[1.], [-1.]]) / numpy.sqrt(2)
self.assertAlmostEqual(variance(Z, zero), 0.)
self.assertAlmostEqual(variance(X, zero), 1.)
self.assertAlmostEqual(variance(Z, plus), 1.)
self.assertAlmostEqual(variance(X, plus), 0.)
self.assertAlmostEqual(variance(Z, minus), 1.)
self.assertAlmostEqual(variance(X, minus), 0.)
class ExpectationComputationalBasisStateTest(unittest.TestCase):
def test_expectation_fermion_operator_single_number_terms(self):
operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1')
state = csc_matrix(([1], ([15], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.9)
def test_expectation_fermion_operator_two_number_terms(self):
operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') +
FermionOperator('2^ 1^ 2 1', -1.7))
state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 3.6)
def test_expectation_identity_fermion_operator(self):
operator = FermionOperator.identity() * 1.1
state = csc_matrix(([1], ([6], [0])), shape=(16, 1))
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.1)
def test_expectation_state_is_list_single_number_terms(self):
operator = FermionOperator('3^ 3', 1.9) + FermionOperator('2^ 1')
state = [1, 1, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.9)
def test_expectation_state_is_list_fermion_operator_two_number_terms(self):
operator = (FermionOperator('2^ 2', 1.9) + FermionOperator('2^ 1') +
FermionOperator('2^ 1^ 2 1', -1.7))
state = [0, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 3.6)
def test_expectation_state_is_list_identity_fermion_operator(self):
operator = FermionOperator.identity() * 1.1
state = [0, 1, 1]
self.assertAlmostEqual(
expectation_computational_basis_state(operator, state), 1.1)
def test_expectation_bad_operator_type(self):
with self.assertRaises(TypeError):
expectation_computational_basis_state(
'never', csc_matrix(([1], ([6], [0])), shape=(16, 1)))
def test_expectation_qubit_operator_not_implemented(self):
with self.assertRaises(NotImplementedError):
expectation_computational_basis_state(
QubitOperator(), csc_matrix(([1], ([6], [0])), shape=(16, 1)))
class ExpectationDualBasisOperatorWithPlaneWaveBasisState(unittest.TestCase):
def setUp(self):
grid_length = 4
dimension = 1
wigner_seitz_radius = 10.
self.spinless = True
self.n_spatial_orbitals = grid_length ** dimension
n_qubits = self.n_spatial_orbitals
self.n_particles = 3
# Compute appropriate length scale and the corresponding grid.
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, self.n_particles, dimension)
self.grid1 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid1, self.spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, self.n_particles))
self.hf_state_index1 = numpy.sum(2 ** occupied_states)
self.hf_state1 = numpy.zeros(2 ** n_qubits)
self.hf_state1[self.hf_state_index1] = 1.0
self.orbital_occupations1 = [digit == '1' for digit in
bin(self.hf_state_index1)[2:]][::-1]
self.occupied_orbitals1 = [index for index, occupied in
enumerate(self.orbital_occupations1)
if occupied]
self.reversed_occupied_orbitals1 = list(self.occupied_orbitals1)
for i in range(len(self.reversed_occupied_orbitals1)):
self.reversed_occupied_orbitals1[i] = -1 + int(numpy.log2(
self.hf_state1.shape[0])) - self.reversed_occupied_orbitals1[i]
self.reversed_hf_state_index1 = sum(
2 ** index for index in self.reversed_occupied_orbitals1)
def test_1body_hopping_operator_1D(self):
operator = FermionOperator('2^ 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_1body_number_operator_1D(self):
operator = FermionOperator('2^ 2')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_partial_number_operator_high_1D(self):
operator = FermionOperator('2^ 1^ 2 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_partial_number_operator_mid_1D(self):
operator = FermionOperator('1^ 0^ 1 2')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_3body_double_number_operator_1D(self):
operator = FermionOperator('3^ 2^ 1^ 3 1 0')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_2body_adjacent_number_operator_1D(self):
operator = FermionOperator('3^ 2^ 2 1')
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid1, self.spinless))
expected = expectation(get_sparse_operator(
transformed_operator), self.hf_state1)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals1,
self.n_spatial_orbitals, self.grid1, self.spinless)
self.assertAlmostEqual(expected, actual)
def test_1d5_with_spin_10particles(self):
dimension = 1
grid_length = 5
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 10
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) +
FermionOperator('7^ 6^ 5 4', -3.7j) +
FermionOperator('3^ 3', 2.1) +
FermionOperator('3^ 2', 1.7))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = 2.1
# Calculated from expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_1d5_with_spin_7particles(self):
dimension = 1
grid_length = 5
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 7
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('6^ 0^ 1^ 3 5 4', 2) +
FermionOperator('7^ 2^ 4 1') +
FermionOperator('3^ 3', 2.1) +
FermionOperator('5^ 3^ 1 0', 7.3))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = 1.66 - 0.0615536707435j
# Calculated with expected = expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_3d2_spinless(self):
dimension = 3
grid_length = 2
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = True
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 5
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) +
FermionOperator('7^ 6^ 7 4', -3.7j) +
FermionOperator('3^ 7', 2.1))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = -0.2625 - 0.4625j
# Calculated with expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
def test_3d2_with_spin(self):
dimension = 3
grid_length = 2
n_spatial_orbitals = grid_length ** dimension
wigner_seitz_radius = 9.3
spinless = False
n_qubits = n_spatial_orbitals
if not spinless:
n_qubits *= 2
n_particles_big = 9
length_scale = wigner_seitz_length_scale(
wigner_seitz_radius, n_particles_big, dimension)
self.grid3 = Grid(dimension, grid_length, length_scale)
# Get the occupied orbitals of the plane-wave basis Hartree-Fock state.
hamiltonian = jellium_model(self.grid3, spinless, plane_wave=True)
hamiltonian = normal_ordered(hamiltonian)
hamiltonian.compress()
occupied_states = numpy.array(lowest_single_particle_energy_states(
hamiltonian, n_particles_big))
self.hf_state_index3 = numpy.sum(2 ** occupied_states)
self.hf_state3 = csc_matrix(
([1.0], ([self.hf_state_index3], [0])), shape=(2 ** n_qubits, 1))
self.orbital_occupations3 = [digit == '1' for digit in
bin(self.hf_state_index3)[2:]][::-1]
self.occupied_orbitals3 = [index for index, occupied in
enumerate(self.orbital_occupations3)
if occupied]
self.reversed_occupied_orbitals3 = list(self.occupied_orbitals3)
for i in range(len(self.reversed_occupied_orbitals3)):
self.reversed_occupied_orbitals3[i] = -1 + int(numpy.log2(
self.hf_state3.shape[0])) - self.reversed_occupied_orbitals3[i]
self.reversed_hf_state_index3 = sum(
2 ** index for index in self.reversed_occupied_orbitals3)
operator = (FermionOperator('4^ 2^ 3^ 5 5 4', 2) +
FermionOperator('7^ 6^ 7 4', -3.7j) +
FermionOperator('3^ 7', 2.1))
operator = normal_ordered(operator)
transformed_operator = normal_ordered(fourier_transform(
operator, self.grid3, spinless))
expected = -0.2625 - 0.578125j
# Calculated from expected = expectation(get_sparse_operator(
# transformed_operator), self.hf_state3)
actual = expectation_db_operator_with_pw_basis_state(
operator, self.reversed_occupied_orbitals3,
n_spatial_orbitals, self.grid3, spinless)
self.assertAlmostEqual(expected, actual)
class GetGapTest(unittest.TestCase):
def test_get_gap(self):
operator = QubitOperator('Y0 X1') + QubitOperator('Z0 Z1')
self.assertAlmostEqual(get_gap(get_sparse_operator(operator)), 2.0)
def test_get_gap_nonhermitian_error(self):
operator = (QubitOperator('X0 Y1', 1 + 1j) +
QubitOperator('Z0 Z1', 1j) + QubitOperator((), 2 + 1j))
with self.assertRaises(ValueError):
get_gap(get_sparse_operator(operator))
class InnerProductTest(unittest.TestCase):
def test_inner_product(self):
state_1 = numpy.array([1., 1.j])
state_2 = numpy.array([1., -1.j])
self.assertAlmostEqual(inner_product(state_1, state_1), 2.)
self.assertAlmostEqual(inner_product(state_1, state_2), 0.)
class BosonSparseTest(unittest.TestCase):
def setUp(self):
self.hbar = 1.
self.d = 5
self.b = numpy.diag(numpy.sqrt(numpy.arange(1, self.d)), 1)
self.bd = self.b.conj().T
self.q = numpy.sqrt(self.hbar/2)*(self.b + self.bd)
self.p = -1j*numpy.sqrt(self.hbar/2)*(self.b - self.bd)
self.Id = numpy.identity(self.d)
def test_boson_ladder_noninteger_trunc(self):
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, 0.1)
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, -1)
with self.assertRaises(ValueError):
b = boson_ladder_sparse(1, 0, 0, 0)
def test_boson_ladder_destroy_one_mode(self):
b = boson_ladder_sparse(1, 0, 0, self.d).toarray()
self.assertTrue(numpy.allclose(b, self.b))
def test_boson_ladder_create_one_mode(self):
bd = boson_ladder_sparse(1, 0, 1, self.d).toarray()
self.assertTrue(numpy.allclose(bd, self.bd))
def test_boson_ladder_single_adjoint(self):
b = boson_ladder_sparse(1, 0, 0, self.d).toarray()
bd = boson_ladder_sparse(1, 0, 1, self.d).toarray()
self.assertTrue(numpy.allclose(b.conj().T, bd))
def test_boson_ladder_two_mode(self):
res = boson_ladder_sparse(2, 0, 0, self.d).toarray()
expected = numpy.kron(self.b, self.Id)
self.assertTrue(numpy.allclose(res, expected))
res = boson_ladder_sparse(2, 1, 0, self.d).toarray()
expected = numpy.kron(self.Id, self.b)
self.assertTrue(numpy.allclose(res, expected))
def test_single_quad_noninteger_trunc(self):
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0.1)
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, -1)
with self.assertRaises(ValueError):
b = single_quad_op_sparse(1, 0, 'q', self.hbar, 0)
def test_single_quad_q_one_mode(self):
res = single_quad_op_sparse(1, 0, 'q', self.hbar, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.q))
self.assertTrue(numpy.allclose(res, res.conj().T))
def test_single_quad_p_one_mode(self):
res = single_quad_op_sparse(1, 0, 'p', self.hbar, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.p))
self.assertTrue(numpy.allclose(res, res.conj().T))
def test_single_quad_two_mode(self):
res = single_quad_op_sparse(2, 0, 'q', self.hbar, self.d).toarray()
expected = numpy.kron(self.q, self.Id)
self.assertTrue(numpy.allclose(res, expected))
res = single_quad_op_sparse(2, 1, 'p', self.hbar, self.d).toarray()
expected = numpy.kron(self.Id, self.p)
self.assertTrue(numpy.allclose(res, expected))
def test_boson_operator_sparse_trunc(self):
op = BosonOperator('0')
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, 0.1)
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, -1)
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, 0)
def test_boson_operator_invalid_op(self):
op = FermionOperator('0')
with self.assertRaises(ValueError):
b = boson_operator_sparse(op, self.d)
def test_boson_operator_sparse_empty(self):
for op in (BosonOperator(), QuadOperator()):
res = boson_operator_sparse(op, self.d)
self.assertEqual(res, numpy.array([[0]]))
def test_boson_operator_sparse_identity(self):
for op in (BosonOperator(''), QuadOperator('')):
res = boson_operator_sparse(op, self.d)
self.assertEqual(res, numpy.array([[1]]))
def test_boson_operator_sparse_single(self):
op = BosonOperator('0')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.b))
op = BosonOperator('0^')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, self.bd))
op = QuadOperator('q0')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
self.assertTrue(numpy.allclose(res, self.q))
op = QuadOperator('p0')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
self.assertTrue(numpy.allclose(res, self.p))
def test_boson_operator_sparse_number(self):
op = BosonOperator('0^ 0')
res = boson_operator_sparse(op, self.d).toarray()
self.assertTrue(numpy.allclose(res, numpy.dot(self.bd, self.b)))
def test_boson_operator_sparse_multi_mode(self):
op = BosonOperator('0^ 1 1^ 2')
res = boson_operator_sparse(op, self.d).toarray()
b0 = boson_ladder_sparse(3, 0, 0, self.d).toarray()
b1 = boson_ladder_sparse(3, 1, 0, self.d).toarray()
b2 = boson_ladder_sparse(3, 2, 0, self.d).toarray()
expected = multi_dot([b0.T, b1, b1.T, b2])
self.assertTrue(numpy.allclose(res, expected))
op = QuadOperator('q0 p0 p1')
res = boson_operator_sparse(op, self.d, self.hbar).toarray()
expected = numpy.identity(self.d**2)
for term in op.terms:
for i, j in term:
expected = expected.dot(single_quad_op_sparse(
2, i, j, self.hbar, self.d).toarray())
self.assertTrue(numpy.allclose(res, expected))
def test_boson_operator_sparse_addition(self):
op = BosonOperator('0^ 1')
op += BosonOperator('0 0^')
res = boson_operator_sparse(op, self.d).toarray()
b0 = boson_ladder_sparse(2, 0, 0, self.d).toarray()
b1 = boson_ladder_sparse(2, 1, 0, self.d).toarray()
expected = numpy.dot(b0.T, b1) + numpy.dot(b0, b0.T)
self.assertTrue(numpy.allclose(res, expected))
| apache-2.0 | 8,698,967,164,351,995,000 | 40.668158 | 81 | 0.614493 | false | 3.586457 | true | false | false |
rchuppala/usc_agent | src/usc-agent-dev/common/source/pyang/pyang/syntax.py | 1 | 11073 | """Description of YANG & YIN syntax."""
import re
### Regular expressions - constraints on arguments
# keywords and identifiers
identifier = r"[_A-Za-z][._\-A-Za-z0-9]*"
prefix = identifier
keyword = '((' + prefix + '):)?(' + identifier + ')'
# no group version of keyword
keyword_ng = '(?:(' + prefix + '):)?(?:' + identifier + ')'
re_keyword = re.compile(keyword)
re_keyword_start = re.compile('^' + keyword)
pos_integer = r"[1-9][0-9]*"
nonneg_integer = r"(0|[1-9])[0-9]*"
integer_ = r"[-+]?" + nonneg_integer
decimal_ = r"(\+|\-)?[0-9]+(\.[0-9]+)?"
length_str = '((min|max|[0-9]+)\s*' \
'(\.\.\s*' \
'(min|max|[0-9]+)\s*)?)'
length_expr = length_str + '(\|\s*' + length_str + ')*'
re_length_part = re.compile(length_str)
range_str = '((\-INF|min|max|((\+|\-)?[0-9]+(\.[0-9]+)?))\s*' \
'(\.\.\s*' \
'(INF|min|max|(\+|\-)?[0-9]+(\.[0-9]+)?)\s*)?)'
range_expr = range_str + '(\|\s*' + range_str + ')*'
re_range_part = re.compile(range_str)
re_identifier = re.compile("^" + identifier + "$")
# path and unique
node_id = keyword_ng
rel_path_keyexpr = r"(\.\./)+(" + node_id + "/)*" + node_id
path_key_expr = r"(current\s*\(\s*\)/" + rel_path_keyexpr + ")"
path_equality_expr = node_id + r"\s*=\s*" + path_key_expr
path_predicate = r"\s*\[\s*" + path_equality_expr + r"\s*\]\s*"
absolute_path_arg = "(?:/" + node_id + "(" + path_predicate + ")*)+"
descendant_path_arg = node_id + "(" + path_predicate + ")*" + \
"(?:" + absolute_path_arg + ")?"
relative_path_arg = r"(\.\./)*" + descendant_path_arg
deref_path_arg = r"deref\s*\(\s*(?:" + relative_path_arg + \
")\s*\)/\.\./" + relative_path_arg
path_arg = "(" + absolute_path_arg + "|" + relative_path_arg + "|" + \
deref_path_arg + ")"
absolute_schema_nodeid = "(/" + node_id + ")+"
descendant_schema_nodeid = node_id + "(" + absolute_schema_nodeid + ")?"
schema_nodeid = "("+absolute_schema_nodeid+"|"+descendant_schema_nodeid+")"
unique_arg = descendant_schema_nodeid + "(\s+" + descendant_schema_nodeid + ")*"
key_arg = node_id + "(\s+" + node_id + ")*"
re_schema_node_id_part = re.compile('/' + keyword)
# URI - RFC 3986, Appendix A
scheme = "[A-Za-z][-+.A-Za-z0-9]*"
unreserved = "[-._~A-Za-z0-9]"
pct_encoded = "%[0-9A-F]{2}"
sub_delims = "[!$&'()*+,;=]"
pchar = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|[:@])")
segment = pchar + "*"
segment_nz = pchar + "+"
userinfo = ("(" + unreserved + "|" + pct_encoded + "|" +
sub_delims + "|:)*")
dec_octet = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
ipv4address = "(" + dec_octet + r"\.){3}" + dec_octet
h16 = "[0-9A-F]{1,4}"
ls32 = "(" + h16 + ":" + h16 + "|" + ipv4address + ")"
ipv6address = (
"((" + h16 + ":){6}" + ls32 +
"|::(" + h16 + ":){5}" + ls32 +
"|(" + h16 + ")?::(" + h16 + ":){4}" + ls32 +
"|((" + h16 + ":)?" + h16 + ")?::(" + h16 + ":){3}" + ls32 +
"|((" + h16 + ":){,2}" + h16 + ")?::(" + h16 + ":){2}" + ls32 +
"|((" + h16 + ":){,3}" + h16 + ")?::" + h16 + ":" + ls32 +
"|((" + h16 + ":){,4}" + h16 + ")?::" + ls32 +
"|((" + h16 + ":){,5}" + h16 + ")?::" + h16 +
"|((" + h16 + ":){,6}" + h16 + ")?::)")
ipvfuture = r"v[0-9A-F]+\.(" + unreserved + "|" + sub_delims + "|:)+"
ip_literal = r"\[(" + ipv6address + "|" + ipvfuture + r")\]"
reg_name = "(" + unreserved + "|" + pct_encoded + "|" + sub_delims + ")*"
host = "(" + ip_literal + "|" + ipv4address + "|" + reg_name + ")"
port = "[0-9]*"
authority = "(" + userinfo + "@)?" + host + "(:" + port + ")?"
path_abempty = "(/" + segment + ")*"
path_absolute = "/(" + segment_nz + "(/" + segment + ")*)?"
path_rootless = segment_nz + "(/" + segment + ")*"
path_empty = pchar + "{0}"
hier_part = ("(" + "//" + authority + path_abempty + "|" +
path_absolute + "|" + path_rootless + "|" + path_empty + ")")
query = "(" + pchar + "|[/?])*"
fragment = query
uri = (scheme + ":" + hier_part + r"(\?" + query + ")?" +
"(#" + fragment + ")?")
# Date
date = r"[1-2][0-9]{3}-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])"
re_nonneg_integer = re.compile("^" + nonneg_integer + "$")
re_integer = re.compile("^" + integer_ + "$")
re_decimal = re.compile("^" + decimal_ + "$")
re_uri = re.compile("^" + uri + "$")
re_boolean = re.compile("^(true|false)$")
re_version = re.compile("^1$")
re_date = re.compile("^" + date +"$")
re_status = re.compile("^(current|obsolete|deprecated)$")
re_key = re.compile("^" + key_arg + "$")
re_length = re.compile("^" + length_expr + "$")
re_range = re.compile("^" + range_expr + "$")
re_pos_integer = re.compile(r"^(unbounded|" + pos_integer + r")$")
re_ordered_by = re.compile(r"^(user|system)$")
re_node_id = re.compile("^" + node_id + "$")
re_path = re.compile("^" + path_arg + "$")
re_absolute_path = re.compile("^" + absolute_path_arg + "$")
re_unique = re.compile("^" + unique_arg + "$")
re_schema_nodeid = re.compile("^" + schema_nodeid + "$")
re_absolute_schema_nodeid = re.compile("^" + absolute_schema_nodeid + "$")
re_descendant_schema_nodeid = re.compile("^" + descendant_schema_nodeid + "$")
re_deviate = re.compile("^(add|delete|replace|not-supported)$")
arg_type_map = {
"identifier": lambda s: re_identifier.search(s) is not None,
"non-negative-integer": lambda s: re_nonneg_integer.search(s) is not None,
"integer": lambda s: re_integer.search(s) is not None,
"uri": lambda s: re_uri.search(s) is not None,
"boolean": lambda s: re_boolean.search(s) is not None,
"version": lambda s: re_version.search(s) is not None,
"date": lambda s: re_date.search(s) is not None,
"status-arg": lambda s: re_status.search(s) is not None,
"key-arg": lambda s: re_key.search(s) is not None,
"length-arg": lambda s: re_length.search(s) is not None,
"range-arg": lambda s: re_range.search(s) is not None,
"max-value": lambda s: re_pos_integer.search(s) is not None,
"ordered-by-arg": lambda s: re_ordered_by.search(s) is not None,
"identifier-ref": lambda s: re_node_id.search(s) is not None,
"path-arg": lambda s: re_path.search(s) is not None,
"absolute-path-arg": lambda s: re_absolute_path.search(s) is not None,
"unique-arg": lambda s: re_unique.search(s) is not None,
"absolute-schema-nodeid": lambda s: \
re_absolute_schema_nodeid.search(s) is not None,
"descendant-schema-nodeid": lambda s: \
re_descendant_schema_nodeid.search(s) is not None,
"schema-nodeid": lambda s: \
re_schema_nodeid.search(s) is not None,
"enum-arg": lambda s: chk_enum_arg(s),
"fraction-digits-arg": lambda s: chk_fraction_digits_arg(s),
"deviate-arg": lambda s: re_deviate.search(s) is not None,
}
"""Argument type definitions.
Regular expressions for all argument types except plain string that
are checked directly by the parser.
"""
def chk_enum_arg(s):
"""Checks if the string `s` is a valid enum string.
Return True or False."""
if len(s) == 0 or s[0].isspace() or s[-1].isspace():
return False
else:
return True
def chk_fraction_digits_arg(s):
"""Checks if the string `s` is a valid fraction-digits argument.
Return True or False."""
try:
v = int(s)
if v >= 1 and v <= 18:
return True
else:
return False
except ValueError:
return False
def add_arg_type(arg_type, regexp):
"""Add a new arg_type to the map.
Used by extension plugins to register their own argument types."""
arg_type_map[arg_type] = regexp
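# Editor's addition -- an illustrative helper, not part of the original pyang
# module. It sketches how the arg_type_map table above can be used to check a
# statement argument; the helper name is hypothetical.
def _example_check_argument(arg_type, value):
    """Return True if `value` is syntactically valid for `arg_type`."""
    checker = arg_type_map.get(arg_type)
    if checker is None:
        # Unknown argument types are treated as plain strings (always valid).
        return True
    return checker(value)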
# keyword argument-name yin-element
yin_map = \
{'anyxml': ('name', False),
'argument': ('name', False),
'augment': ('target-node', False),
'base': ('name', False),
'belongs-to': ('module', False),
'bit': ('name', False),
'case': ('name', False),
'choice': ('name', False),
'config': ('value', False),
'contact': ('text', True),
'container': ('name', False),
'default': ('value', False),
'description': ('text', True),
'deviate': ('value', False),
'deviation': ('target-node', False),
'enum': ('name', False),
'error-app-tag': ('value', False),
'error-message': ('value', True),
'extension': ('name', False),
'feature': ('name', False),
'fraction-digits': ('value', False),
'grouping': ('name', False),
'identity': ('name', False),
'if-feature': ('name', False),
'import': ('module', False),
'include': ('module', False),
'input': (None, None),
'key': ('value', False),
'leaf': ('name', False),
'leaf-list': ('name', False),
'length': ('value', False),
'list': ('name', False),
'mandatory': ('value', False),
'max-elements': ('value', False),
'min-elements': ('value', False),
'module': ('name', False),
'must': ('condition', False),
'namespace': ('uri', False),
'notification': ('name', False),
'ordered-by': ('value', False),
'organization': ('text', True),
'output': (None, None),
'path': ('value', False),
'pattern': ('value', False),
'position': ('value', False),
'presence': ('value', False),
'prefix': ('value', False),
'range': ('value', False),
'reference': ('text', True),
'refine': ('target-node', False),
'require-instance': ('value', False),
'revision': ('date', False),
'revision-date': ('date', False),
'rpc': ('name', False),
'status': ('value', False),
'submodule': ('name', False),
'type': ('name', False),
'typedef': ('name', False),
'unique': ('tag', False),
'units': ('name', False),
'uses': ('name', False),
'value': ('value', False),
'when': ('condition', False),
'yang-version': ('value', False),
'yin-element': ('value', False),
}
"""Mapping of statements to the YIN representation of their arguments.
The values are pairs whose first component specifies whether the
argument is stored in a subelement and the second component is the
name of the attribute or subelement carrying the argument. See YANG
specification.
"""
| gpl-2.0 | 2,142,903,817,177,840,400 | 41.588462 | 80 | 0.493362 | false | 3.092153 | false | false | false |
ngokevin/zamboni | mkt/operators/tests/test_authorization.py | 1 | 5750 | from nose.tools import ok_
from rest_framework.generics import GenericAPIView
from django.contrib.auth.models import AnonymousUser
from amo.tests import TestCase
from mkt.access.middleware import ACLMiddleware
from mkt.carriers import CARRIER_MAP as CARRIERS
from mkt.feed.constants import FEED_TYPE_SHELF
from mkt.feed.tests.test_models import FeedTestMixin
from mkt.operators.authorization import (OperatorAuthorization,
OperatorShelfAuthorization)
from mkt.operators.models import OperatorPermission
from mkt.regions import REGIONS_DICT as REGIONS
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from test_utils import RequestFactory
class BaseTestOperatorAuthorization(FeedTestMixin, TestCase):
fixtures = fixture('user_2519') + FeedTestMixin.fixtures
def setUp(self):
super(BaseTestOperatorAuthorization, self).setUp()
self.auth = self.auth_class()
self.user = UserProfile.objects.get(pk=2519)
self.view = GenericAPIView()
def make_admin(self):
self.grant_permission(self.user, 'OperatorDashboard:*')
def give_objpermission(self, carrier, region):
carrier_id = CARRIERS[carrier].id
region_id = REGIONS[region].id
OperatorPermission.objects.create(user=self.user, region=region_id,
carrier=carrier_id)
def is_authorized(self, verb, anon=False, carrier='telefonica',
region='br'):
request = self.request(verb, anon=anon, carrier=carrier,
region=region)
return self.auth.has_permission(request, self.view)
def is_object_authorized(self, verb, obj, anon=False, carrier='telefonica',
region='br'):
request = self.request(verb, anon=anon, carrier=carrier,
region=region)
return self.auth.has_object_permission(request, self.view, obj)
def request(self, verb, anon=False, **kwargs):
request = getattr(RequestFactory(), verb.lower())('/', kwargs)
request.user = AnonymousUser() if anon else self.user
ACLMiddleware().process_request(request)
return request
class TestOperatorAuthorization(BaseTestOperatorAuthorization):
auth_class = OperatorAuthorization
def test_safe(self):
ok_(self.is_authorized('GET', anon=True))
ok_(self.is_authorized('GET'))
def test_safe_permission(self):
self.make_admin()
ok_(self.is_authorized('GET'))
def test_safe_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('GET', carrier='telefonica', region='br'))
def test_safe_objpermission_mismatch(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('GET', carrier='america_movil', region='fr'))
def test_unsafe(self):
ok_(not self.is_authorized('POST', anon=True))
ok_(not self.is_authorized('POST'))
def test_unsafe_permission(self):
self.make_admin()
ok_(self.is_authorized('POST'))
def test_unsafe_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_authorized('POST'))
def test_unsafe_objpermission_mismatch(self):
self.give_objpermission('telefonica', 'br')
ok_(not self.is_authorized('POST', carrier='america_movil',
region='fr'))
class TestOperatorShelfAuthorization(BaseTestOperatorAuthorization):
auth_class = OperatorShelfAuthorization
def setUp(self):
super(TestOperatorShelfAuthorization, self).setUp()
self.feed_item = self.feed_item_factory(carrier=1, region=7, # TEF/BR
item_type=FEED_TYPE_SHELF)
self.shelf = self.feed_item.shelf
def test_safe_object(self):
ok_(self.is_object_authorized('GET', self.feed_item, anon=True))
ok_(self.is_object_authorized('GET', self.shelf, anon=True))
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
self.make_admin()
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_safe_object_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_safe_object_objpermission_mismatch(self):
self.give_objpermission('america_movil', 'fr')
ok_(self.is_object_authorized('GET', self.feed_item))
ok_(self.is_object_authorized('GET', self.shelf))
def test_unsafe_object(self):
ok_(not self.is_object_authorized('POST', self.feed_item, anon=True))
ok_(not self.is_object_authorized('POST', self.shelf, anon=True))
ok_(not self.is_object_authorized('POST', self.feed_item))
ok_(not self.is_object_authorized('POST', self.shelf))
self.make_admin()
ok_(self.is_object_authorized('POST', self.feed_item))
ok_(self.is_object_authorized('POST', self.shelf))
def test_unsafe_object_objpermission_correct(self):
self.give_objpermission('telefonica', 'br')
ok_(self.is_object_authorized('POST', self.feed_item))
ok_(self.is_object_authorized('POST', self.shelf))
def test_unsafe_object_objpermission_mismatch(self):
self.give_objpermission('america_movil', 'fr')
ok_(not self.is_object_authorized('POST', self.feed_item))
ok_(not self.is_object_authorized('POST', self.shelf))
| bsd-3-clause | -7,164,898,191,117,203,000 | 39.20979 | 79 | 0.652522 | false | 3.787879 | true | false | false |
obedmr/MPIaaS | app/echoserv.py | 1 | 1498 | #!/usr/bin/env python
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor
import twisted.internet.error
import sys
import ConfigParser
CONFIG_CONF = "setup.conf"
PORT=8000
class Echo(Protocol):
def dataReceived(self, data):
"""
As soon as any data is received, write it back.
"""
lines = data.split('\n')
for line in lines:
if "PORT:" in line:
print line
port = line.split(":")[1].strip()
if "SERVER_IP:" in line:
print line
server_ip = line.split(":")[1].strip()
if "LOCAL_IP:" in line:
print line
client_ip = line.split(":")[1].strip()
parser = ConfigParser.SafeConfigParser()
section = 'CLIENTS_' + client_ip
parser.add_section(section)
parser.set(section, 'ip',str(client_ip))
parser.set(section, 'port',str(port))
parser.write(sys.stdout)
file_conf = open(CONFIG_CONF,'a')
parser.write(file_conf)
file_conf.close()
self.transport.write(data)
def main():
try:
f = Factory()
f.protocol = Echo
reactor.listenTCP(PORT, f)
reactor.run()
except twisted.internet.error.CannotListenError, ex:
print "Port is %d busy: %s" % (PORT, ex)
print "Run ./mpiaas_runner.py --killserver"
sys.exit(1)
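# Editor's addition (illustrative sketch, not part of the original module): a
# minimal client showing the line-based registration message that the Echo
# protocol above parses. The address and port values are placeholders.
def _example_register_client(server_ip='127.0.0.1', local_ip='127.0.0.1', client_port=9000):
    import socket
    payload = "PORT:%d\nSERVER_IP:%s\nLOCAL_IP:%s\n" % (client_port, server_ip, local_ip)
    sock = socket.create_connection((server_ip, PORT))
    try:
        sock.sendall(payload)
        return sock.recv(4096)  # the server echoes the data back
    finally:
        sock.close()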
if __name__ == '__main__':
main()
| apache-2.0 | -7,503,330,617,456,163,000 | 25.280702 | 56 | 0.55474 | false | 3.8509 | false | false | false |
SimoneLucia/EmbASP-Python | languages/asp/answer_set.py | 1 | 1221 | from languages.asp.asp_mapper import ASPMapper
class AnserSet(object):
"""A collection of data representing a generic Answer Set"""
def __init__(self, value, weightMap=dict()):
self.__value = value # Where data of answer set is stored
self.__weight_map = weightMap # Where weights of the answer set are stored
self.__atoms = set() # Where Answer set's atoms are stored
def get_answer_set(self):
"""Return the current __value data
        The method returns a list of answer sets in string format
"""
return self.__value
def get_atoms(self):
"""Return atoms stored in __atoms
        The method returns a set of objects filled with atom data
"""
if not self.__atoms:
mapper = ASPMapper.get_instance()
for atom in self.__value:
obj = mapper.get_object(atom)
                if obj is not None:
self.__atoms.add(obj)
return self.__atoms
def get_weights(self):
"""Return the weight_map"""
return self.__weight_map
def __str__(self):
"""Overload string method"""
return str(self.__value)
| mit | -8,103,394,283,243,837,000 | 31.131579 | 83 | 0.564292 | false | 4.505535 | false | false | false |
nkoep/pymanopt | pymanopt/manifolds/psd.py | 1 | 15204 | import warnings
import numpy as np
from numpy import linalg as la, random as rnd
from scipy.linalg import expm
# Workaround for SciPy bug: https://github.com/scipy/scipy/pull/8082
try:
from scipy.linalg import solve_continuous_lyapunov as lyap
except ImportError:
from scipy.linalg import solve_lyapunov as lyap
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold, Manifold
from pymanopt.tools.multi import multilog, multiprod, multisym, multitransp
class _RetrAsExpMixin:
"""Mixin class which defers calls to the exponential map to the retraction
and issues a warning.
"""
def exp(self, Y, U):
warnings.warn(
"Exponential map for manifold '{:s}' not implemented yet. Using "
"retraction instead.".format(self._get_class_name()),
RuntimeWarning)
return self.retr(Y, U)
class SymmetricPositiveDefinite(EuclideanEmbeddedSubmanifold):
"""Manifold of (n x n)^k symmetric positive definite matrices, based on the
geometry discussed in Chapter 6 of Positive Definite Matrices (Bhatia
2007). Some of the implementation is based on sympositivedefinitefactory.m
from the Manopt MATLAB package. Also see "Conic geometric optimisation on
the manifold of positive definite matrices" (Sra & Hosseini 2013) for more
details.
"""
def __init__(self, n, k=1):
self._n = n
self._k = k
if k == 1:
name = ("Manifold of positive definite ({} x {}) matrices").format(
n, n)
else:
name = "Product manifold of {} ({} x {}) matrices".format(k, n, n)
dimension = int(k * n * (n + 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.sqrt(self.dim)
def dist(self, x, y):
# Adapted from equation 6.13 of "Positive definite matrices". The
# Cholesky decomposition gives the same result as matrix sqrt. There
# may be more efficient ways to compute this.
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return la.norm(logm)
def inner(self, x, u, v):
return np.tensordot(la.solve(x, u), la.solve(x, v), axes=x.ndim)
def proj(self, X, G):
return multisym(G)
def egrad2rgrad(self, x, u):
# TODO: Check that this is correct
return multiprod(multiprod(x, multisym(u)), x)
def ehess2rhess(self, x, egrad, ehess, u):
# TODO: Check that this is correct
return (multiprod(multiprod(x, multisym(ehess)), x) +
multisym(multiprod(multiprod(u, multisym(egrad)), x)))
def norm(self, x, u):
# This implementation is as fast as np.linalg.solve_triangular and is
# more stable, as the above solver tends to output non positive
# definite results.
c = la.cholesky(x)
c_inv = la.inv(c)
return la.norm(multiprod(multiprod(c_inv, u), multitransp(c_inv)))
def rand(self):
# The way this is done is arbitrary. I think the space of p.d.
# matrices would have infinite measure w.r.t. the Riemannian metric
# (cf. integral 0-inf [ln(x)] dx = inf) so impossible to have a
# 'uniform' distribution.
# Generate eigenvalues between 1 and 2
d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)
# Generate an orthogonal matrix. Annoyingly qr decomp isn't
# vectorized so need to use a for loop. Could be done using
# svd but this is slower for bigger matrices.
u = np.zeros((self._k, self._n, self._n))
for i in range(self._k):
u[i], r = la.qr(rnd.randn(self._n, self._n))
if self._k == 1:
return multiprod(u, d * multitransp(u))[0]
return multiprod(u, d * multitransp(u))
def randvec(self, x):
k = self._k
n = self._n
if k == 1:
u = multisym(rnd.randn(n, n))
else:
u = multisym(rnd.randn(k, n, n))
return u / self.norm(x, u)
def transp(self, x1, x2, d):
return d
def exp(self, x, u):
# TODO: Check which method is faster depending on n, k.
x_inv_u = la.solve(x, u)
if self._k > 1:
e = np.zeros(np.shape(x))
for i in range(self._k):
e[i] = expm(x_inv_u[i])
else:
e = expm(x_inv_u)
return multiprod(x, e)
# This alternative implementation is sometimes faster though less
# stable. It can return a matrix with small negative determinant.
# c = la.cholesky(x)
# c_inv = la.inv(c)
# e = multiexp(multiprod(multiprod(c_inv, u), multitransp(c_inv)),
# sym=True)
# return multiprod(multiprod(c, e), multitransp(c))
retr = exp
def log(self, x, y):
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return multiprod(multiprod(c, logm), multitransp(c))
def zerovec(self, x):
k = self._k
n = self._n
        # Match the shapes produced by rand()/randvec(): (n, n) for a single
        # manifold (k == 1) and a (k, n, n) stack otherwise.
        if k == 1:
            return np.zeros((n, n))
        return np.zeros((k, n, n))
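# Editor's addition -- a small, hedged usage sketch of the manifold above; it
# is not part of the original pymanopt source and is never called by it.
def _example_spd_usage(n=3):
    manifold = SymmetricPositiveDefinite(n)
    x = manifold.rand()         # random SPD point
    u = manifold.randvec(x)     # unit-norm tangent vector at x
    y = manifold.exp(x, u)      # move along the geodesic through x
    return manifold.dist(x, y)  # geodesic distance between x and y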
# TODO(nkoep): This could either stay in here (seeing how it's a manifold of
# psd matrices, or in fixed_rank. Alternatively, move this one and
# the next class to a dedicated 'psd_fixed_rank' module.
class _PSDFixedRank(Manifold, _RetrAsExpMixin):
def __init__(self, n, k, name, dimension):
self._n = n
self._k = k
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 + self._k
def inner(self, Y, U, V):
# Euclidean metric on the total space.
return float(np.tensordot(U, V))
def norm(self, Y, U):
return la.norm(U, "fro")
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def proj(self, Y, H):
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(H) - H.T.dot(Y)
Omega = lyap(YtY, AS)
return H - Y.dot(Omega)
def egrad2rgrad(self, Y, egrad):
return egrad
def ehess2rhess(self, Y, egrad, ehess, U):
return self.proj(Y, ehess)
def retr(self, Y, U):
return Y + U
def rand(self):
return rnd.randn(self._n, self._k)
def randvec(self, Y):
H = self.rand()
P = self.proj(Y, H)
return self._normalize(P)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize(self, Y):
return Y / self.norm(None, Y)
def zerovec(self, X):
return np.zeros((self._n, self._k))
class PSDFixedRank(_PSDFixedRank):
"""
Manifold of n-by-n symmetric positive semidefinite matrices of rank k.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may
be big). Tangent vectors are represented as matrices of the same size as
Y, call them Ydot, so that Xdot = Y Ydot' + Ydot Y. The metric is the
canonical Euclidean metric on Y.
Since for any orthogonal Q of size k, it holds that (YQ)(YQ)' = YY',
we "group" all matrices of the form YQ in an equivalence class. The set
of equivalence classes is a Riemannian quotient manifold, implemented
here.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
An alternative, complete, geometry for positive semidefinite matrices of
rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric
and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank",
SIAM Journal on Matrix Analysis and Applications.
The geometry implemented here is the simplest case of the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of "
"rank {:d}".format(n, n, k))
dimension = int(k * n - k * (k - 1) / 2)
super().__init__(n, k, name, dimension)
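# Editor's addition (illustrative only, not part of the original pymanopt
# source): how a point on the quotient manifold above is handled -- the factor
# Y is stored, and the represented PSD matrix is recovered as Y Y^T.
def _example_psd_fixed_rank_point(n=5, k=2):
    manifold = PSDFixedRank(n, k)
    Y = manifold.rand()            # n-by-k factor representing X = Y Y^T
    X = Y.dot(Y.T)                 # the rank-k PSD matrix itself
    U = manifold.randvec(Y)        # horizontal tangent vector at Y
    return X, manifold.norm(Y, U)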
class PSDFixedRankComplex(_PSDFixedRank):
"""
Manifold of n x n complex Hermitian pos. semidefinite matrices of rank k.
Manifold of n-by-n complex Hermitian positive semidefinite matrices of
fixed rank k. This follows the quotient geometry described
in Sarod Yatawatta's 2013 paper:
"Radio interferometric calibration using a Riemannian manifold", ICASSP.
Paper link: http://dx.doi.org/10.1109/ICASSP.2013.6638382.
A point X on the manifold M is parameterized as YY^*, where Y is a
complex matrix of size nxk of full rank. For any point Y on the manifold M,
given any kxk complex unitary matrix U, we say Y*U is equivalent to Y,
i.e., YY^* does not change. Therefore, M is the set of equivalence
classes and is a Riemannian quotient manifold C^{nk}/U(k)
where C^{nk} is the set of all complex matrix of size nxk of full rank.
The metric is the usual real-trace inner product, that is,
it is the usual metric for the complex plane identified with R^2.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of Hermitian {:d}x{:d} complex "
"matrices of rank {:d}".format(n, n, k))
dimension = 2 * k * n - k * k
super().__init__(n, k, name, dimension)
def inner(self, Y, U, V):
return 2 * float(np.tensordot(U, V).real)
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
def dist(self, U, V):
S, _, D = la.svd(V.T.conj().dot(U))
E = U - V.dot(S).dot(D)
return self.inner(None, E, E) / 2
def rand(self):
rand_ = super().rand
return rand_() + 1j * rand_()
class Elliptope(Manifold, _RetrAsExpMixin):
"""
Manifold of n-by-n psd matrices of rank k with unit diagonal elements.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may be
big). Tangent vectors are represented as matrices of the same size as Y,
call them Ydot, so that Xdot = Y Ydot' + Ydot Y and diag(Xdot) == 0. The
metric is the canonical Euclidean metric on Y.
The diagonal constraints on X (X(i, i) == 1 for all i) translate to
unit-norm constraints on the rows of Y: norm(Y(i, :)) == 1 for all i. The
set of such Y's forms the oblique manifold. But because for any orthogonal
Q of size k, it holds that (YQ)(YQ)' = YY', we "group" all matrices of the
form YQ in an equivalence class. The set of equivalence classes is a
Riemannian quotient manifold, implemented here.
Note that this geometry formally breaks down at rank-deficient Y's. This
does not appear to be a major issue in practice when optimization
algorithms converge to rank-deficient Y's, but convergence theorems no
longer hold. As an alternative, you may use the oblique manifold (it has
larger dimension, but does not break down at rank drop.)
The geometry is taken from the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
self._n = n
self._k = k
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of rank {:d} "
"with diagonal elements being 1".format(n, n, k))
dimension = int(n * (k - 1) - k * (k - 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 * self._k
def inner(self, Y, U, V):
return float(np.tensordot(U, V))
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
# Projection onto the tangent space, i.e., on the tangent space of
# ||Y[i, :]||_2 = 1
def proj(self, Y, H):
eta = self._project_rows(Y, H)
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(eta) - H.T.dot(Y)
Omega = lyap(YtY, -AS)
return eta - Y.dot((Omega - Omega.T) / 2)
def retr(self, Y, U):
return self._normalize_rows(Y + U)
# Euclidean gradient to Riemannian gradient conversion. We only need the
# ambient space projection: the remainder of the projection function is not
# necessary because the Euclidean gradient must already be orthogonal to
# the vertical space.
def egrad2rgrad(self, Y, egrad):
return self._project_rows(Y, egrad)
def ehess2rhess(self, Y, egrad, ehess, U):
scaling_grad = (egrad * Y).sum(axis=1)
hess = ehess - U * scaling_grad[:, np.newaxis]
scaling_hess = (U * egrad + Y * ehess).sum(axis=1)
hess -= Y * scaling_hess[:, np.newaxis]
return self.proj(Y, hess)
def rand(self):
return self._normalize_rows(rnd.randn(self._n, self._k))
def randvec(self, Y):
H = self.proj(Y, self.rand())
return H / self.norm(Y, H)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize_rows(self, Y):
"""Return an l2-row-normalized copy of the matrix Y."""
return Y / la.norm(Y, axis=1)[:, np.newaxis]
# Orthogonal projection of each row of H to the tangent space at the
# corresponding row of X, seen as a point on a sphere.
def _project_rows(self, Y, H):
# Compute the inner product between each vector H[i, :] with its root
# point Y[i, :], i.e., Y[i, :].T * H[i, :]. Returns a row vector.
inners = (Y * H).sum(axis=1)
return H - Y * inners[:, np.newaxis]
def zerovec(self, X):
return np.zeros((self._n, self._k))
| bsd-3-clause | 3,308,188,274,906,176,000 | 36.173594 | 79 | 0.615496 | false | 3.443715 | false | false | false |
UrLab/DocHub | www/rest_urls.py | 1 | 1862 | from rest_framework.routers import APIRootView, DefaultRouter
import catalog.rest
import documents.rest
import notifications.rest
import search.rest
import telepathy.rest
import users.rest
import www.rest
class DochubAPI(APIRootView):
"""
This is the API of DocHub.
You are free to use it to crawl DocHub,
write your own frontend or even make a copy of our documents.
    But please, if you do, respect these rules:
    * Do not hit the server too hard. If you degrade the service for other users, we will ban you.
    * Respect the privacy of the users
    * If you scrape and reuse our content, please credit DocHub and the original uploader.
    This whole API is auth protected.
    To be able to use it without your session cookie,
    use your personal token from <a href="/api/me">/api/me</a>
([doc](http://www.django-rest-framework.org/api-guide/authentication/#tokenauthentication))
"""
pass
class Router(DefaultRouter):
APIRootView = DochubAPI
router = Router()
router.register(r'users', users.rest.UserViewSet)
router.register(r'courses', catalog.rest.CourseViewSet)
router.register(r'categories', catalog.rest.CategoryViewSet)
router.register(r'threads', telepathy.rest.ThreadViewSet)
router.register(r'messages', telepathy.rest.MessageViewSet)
router.register(r'documents', documents.rest.DocumentViewSet)
router.register(r'search/courses', search.rest.CourseSearchViewSet, basename="search-courses")
router.register(r'feed', www.rest.FeedViewSet, basename="feed")
router.register(r'me', users.rest.Me, basename="users-me")
router.register(r'notifications', notifications.rest.NotificationsViewSet, basename="notifications")
router.register(r'me/actions', www.rest.SelfFeedViewSet, basename="user-actions")
router.register(r'tree', catalog.rest.Tree, basename="catalog-tree")
urlpatterns = router.urls
| agpl-3.0 | 402,545,330,675,040,000 | 34.807692 | 100 | 0.762084 | false | 3.643836 | false | false | false |
pombredanne/discern | examples/problem_grader/grader/models.py | 1 | 5156 | from django.db import models
from django.contrib.auth.models import User
from django.forms.models import model_to_dict
from django.db.models.signals import post_save, pre_save
import random
import string
from django.conf import settings
import requests
import json
import logging
log= logging.getLogger(__name__)
class Rubric(models.Model):
"""
The rubric object is a way to locally store data about rubric options.
Each rubric is associated with a problem object stored on the API side.
"""
#Each rubric is specific to a problem and a user.
associated_problem = models.IntegerField()
user = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def get_scores(self):
"""
Calculate the final score for a given rubric.
"""
scores = []
all_scores = []
final_score=0
max_score = 0
options = self.get_rubric_dict()
for option in options:
#Add to all_scores for each of the scores
all_scores.append(option['option_points'])
#If the student was marked as correct for a given option, add it to the score
if option['selected']:
scores.append(option['option_points'])
if len(scores)>0:
final_score = sum(scores)
if len(all_scores)>0:
max_score = sum(all_scores)
return {
'score' : final_score,
'max_score' : max_score
}
def get_rubric_dict(self):
"""
Get the rubric in dictionary form.
"""
options = []
#Bundle up all of the rubric options
option_set = self.rubricoption_set.all().order_by('id')
for option in option_set:
options.append(model_to_dict(option))
return options
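# Editor's note (illustrative, not part of the original app): get_scores() sums
# the points of the selected options and of all options. For example, a rubric
# whose options are worth 1, 2 and 3 points, with only the 3-point option
# selected, yields {'score': 3, 'max_score': 6}.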
class RubricOption(models.Model):
"""
Each rubric has multiple options
"""
#Associate options with rubrics
rubric = models.ForeignKey(Rubric)
#Number of points the rubric option is worth
option_points = models.IntegerField()
#Text to show to users for this option
option_text = models.TextField()
#Whether or not this option is selected (ie marked correct)
selected = models.BooleanField(default=False)
class UserProfile(models.Model):
"""
Every user has a profile. Used to store additional fields.
"""
user = models.OneToOneField(User)
#Api key
api_key = models.TextField(default="")
#Api username
api_user = models.TextField(default="")
#whether or not an api user has been created
api_user_created = models.BooleanField(default=False)
def get_api_auth(self):
"""
Returns the api authentication dictionary for the given user
"""
return {
'username' : self.api_user,
'api_key' : self.api_key
}
def create_user_profile(sender, instance, created, **kwargs):
"""
Creates a user profile based on a signal from User when it is created
"""
#Create a userprofile if the user has just been created, don't if not.
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
else:
return
#If a userprofile was not created (gotten instead), then don't make an api user
if not created:
return
#Create a random password for the api user
random_pass = ''.join([random.choice(string.digits + string.letters) for i in range(0, 15)])
#Data we will post to the api to make a user
data = {
'username' : instance.username,
'password' : random_pass,
'email' : instance.email
}
headers = {'content-type': 'application/json'}
#Now, let's try to get the schema for the create user model.
create_user_url = settings.FULL_API_START + "createuser/"
counter = 0
status_code = 400
#Try to create the user at the api
while status_code==400 and counter<2 and not instance.profile.api_user_created:
try:
#Post our information to try to create a user
response = requests.post(create_user_url, data=json.dumps(data),headers=headers)
status_code = response.status_code
#If a user has been created, store the api key locally
if status_code==201:
instance.profile.api_user_created = True
response_data = json.loads(response.content)
instance.profile.api_key = response_data['api_key']
instance.profile.api_user = data['username']
instance.profile.save()
except:
log.exception("Could not create an API user!")
instance.profile.save()
counter+=1
#If we could not create a user in the first pass through the loop, add to the username to try to make it unique
data['username'] += random.choice(string.digits + string.letters)
post_save.connect(create_user_profile, sender=User)
#Maps the get_profile() function of a user to an attribute profile
User.profile = property(lambda u: u.get_profile())
| agpl-3.0 | 7,535,211,253,820,330,000 | 32.480519 | 119 | 0.632661 | false | 4.1248 | false | false | false |
mabotech/mabo.io | py/AK/test/redis_lua000.py | 1 | 1801 | # -*- coding: utf-8 -*-
"""
redis lua
redis eval, notify in lua script
"""
import time
import redis
def main(key, val, key2, val2):
# connection pool
r = redis.Redis(host='localhost', port=6379, db=5)
d = {"a":"v1"}
"""
    eval("lua script", "number of keys", keys[], argv[])
KEYS[1]
ARGV[1]
compare value
    update the value when it changes
    create a job to update the db when the value changes
    set heartbeat per tag
"""
lua_code = """if redis.call("EXISTS", KEYS[1]) == 1 then
-- redis.call("SET", "ST", ARGV[3])
-- redis.call("LPUSH", "c1","chan1")
-- redis.call("PUBLISH", "c1","new")
--
local payload = redis.call("GET", KEYS[1])
if payload == ARGV[1] then
return "same"
else
redis.call("SET", KEYS[1],ARGV[1])
redis.call("SET", KEYS[2],ARGV[2])
redis.call("LPUSH", "c1","chan2")
return payload -- return old val
end
else
redis.call("SET", KEYS[1],ARGV[1])
redis.call("SET", KEYS[2],ARGV[2])
redis.call("LPUSH", "c1","chan2")
return nil
end"""
#.format(**d)
#print(lua_code)
#benchmark
"""
0.22 ms
4545 times/second
"""
t1 = time.time()
stamp = t1*1000
val2 = t1*1000
n = 1
for i in xrange(0, n):
v = r.eval(lua_code, 2, key, key2, val, val2, stamp)
t2 = time.time()
t = (t2-t1)*1000/n
print("%sms" %(t))
#print(1000/t)
print(v)
h = r.script_load(lua_code)
print h
#print dir(r)
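    # Editor's note (illustrative): script_load() above returns the script's
    # SHA1 digest, so the same compare-and-set can be re-invoked without
    # resending the source, e.g. r.evalsha(h, 2, key, key2, val, val2, stamp).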
if __name__ == "__main__":
key = "y:a:c"
val = "10.20"
key2 = "y:a:c_st"
val2 = time.time()
main(key, val, key2, val2) | mit | -3,571,517,743,080,625,000 | 17.770833 | 61 | 0.481954 | false | 3.052542 | false | false | false |
levilucio/SyVOLT | t_core/HTopClass2TableNAC0.py | 1 | 8862 |
from core.himesis import Himesis, HimesisPreConditionPatternNAC
import cPickle as pickle
from uuid import UUID
class HTopClass2TableNAC0(HimesisPreConditionPatternNAC):
def __init__(self, LHS):
"""
Creates the himesis graph representing the AToM3 model HTopClass2TableNAC0.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HTopClass2TableNAC0, self).__init__(name='HTopClass2TableNAC0', num_nodes=3, edges=[], LHS=LHS)
# Add the edges
self.add_edges([(1, 0), (0, 2)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__CD2RDBMSMetaModel'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the NAC have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True forbids the rule from being applied,
# returning False enables the rule to be applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = UUID('d74c9eae-e470-4aa6-8817-2e15a1b64aab')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_label__"] = """3"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = """MT_pre__Parent"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["GUID__"] = UUID('94914a38-3999-44e8-8ecc-1e356a6b3e23')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__is_persistent"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """1"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = """MT_pre__Clazz"""
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = UUID('a2616a97-3c66-4aa2-928f-52a37b14147b')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__is_persistent"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """2"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["mm__"] = """MT_pre__Clazz"""
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["GUID__"] = UUID('4a053f4e-83f0-474b-af5a-6e2e58e5ea12')
# Load the bridge between this NAC and its LHS
from HTopClass2TableNAC0Bridge import HTopClass2TableNAC0Bridge
self.bridge = HTopClass2TableNAC0Bridge()
def eval_is_persistent1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_is_persistent2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the NAC have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True forbids the rule from being applied,
# returning False enables the rule to be applied.
#===============================================================================
return True
| mit | 731,728,089,143,411,300 | 47.233333 | 117 | 0.51072 | false | 4.568041 | false | false | false |
MalloyDelacroix/DownloaderForReddit | Tools/ui_converter.py | 1 | 5130 | #!/usr/bin/env python
import sys
import os
import subprocess
class Converter:
base_ui_path = os.path.relpath('Resources/ui_files')
base_out_path = os.path.relpath('DownloaderForReddit/guiresources')
def __init__(self, ui_file):
self.ui_file = ui_file
self.callable_methods = [
'about',
'add_reddit_object',
'core_settings',
'database_dialog',
'database_settings',
'display_settings',
'download_settings',
'export_wizard',
'notification_settings',
'output_settings',
'filter_input',
'filter_widget',
'main_window',
'object_info',
'object_settings',
'quick_filter_settings',
'reddit_object_dialog',
'schedule_settings',
'settings',
'update_dialog',
'invalid_dialog',
'existing_names_dialog',
]
def run(self):
if self.ui_file == 'list':
self.list_methods()
self.ui_file = input('GUI file name (or number): ')
try:
name = self.get_method()
method = getattr(self, name)
method()
print('Conversion successful')
except AttributeError:
print(f'Command not recognized. Choices are: ')
self.list_methods()
def get_method(self):
try:
index = int(self.ui_file)
return self.callable_methods[index]
except ValueError:
return self.ui_file
def list_methods(self):
for x, y in enumerate(self.callable_methods):
print(f'{x}: {y}')
def convert(self, name, *sub_paths):
original = os.getcwd()
os.chdir(os.path.dirname(original)) # change directories so that all file paths in created file are correct
in_path = self.get_in_path(name, *sub_paths)
out_path = self.get_out_path(name, *sub_paths)
command = f'pyuic5 {in_path} -o {out_path}'
# print(command)
subprocess.run(command)
os.chdir(original)
def get_in_path(self, name, *sub_paths):
name = f'{name}.ui'
return os.path.join(self.base_ui_path, *sub_paths, name)
def get_out_path(self, name, *sub_paths):
name = f'{name}_auto.py'
return os.path.join(self.base_out_path, *sub_paths, name)
def about(self):
name = 'about_dialog'
self.convert(name)
def add_reddit_object(self):
name = 'add_reddit_object_dialog'
self.convert(name)
def main_window(self):
name = 'downloader_for_reddit_gui'
self.convert(name)
def reddit_object_dialog(self):
name = 'reddit_object_settings_dialog'
self.convert(name)
def update_dialog(self):
name = 'update_dialog'
self.convert(name)
def database_dialog(self):
name = 'database_dialog'
self.convert(name, 'database_views')
def filter_input(self):
name = 'filter_input_widget'
self.convert(name, 'database_views')
def filter_widget(self):
name = 'filter_widget'
self.convert(name, 'database_views')
def core_settings(self):
name = 'core_settings_widget'
self.convert(name, 'settings')
def database_settings(self):
name = 'database_settings_widget'
self.convert(name, 'settings')
def display_settings(self):
name = 'display_settings_widget'
self.convert(name, 'settings')
def download_settings(self):
name = 'download_settings_widget'
self.convert(name, 'settings')
def export_wizard(self):
name = 'export_wizard'
self.convert(name)
def notification_settings(self):
name = 'notification_settings_widget'
self.convert(name, 'settings')
def output_settings(self):
name = 'output_settings_widget'
self.convert(name, 'settings')
def quick_filter_settings(self):
name = 'quick_filter_settings_widget'
self.convert(name, 'settings')
def schedule_settings(self):
name = 'schedule_settings_widget'
self.convert(name, 'settings')
def settings(self):
name = 'settings_dialog'
self.convert(name, 'settings')
def object_info(self):
name = 'object_info_widget'
self.convert(name, 'widgets')
def object_settings(self):
name = 'object_settings_widget'
self.convert(name, 'widgets')
def invalid_dialog(self):
name = 'invalid_reddit_object_dialog'
self.convert(name)
def existing_names_dialog(self):
name = 'existing_names_dialog'
self.convert(name)
def user_auth_wizard(self):
name = 'user_auth_wizard'
self.convert(name)
def main():
try:
command = sys.argv[1]
except IndexError:
print('No class specified')
command = input('GUI Name (or number): ')
converter = Converter(command)
converter.run()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,101,822,332,021,041,000 | 26.433155 | 116 | 0.571345 | false | 3.868778 | false | false | false |
duncan-r/SHIP | ship/utils/fileloaders/fileloader.py | 1 | 2254 | """
Summary:
Main file loader for the API. This offers convenience methods to make it
simple to load any type of file from one place.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
from __future__ import unicode_literals
from ship.utils import utilfunctions as uuf
from ship.utils.fileloaders import tuflowloader
from ship.utils.fileloaders import iefloader
from ship.utils.fileloaders import datloader
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class FileLoader(object):
"""
"""
def __init__(self):
"""
"""
self._known_files = {'ief': iefloader.IefLoader,
'tcf': tuflowloader.TuflowLoader,
'dat': datloader.DatLoader,
'ied': datloader.DatLoader}
self.warnings = []
def loadFile(self, filepath, arg_dict={}):
"""Load a file from disk.
Args:
filepath (str): the path to the file to load.
arg_dict={}(Dict): contains keyword referenced arguments needed by
any of the loaders. E.g. the TuflowLoader can take some
scenario values.
Returns:
The object created by the individual file loaders. E.g. for .dat
files this will be an IsisUnitCollection. See the individual
ALoader implementations for details of return types.
Raises:
AttributeError: if the file type is not tcf/dat/ief/ied.
See Also:
            :class:`ALoader`
            :class:`IefLoader`
            :class:`TuflowLoader`
            :class:`DatLoader`
"""
ext = uuf.fileExtensionWithoutPeriod(filepath)
if not ext.lower() in self._known_files:
logger.error('File type %s is not currently supported for loading' % ext)
raise AttributeError('File type %s is not currently supported for loading' % ext)
loader = self._known_files[ext]()
contents = loader.loadFile(filepath, arg_dict)
self.warnings = loader.warnings
del loader
return contents
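# Editor's addition -- a minimal usage sketch of FileLoader, not part of the
# original module. The file path is a placeholder supplied by the caller.
def _example_load(filepath):
    loader = FileLoader()
    model = loader.loadFile(filepath)
    for warning in loader.warnings:
        logger.warning(warning)
    return model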
| mit | 366,836,191,615,290,940 | 26.82716 | 93 | 0.60071 | false | 4.285171 | false | false | false |
arrabito/DIRAC | ConfigurationSystem/Service/ConfigurationHandler.py | 1 | 3918 | """ The CS! (Configuration Service)
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.private.ServiceInterface import ServiceInterface
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import DErrno
gServiceInterface = None
gPilotSynchronizer = None
def initializeConfigurationHandler(serviceInfo):
global gServiceInterface
gServiceInterface = ServiceInterface(serviceInfo['URL'])
return S_OK()
class ConfigurationHandler(RequestHandler):
""" The CS handler
"""
types_getVersion = []
def export_getVersion(self):
return S_OK(gServiceInterface.getVersion())
types_getCompressedData = []
def export_getCompressedData(self):
sData = gServiceInterface.getCompressedConfigurationData()
return S_OK(sData)
types_getCompressedDataIfNewer = [basestring]
def export_getCompressedDataIfNewer(self, sClientVersion):
sVersion = gServiceInterface.getVersion()
retDict = {'newestVersion': sVersion}
if sClientVersion < sVersion:
retDict['data'] = gServiceInterface.getCompressedConfigurationData()
return S_OK(retDict)
types_publishSlaveServer = [basestring]
def export_publishSlaveServer(self, sURL):
gServiceInterface.publishSlaveServer(sURL)
return S_OK()
types_commitNewData = [basestring]
def export_commitNewData(self, sData):
global gPilotSynchronizer
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
res = gServiceInterface.updateConfiguration(sData, credDict['username'])
if not res['OK']:
return res
# Check the flag for updating the pilot 3 JSON file
if self.srv_getCSOption('UpdatePilotCStoJSONFile', False) and gServiceInterface.isMaster():
if gPilotSynchronizer is None:
try:
# This import is only needed for the Master CS service, making it conditional avoids
# dependency on the git client preinstalled on all the servers running CS slaves
from DIRAC.WorkloadManagementSystem.Utilities.PilotCStoJSONSynchronizer import PilotCStoJSONSynchronizer
except ImportError as exc:
self.log.exception("Failed to import PilotCStoJSONSynchronizer", repr(exc))
return S_ERROR(DErrno.EIMPERR, 'Failed to import PilotCStoJSONSynchronizer')
gPilotSynchronizer = PilotCStoJSONSynchronizer()
return gPilotSynchronizer.sync()
return res
types_writeEnabled = []
def export_writeEnabled(self):
return S_OK(gServiceInterface.isMaster())
types_getCommitHistory = []
def export_getCommitHistory(self, limit=100):
if limit > 100:
limit = 100
history = gServiceInterface.getCommitHistory()
if limit:
history = history[:limit]
return S_OK(history)
types_getVersionContents = [list]
def export_getVersionContents(self, versionList):
contentsList = []
for version in versionList:
retVal = gServiceInterface.getVersionContents(version)
if retVal['OK']:
contentsList.append(retVal['Value'])
else:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
return S_OK(contentsList)
types_rollbackToVersion = [basestring]
def export_rollbackToVersion(self, version):
retVal = gServiceInterface.getVersionContents(version)
if not retVal['OK']:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
return gServiceInterface.updateConfiguration(retVal['Value'],
credDict['username'],
updateVersionOption=True)
| gpl-3.0 | -4,709,197,104,656,305,000 | 33.368421 | 114 | 0.710822 | false | 4.085506 | true | false | false |
lunixbochs/nullstatic | gen.py | 1 | 2581 | #!/usr/bin/env python2
from collections import defaultdict
from datetime import date, datetime
from email.Utils import formatdate
import frontmatter
import jinja2
import markdown
import os
import sys
import time
import yaml
@jinja2.contextfilter
def _render(context, data):
return env.from_string(data['source']).render(**context)
def datekey(entry):
d = entry.get('date', date.min)
if isinstance(d, date):
d = datetime.combine(d, datetime.min.time())
return d
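# datekey normalises plain dates to datetimes so the 'datesort' filter can
# order entries whose front matter mixes date and datetime values.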
def strip_path(base, path):
return path.replace(base, '', 1).lstrip(os.sep)
def gen(base, out):
env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(base))
env.filters['render'] = _render
env.filters['markdown'] = markdown.markdown
env.filters['date'] = lambda x: x.strftime('%Y-%m-%d')
env.filters['rfc822'] = lambda x: formatdate(time.mktime(x.timetuple()))
env.filters['datesort'] = lambda x: sorted(x, key=lambda k: datekey(k))
tree = defaultdict(list)
for root, dirs, files in os.walk(base):
root = strip_path(base, root)
for name in files:
if name.endswith('.j2'):
path = os.path.join(base, root, name)
post = frontmatter.load(path)
data = {'name': name.rsplit('.', 1)[0], 'src': path, 'source': post.content}
data.update(post)
data['ext'] = data.get('ext', (os.path.splitext(data.get('render', ''))[1] if not '.' in data['name'] else ''))
data['url'] = data.get('url', data['name']) + data['ext']
data['dst'] = os.path.join(out, os.path.dirname(strip_path(base, path)), data['url'])
tree[root].append(data)
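    # Second pass: render every template collected above with a context that
    # exposes the whole tree, so a page can reference entries gathered from
    # other directories (e.g. an index page listing posts).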
for template in (t for ts in tree.values() for t in ts):
source, render = map(template.get, ('source', 'render'), (None, ''))
if source is not None:
if render:
source = open(os.path.join(base, render), 'r').read().decode('utf-8')
ctx = {cat: templates for cat, templates in tree.items() if cat}
ctx.update(tree=tree, **template)
data = env.from_string(source).render(**ctx)
dstdir = os.path.dirname(template['dst'])
if not os.path.exists(dstdir):
os.makedirs(dstdir)
with open(template['dst'], 'w') as o:
o.write(data.encode('utf-8'))
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print('Usage: gen.py <src> <out>')
sys.exit(1)
gen(*sys.argv[1:])
| mit | -623,981,896,369,554,700 | 37.522388 | 127 | 0.586594 | false | 3.506793 | false | false | false |
owais/django-simple-activity | simple_activity/models.py | 1 | 1965 | from django.db import models
from django.utils.timezone import now
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from filtered_contenttypes.fields import FilteredGenericForeignKey
from django_pgjson.fields import JsonBField
from .managers import ActionManager
from . import settings as app_settings
from . import registry
def _default_action_meta():
return {}
class Action(models.Model):
item_type = models.ForeignKey(ContentType, related_name='actions')
item_id = models.PositiveIntegerField()
item = FilteredGenericForeignKey('item_type', 'item_id')
target_type = models.ForeignKey(ContentType, blank=True, null=True,
related_name='target_actions')
target_id = models.PositiveIntegerField(blank=True, null=True)
target = FilteredGenericForeignKey('target_type', 'target_id')
actor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='activity')
verb = models.CharField(max_length=23,
choices=registry.as_model_choices())
published = models.DateTimeField(auto_now_add=True)
meta = JsonBField(default=_default_action_meta, blank=True)
objects = ActionManager()
class Meta:
abstract = app_settings.get('ACTION_MODEL') != 'simple_activity.Action'
ordering = ('-published',)
@classmethod
def add_action(klass, verb, actor, item, target=None, published=None,
meta={}):
if not registry.is_valid(verb):
raise ValueError('`{}` not a valid verb.'.format(verb))
published = published or now()
create_kwargs = {'actor': actor, 'item': item, 'verb': verb.code}
if target:
create_kwargs['target'] = target
create_kwargs['published'] = published
klass.objects.create(**create_kwargs)
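    # Illustrative call (the verb object and model instances are hypothetical):
    #   Action.add_action(verbs.LIKE, actor=request.user, item=photo,
    #                     target=photo.album, meta={'via': 'api'})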
@property
def verb_object(self):
return registry.get_from_code(self.verb)
| bsd-2-clause | 2,345,284,087,180,824,000 | 34.727273 | 80 | 0.672774 | false | 4.216738 | false | false | false |
EmanueleCannizzaro/scons | test/Clean/Option.py | 1 | 2620 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Clean/Option.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that {Set,Get}Option('clean') works correctly to control
cleaning behavior.
"""
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")
test.write('SConstruct', """
B = Builder(action = r'%(_python_)s build.py $TARGETS $SOURCES')
env = Environment(BUILDERS = { 'B' : B })
env.B(target = 'foo.out', source = 'foo.in')
mode = ARGUMENTS.get('MODE')
if mode == 'not':
assert not GetOption('clean')
if mode == 'set-zero':
assert GetOption('clean')
SetOption('clean', 0)
assert GetOption('clean')
if mode == 'set-one':
assert not GetOption('clean')
SetOption('clean', 1)
assert GetOption('clean')
""" % locals())
test.write('foo.in', '"Foo", I say!\n')
test.run(arguments='foo.out MODE=not')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')
test.run(arguments='-c foo.out MODE=set-zero')
test.must_not_exist(test.workpath('foo.out'))
test.run(arguments='foo.out MODE=none')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')
test.run(arguments='foo.out MODE=set-one')
test.must_not_exist(test.workpath('foo.out'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -4,647,620,131,027,711,000 | 29.465116 | 94 | 0.711832 | false | 3.308081 | true | false | false |
olhoneles/olhoneles | montanha/management/commands/collectors/algo.py | 1 | 12094 | # -*- coding: utf-8 -*-
#
# Copyright (©) 2010-2013 Estêvão Samuel Procópio
# Copyright (©) 2010-2013 Gustavo Noronha Silva
# Copyright (©) 2013 Marcelo Jorge Vieira
# Copyright (©) 2014 Wilson Pinto Júnior
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import operator
import os
import re
import rows
from datetime import datetime
from io import BytesIO
from cStringIO import StringIO
from cachetools import Cache, cachedmethod
from django.core.files import File
from basecollector import BaseCollector
from montanha.models import (
Institution, Legislature, PoliticalParty, Legislator, ExpenseNature,
ArchivedExpense, Mandate
)
class ALGO(BaseCollector):
TITLE_REGEX = re.compile(r'\d+ - (.*)')
MONEY_RE = re.compile(r'([0-9.,]+)[,.]([0-9]{2})$')
def __init__(self, *args, **kwargs):
super(ALGO, self).__init__(*args, **kwargs)
self.base_url = 'http://al.go.leg.br'
self.institution, _ = Institution.objects.get_or_create(
siglum='ALGO', name=u'Assembléia Legislativa do Estado de Goiás'
)
self.legislature, _ = Legislature.objects.get_or_create(
institution=self.institution,
date_start=datetime(2015, 1, 1),
date_end=datetime(2018, 12, 31)
)
self.list_of_legislators_cache = Cache(1024)
self.expenses_nature_cached = {}
def _normalize_party_siglum(self, siglum):
names_map = {
'SDD': 'Solidariedade',
}
return names_map.get(siglum, siglum)
def update_legislators(self):
url = self.base_url + '/deputado/'
html = self.retrieve_uri(url, post_process=False, force_encoding='utf-8')
rows_xpath = u'//tbody/tr'
fields_xpath = {
u'nome': u'./td[position()=1]/a/text()',
u'url': u'./td[position()=1]/a/@href',
u'party': u'./td[position()=2]/text()',
u'telefone': u'./td[position()=3]/text()',
u'fax': u'./td[position()=4]/text()',
u'email': u'./td[position()=5]/a[position()=1]/img/@title',
}
table = rows.import_from_xpath(BytesIO(html.encode('utf-8')), rows_xpath, fields_xpath)
url_regex = re.compile(r'.*id/(\d+)')
email_regex = re.compile(r'Email: (.*)')
for row in table:
_id = url_regex.match(row.url).group(1)
email = None
if row.email:
email = email_regex.match(row.email).group(1).strip()
party_siglum = self._normalize_party_siglum(row.party)
party, party_created = PoliticalParty.objects.get_or_create(
siglum=party_siglum
)
            if party_created:
                self.debug(u'New party: {0}'.format(party))
legislator, created = Legislator.objects.get_or_create(name=row.nome)
legislator.site = self.base_url + row.url
legislator.email = email
legislator.save()
if created:
self.debug(u'New legislator: {0}'.format(legislator))
else:
self.debug(u'Found existing legislator: {0}'.format(legislator))
self.mandate_for_legislator(legislator, party, original_id=_id)
@classmethod
def parse_title(self, title):
if '-' in title:
match = self.TITLE_REGEX.search(title)
if match:
return match.group(1).encode('utf-8')
return title.encode('utf-8')
@classmethod
def parse_money(self, value):
match = self.MONEY_RE.search(value)
if match:
return float('{0}.{1}'.format(
match.group(1).replace('.', '').replace(',', ''),
match.group(2)
))
else:
raise ValueError('Cannot convert {0} to float (money)'.format(value))
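    # For illustration (made-up amounts): parse_money('1.234,56') -> 1234.56
    # and parse_money('7,00') -> 7.0; strings not matching MONEY_RE raise
    # ValueError.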
def get_parlamentar_id(self, year, month, name):
legislators = self.get_list_of_legislators(year, month)
legislators = [i for i in legislators if i['nome'] == name]
if not legislators:
return
return legislators[0]['id']
@cachedmethod(operator.attrgetter('list_of_legislators_cache'))
def get_list_of_legislators(self, year, month):
url = '{0}/transparencia/verbaindenizatoria/listardeputados?ano={1}&mes={2}'.format(
self.base_url,
year,
month,
)
data = json.loads(self.retrieve_uri(url, force_encoding='utf8').text)
return data['deputados']
def find_data_for_month(self, mandate, year, month):
parlamentar_id = self.get_parlamentar_id(year, month, mandate.legislator.name)
if not parlamentar_id:
self.debug(
u'Failed to discover parlamentar_id for year={0}, month={1}, legislator={2}'.format(
year, month, mandate.legislator.name,
)
)
raise StopIteration
url = '{0}/transparencia/verbaindenizatoria/exibir?ano={1}&mes={2}&parlamentar_id={3}'.format(
self.base_url, year, month, parlamentar_id
)
data = self.retrieve_uri(url, force_encoding='utf8')
if u'parlamentar não prestou contas para o mês' in data.text:
self.debug(u'not found data for: {0} -> {1}/{2}'.format(
mandate.legislator, year, month
))
raise StopIteration
container = data.find('div', id='verba')
        if not container:
            self.debug('div#verba not found')
            raise StopIteration
table = container.find('table', recursive=False)
if not table:
self.debug('table.tabela-verba-indenizatoria not found')
raise StopIteration
group_trs = table.findAll('tr', {'class': 'verba_titulo'})
for tr in group_trs:
budget_title = self.parse_title(tr.text)
budget_subtitle = None
while True:
tr = tr.findNext('tr')
if not tr:
break
tr_class = tr.get('class')
if tr.get('class') == 'verba_titulo':
break
elif tr_class == 'info-detalhe-verba':
for data in self.parse_detale_verba(tr, budget_title, budget_subtitle):
yield data
elif tr_class == 'subtotal':
continue
elif len(tr.findAll('td')) == 3:
tds = tr.findAll('td')
budget_subtitle = self.parse_title(tds[0].text)
next_tr = tr.findNext('tr')
break_classes = ('subtotal', 'info-detalhe-verba', 'verba_titulo')
if next_tr.get('class') in break_classes:
continue
value_presented = self.parse_money(tds[1].text)
value_expensed = self.parse_money(tds[2].text)
if not value_expensed or not value_presented:
continue
data = {
'budget_title': budget_title,
'budget_subtitle': budget_subtitle,
'value_presented': value_presented,
'date': '1/%d/%d' % (month, year),
'value_expensed': value_expensed,
'number': 'Sem número'
}
self.debug(u'Generated JSON: {0}'.format(data))
yield data
def parse_detale_verba(self, elem, budget_title, budget_subtitle):
rows_xpath = u'//tbody/tr'
fields_xpath = {
u'nome': u'./td[position()=1]/text()',
u'cpf_cnpj': u'./td[position()=2]/text()',
u'date': u'./td[position()=3]/text()',
u'number': u'./td[position()=4]/text()',
u'value_presented': u'./td[position()=5]/text()',
u'value_expensed': u'./td[position()=6]/text()',
}
table = rows.import_from_xpath(
BytesIO(str(elem)), rows_xpath, fields_xpath)
for row in table:
data = dict(row.__dict__)
data.update({
'budget_title': budget_title,
'budget_subtitle': budget_subtitle,
'cpf_cnpj': self.normalize_cnpj_or_cpf(row.cpf_cnpj),
'value_presented': self.parse_money(row.value_presented),
'value_expensed': self.parse_money(row.value_expensed),
})
self.debug(u'Generated JSON: {0}'.format(data))
yield data
def get_or_create_expense_nature(self, name):
if name not in self.expenses_nature_cached:
try:
nature = ExpenseNature.objects.get(name=name)
except ExpenseNature.DoesNotExist:
nature = ExpenseNature(name=name)
nature.save()
self.expenses_nature_cached[name] = nature
return self.expenses_nature_cached[name]
def update_data_for_month(self, mandate, year, month):
for data in self.find_data_for_month(mandate, year, month):
nature = self.get_or_create_expense_nature(
'{0}: {1}'.format(data['budget_title'], data['budget_subtitle'])
)
name = data.get('nome') or 'Sem nome'
no_identifier = u'Sem CPF/CNPJ ({0})'.format(name)
cpf_cnpj = data.get('cpf_cnpj', no_identifier)
supplier = self.get_or_create_supplier(cpf_cnpj, name)
date = datetime.strptime(data['date'], '%d/%m/%Y')
expense = ArchivedExpense(
number=data.get('number', ''),
nature=nature,
date=date,
value=data['value_presented'],
expensed=data['value_expensed'],
mandate=mandate,
supplier=supplier,
collection_run=self.collection_run,
)
expense.save()
def update_images(self):
mandates = Mandate.objects.filter(legislature=self.legislature, legislator__picture='')
headers = {
'Referer': self.base_url + '/deputado/',
'Origin': self.base_url,
}
deputado_data = self.retrieve_uri(self.base_url + '/deputado/', headers=headers)
for mandate in mandates:
leg = mandate.legislator
found_text = deputado_data.find(text=re.compile(leg.name))
if not found_text:
self.debug(u'Legislator not found in page: {0}'.format(mandate.legislator.name))
continue
tr = found_text.findParents('tr')[0]
tds = tr.findAll('td')
detail_path = tds[0].find('a')['href']
detail_url = self.base_url + detail_path
detail_data = self.retrieve_uri(detail_url, headers=headers)
photo_container = detail_data.find('div', {'class': re.compile(r'foto')})
photo_url = photo_container.find('img')['src']
photo_data = self.retrieve_uri(self.base_url + photo_url, post_process=False, return_content=True)
photo_buffer = StringIO(photo_data)
photo_buffer.seek(0)
leg.picture.save(os.path.basename(photo_url), File(photo_buffer))
leg.save()
            self.debug('Saved {0} Image URL: {1}'.format(leg.name, photo_url))
else:
self.debug('All legislators have photos')
| agpl-3.0 | -3,635,771,197,065,837,000 | 34.532353 | 110 | 0.552934 | false | 3.617066 | false | false | false |
mupif/mupif | mupif/Field.py | 1 | 42683 | #
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from builtins import range
from builtins import object
from . import Cell
from . import FieldID
from . import ValueType
from . import BBox
from . import APIError
from . import MupifObject
from . import Mesh
from .Physics import PhysicalQuantities
from .Physics.PhysicalQuantities import PhysicalQuantity
from numpy import array, arange, random, zeros
import numpy
import copy
import Pyro4
from enum import IntEnum
import logging
log = logging.getLogger()
try:
import cPickle as pickle # faster serialization if available
except:
import pickle
# import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute
# lookup thread.lock failed
# debug flag
debug = 0
class FieldType(IntEnum):
"""
Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased.
"""
FT_vertexBased = 1
FT_cellBased = 2
@Pyro4.expose
class Field(MupifObject.MupifObject, PhysicalQuantity):
"""
Representation of field. Field is a scalar, vector, or tensorial
quantity defined on a spatial domain. The field, however is assumed
to be fixed at certain time. The field can be evaluated in any spatial point
belonging to underlying domain.
Derived classes will implement fields defined on common discretizations,
like fields defined on structured/unstructured FE meshes, FD grids, etc.
.. automethod:: __init__
.. automethod:: _evaluate
"""
def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}):
"""
Initializes the field instance.
:param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization
:param FieldID fieldID: Field type (displacement, strain, temperature ...)
:param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically.
:param Physics.PhysicalUnits units: Field value units
:param Physics.PhysicalQuantity time: Time associated with field values
:param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value)
:type values: list of tuples representing individual values
:param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased
:param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0
:param dict metaData: Optionally pass metadata for merging
"""
super(Field, self).__init__()
self.mesh = mesh
self.fieldID = fieldID
self.valueType = valueType
self.time = time
self.uri = None # pyro uri; used in distributed setting
# self.log = logging.getLogger()
self.fieldType = fieldType
self.objectID = objectID
if values is None:
if self.fieldType == FieldType.FT_vertexBased:
ncomponents = mesh.getNumberOfVertices()
else:
ncomponents = mesh.getNumberOfCells()
self.value = zeros((ncomponents, self.getRecordSize()))
else:
self.value = values
if PhysicalQuantities.isPhysicalUnit(units):
self.unit = units
else:
self.unit = PhysicalQuantities.findUnit(units)
self.setMetadata('Units', self.unit.name())
self.setMetadata('Type', 'mupif.Field.Field')
self.setMetadata('Type_ID', str(self.fieldID))
self.setMetadata('FieldType', str(fieldType))
self.setMetadata('ValueType', str(self.valueType))
self.updateMetadata(metaData)
@classmethod
def loadFromLocalFile(cls, fileName):
"""
Alternative constructor which loads instance directly from a Pickle module.
:param str fileName: File name
:return: Returns Field instance
:rtype: Field
"""
return pickle.load(open(fileName, 'rb'))
def getRecordSize(self):
"""
Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance.
:return: number of scalars (1,3,9 respectively for scalar, vector, tensor)
:rtype: int
"""
if self.valueType == ValueType.Scalar:
return 1
elif self.valueType == ValueType.Vector:
return 3
elif self.valueType == ValueType.Tensor:
return 9
else:
raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType)
def getMesh(self):
"""
Obtain mesh.
:return: Returns a mesh of underlying discretization
:rtype: Mesh.Mesh
"""
return self.mesh
def getValueType(self):
"""
Returns ValueType of the field, e.g. scalar, vector, tensor.
:return: Returns value type of the receiver
:rtype: ValueType
"""
return self.valueType
def getFieldID(self):
"""
Returns FieldID, e.g. FID_Displacement, FID_Temperature.
:return: Returns field ID
:rtype: FieldID
"""
return self.fieldID
def getFieldIDName(self):
"""
Returns name of the field.
:return: Returns fieldID name
:rtype: string
"""
return self.fieldID.name
def getFieldType(self):
"""
Returns receiver field type (values specified as vertex or cell values)
:return: Returns fieldType id
:rtype: FieldType
"""
return self.fieldType
def getTime(self):
"""
Get time of the field.
:return: Time of field data
:rtype: Physics.PhysicalQuantity
"""
return self.time
def evaluate(self, positions, eps=0.0):
"""
Evaluates the receiver at given spatial position(s).
:param positions: 1D/2D/3D position vectors
:type positions: tuple, a list of tuples
:param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used)
:return: field value(s)
:rtype: Physics.PhysicalQuantity with given value or tuple of values
"""
# test if positions is a list of positions
if isinstance(positions, list):
ans = []
for pos in positions:
ans.append(self._evaluate(pos, eps))
return PhysicalQuantity(ans, self.unit)
else:
# single position passed
return PhysicalQuantity(self._evaluate(positions, eps), self.unit)
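    # Sketch of typical use (assuming `f` is a Field on a 3D mesh; the
    # coordinates below are made up):
    #   v = f.evaluate((0.5, 0.2, 0.0))                  # single point
    #   vs = f.evaluate([(0., 0., 0.), (1., 1., 0.)])    # list of points
    # Both calls return Physics.PhysicalQuantity objects carrying self.unit.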
def _evaluate(self, position, eps):
"""
Evaluates the receiver at a single spatial position.
:param tuple position: 1D/2D/3D position vector
:param float eps: Optional tolerance
:return: field value
:rtype: tuple of doubles
.. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ .
"""
cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position]))
# answer=None
if len(cells):
if self.fieldType == FieldType.FT_vertexBased:
for icell in cells:
try:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()])
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
raise
return answer
except ZeroDivisionError:
print('ZeroDivisionError?')
log.debug(icell.number)
log.debug(position)
icell.debug = 1
log.debug(icell.containsPoint(position), icell.glob2loc(position))
log.error('Field::evaluate - no source cell found for position %s' % str(position))
for icell in cells:
log.debug(icell.number)
log.debug(icell.containsPoint(position))
log.debug(icell.glob2loc(position))
else: # if (self.fieldType == FieldType.FT_vertexBased):
# in case of cell based fields do compute average of cell values containing point
# this typically happens when point is on the shared edge or vertex
count = 0
for icell in cells:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
tmp = self.value[icell.number]
if count == 0:
answer = list(tmp)
else:
                                answer = [x + y for x, y in zip(answer, tmp)]
count += 1
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
log.error(icell.getVertices())
raise
# end loop over icells
if count == 0:
log.error('Field::evaluate - no source cell found for position %s', str(position))
# for icell in cells:
# log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position))
else:
answer = [x/count for x in answer]
return answer
else:
# no source cell found
log.error('Field::evaluate - no source cell found for position ' + str(position))
raise ValueError('Field::evaluate - no source cell found for position ' + str(position))
def getVertexValue(self, vertexID):
"""
Returns the value associated with a given vertex.
:param int vertexID: Vertex identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_vertexBased:
return PhysicalQuantity(self.value[vertexID], self.unit)
else:
            raise TypeError('Attempt to access vertex value of cell-based field, use evaluate instead')
def getCellValue(self, cellID):
"""
Returns the value associated with a given cell.
:param int cellID: Cell identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_cellBased:
return PhysicalQuantity(self.value[cellID], self.unit)
else:
            raise TypeError('Attempt to access cell value of vertex-based field, use evaluate instead')
def _giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
        Deprecated; use getVertexValue() or getCellValue() instead.
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: Physics.PhysicalQuantity
"""
return PhysicalQuantity(self.value[componentID], self.unit)
def giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: tuple
"""
return self.value[componentID]
def setValue(self, componentID, value):
"""
Sets the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:param tuple value: Value to be set for a given component, should have the same units as receiver
.. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked.
"""
self.value[componentID] = value
def commit(self):
"""
Commits the recorded changes (via setValue method) to a primary field.
"""
def getObjectID(self):
"""
Returns field objectID.
:return: Object's ID
:rtype: int
"""
return self.objectID
def getUnits(self):
"""
:return: Returns units of the receiver
:rtype: Physics.PhysicalUnits
"""
return self.unit
def merge(self, field):
"""
Merges the receiver with given field together. Both fields should be on different parts of the domain (can also overlap), but should refer to same underlying discretization, otherwise unpredictable results can occur.
:param Field field: given field to merge with.
"""
# first merge meshes
mesh = copy.deepcopy(self.mesh)
mesh.merge(field.mesh)
log.debug(mesh)
# merge the field values
# some type checking first
if self.fieldType != field.fieldType:
raise TypeError("Field::merge: fieldType of receiver and parameter is different")
if self.fieldType == FieldType.FT_vertexBased:
values = [0]*mesh.getNumberOfVertices()
for v in range(self.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v]
else:
values = [0]*mesh.getNumberOfCells()
for v in range(self.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v]
self.mesh = mesh
self.value = values
def field2VTKData (self, name=None, lookupTable=None):
"""
Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module.
:param str name: human-readable name of the field
:param pyvtk.LookupTable lookupTable: color lookup table
:return: Instance of pyvtk
:rtype: pyvtk.VtkData
"""
import pyvtk
if name is None:
name = self.getFieldIDName()
if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable):
log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.')
lookupTable = None
if lookupTable is None:
lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm')
# Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking
# 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping.
scalarsKw = dict(name=name, lookup_table='default')
else:
scalarsKw = dict(name=name, lookup_table=lookupTable.name)
# see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example
vectorsKw = dict(name=name) # vectors don't have a lookup_table
if self.fieldType == FieldType.FT_vertexBased:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
else:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
def getMartixForTensor(self, values):
"""
Reshape values to a list with 3x3 arrays. Usable for VTK export.
:param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...]
:return: List containing 3x3 matrices for each tensor
:rtype: list
"""
tensor = []
for i in values:
tensor.append(numpy.reshape(i, (3, 3)))
return tensor
def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL):
"""
Dump Field to a file using a Pickle serialization module.
:param str fileName: File name
:param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary
"""
pickle.dump(self, open(fileName, 'wb'), protocol)
def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None):
"""
Plots and/or saves 2D image using a matplotlib library. Works for structured and unstructured 2D/3D fields. 2D/3D fields need to define plane. This method gives only basic viewing options, for aesthetic and more elaborated output use e.g. VTK field export with
postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1
:param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz'
:param tuple elevation: range of third coordinate. For example, in plane='xy' is grabs z coordinates in the range
:param int numX: number of divisions on x graph axis
:param int numY: number of divisions on y graph axis
:param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic'
:param int fieldComponent: component of the field
        :param bool vertex: if vertices should be plotted as points
:param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal'
:param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing.
:param tuple barRange: min and max bar range. If barRange=('NaN','NaN'), it is adjusted automatically
:param str barFormatNum: format of color bar numbers
:param str title: title
:param str xlabel: x axis label
:param str ylabel: y axis label
:param str fileName: if nonempty, a filename is written to the disk, usually png, pdf, ps, eps and svg are supported
        :param bool show: if the plot should be shown
:param tuple figsize: size of canvas in inches. Affects only showing a figure. Image to a file adjust one side automatically.
:param obj matPlotFig: False means plot window remains in separate thread, True waits until a plot window becomes closed
:return: handle to matPlotFig
:rtype: matPlotFig
"""
try:
import numpy as np
import math
from scipy.interpolate import griddata
import matplotlib
matplotlib.use('TkAgg') # Qt4Agg gives an empty, black window
import matplotlib.pyplot as plt
except ImportError as e:
log.error('Skipping field2Image2D due to missing modules: %s' % e)
return None
# raise
if self.fieldType != FieldType.FT_vertexBased:
raise APIError.APIError('Only FieldType.FT_vertexBased is now supported')
mesh = self.getMesh()
numVertices = mesh.getNumberOfVertices()
indX = 0
indY = 0
elev = 0
if plane == 'xy':
indX = 0
indY = 1
elev = 2
elif plane == 'xz':
indX = 0
indY = 2
elev = 1
elif plane == 'yz':
indX = 1
indY = 2
elev = 0
# find eligible vertex points and values
vertexPoints = []
vertexValue = []
for i in range(0, numVertices):
coords = mesh.getVertex(i).getCoordinates()
# print(coords)
value = self.giveValue(i)[fieldComponent]
if elevation[1] > coords[elev] > elevation[0]:
vertexPoints.append((coords[indX], coords[indY]))
vertexValue.append(value)
if len(vertexPoints) == 0:
log.info('No valid vertex points found, putting zeros on domain 1 x 1')
for i in range(5):
vertexPoints.append((i % 2, i/4.))
vertexValue.append(0)
# for i in range (0, len(vertexPoints)):
# print (vertexPoints[i], vertexValue[i])
vertexPointsArr = np.array(vertexPoints)
vertexValueArr = np.array(vertexValue)
xMin = vertexPointsArr[:, 0].min()
xMax = vertexPointsArr[:, 0].max()
yMin = vertexPointsArr[:, 1].min()
yMax = vertexPointsArr[:, 1].max()
# print(xMin, xMax, yMin, yMax)
grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)]
grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp)
# print (grid_z1.T)
        plt.ion()  # interactive mode
if matPlotFig is None:
matPlotFig = plt.figure(figsize=figsize)
# plt.xlim(xMin, xMax)
# plt.ylim(yMin, yMax)
plt.clf()
plt.axis((xMin, xMax, yMin, yMax))
image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal')
# plt.margins(tight=True)
# plt.tight_layout()
# plt.margins(x=-0.3, y=-0.3)
if colorBar:
cbar = plt.colorbar(orientation=colorBar, format=barFormatNum)
if colorBarLegend is not None:
if colorBarLegend == '':
colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent)
if self.unit is not None:
colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')'
cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90)
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if vertex == 1:
plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10)
# plt.axis('equal')
# plt.gca().set_aspect('equal', adjustable='box-forced')
if isinstance(barRange[0], float) or isinstance(barRange[0], int):
image.set_clim(vmin=barRange[0], vmax=barRange[1])
if fileName:
plt.savefig(fileName, bbox_inches='tight')
if show:
matPlotFig.canvas.draw()
# plt.ioff()
# plt.show(block=True)
return matPlotFig
def field2Image2DBlock(self):
"""
Block an open window from matPlotLib. Waits until closed.
"""
import matplotlib.pyplot as plt
plt.ioff()
plt.show(block=True)
def toHdf5(self, fileName, group='component1/part1'):
"""
Dump field to HDF5, in a simple format suitable for interoperability (TODO: document).
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be saved under.
The HDF hierarchy is like this::
group
|
+--- mesh_01 {hash=25aa0aa04457}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- mesh_02 {hash=17809e2b86ea}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- ...
+--- field_01
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_02
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_03
| +--- -> mesh_02
| \--- [cell_values]
\--- ...
where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data.
.. note:: This method has not been tested yet. The format is subject to future changes.
"""
import h5py
hdf = h5py.File(fileName, 'a', libver='latest')
if group not in hdf:
gg = hdf.create_group(group)
else:
gg = hdf[group]
# raise IOError('Path "%s" is already used in "%s".'%(path,fileName))
def lowestUnused(trsf, predicate, start=1):
"""
Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms
integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts
as argument. Lowest transformed value is returned.
"""
import itertools
for i in itertools.count(start=start):
t = trsf(i)
if not predicate(t):
return t
# save mesh (not saved if there already)
newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg)
mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp)
if self.value:
fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf))
fieldGrp['mesh'] = mh5
fieldGrp.attrs['fieldID'] = self.fieldID
fieldGrp.attrs['valueType'] = self.valueType
# string/bytes may not contain NULL when stored as string in HDF5
# see http://docs.h5py.org/en/2.3/strings.html
# that's why we cast to opaque type "void" and uncast using tostring before unpickling
fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit))
fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time))
# fieldGrp.attrs['time']=self.time.getValue()
if self.fieldType == FieldType.FT_vertexBased:
val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float)
for vert in range(self.getMesh().getNumberOfVertices()):
val[vert] = self.getVertexValue(vert).getValue()
fieldGrp['vertex_values'] = val
elif self.fieldType == FieldType.FT_cellBased:
# raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.")
val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float)
for cell in range(self.getMesh().getNumberOfCells()):
val[cell] = self.getCellValue(cell)
fieldGrp['cell_values'] = val
else:
raise RuntimeError("Unknown fieldType %d." % self.fieldType)
@staticmethod
def makeFromHdf5(fileName, group='component1/part1'):
"""
Restore Fields from HDF5 file.
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
.. note:: This method has not been tested yet.
"""
import h5py
hdf = h5py.File(fileName, 'r', libver='latest')
grp = hdf[group]
# load mesh and field data from HDF5
meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')]
fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')]
# construct all meshes as mupif objects
meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs]
# construct all fields as mupif objects
ret = []
for f in fieldObjs:
if 'vertex_values' in f:
fieldType, values = FieldType.FT_vertexBased, f['vertex_values']
elif 'cell_values' in f:
fieldType, values = FieldType.FT_cellBased, f['cell_values']
else:
ValueError("HDF5/mupif format error: unable to determine field type.")
fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring()
if units == '':
units = None # special case, handled at saving time
else:
units = pickle.loads(units)
if time == '':
time = None # special case, handled at saving time
else:
time = pickle.loads(time)
meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to
ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType))
return ret
def toVTK2(self, fileName, format='ascii'):
"""
Save the instance as Unstructured Grid in VTK2 format (``.vtk``).
:param str fileName: where to save
:param str format: one of ``ascii`` or ``binary``
"""
self.field2VTKData().tofile(filename=fileName, format=format)
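    # e.g. field.toVTK2('temperature.vtk') writes a legacy-VTK file readable by
    # ParaView or Mayavi (the file name here is only illustrative).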
@staticmethod
def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']):
"""
Return fields stored in *fileName* in the VTK2 (``.vtk``) format.
:param str fileName: filename to load from
        :param PhysicalUnit unit: physical unit of field values
:param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered)
        :param [string,] skip: data array names to be skipped when reading the input file; the default value skips the default coolwarm colormap.
:returns: one field from VTK
:rtype: Field
"""
import pyvtk
from .dataID import FieldID
if not fileName.endswith('.vtk'):
log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).')
ret = []
try:
data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk)
except NotImplementedError:
log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName)
return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True)
ugr = data.structure
if not isinstance(ugr, pyvtk.UnstructuredGrid):
raise NotImplementedError(
"grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__)
mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr)
# get cell and point data
pd, cd = data.point_data.data, data.cell_data.data
for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for d in dd:
# will raise KeyError if fieldID with that name is not defined
if d.name in skip:
continue
fid = FieldID[d.name]
# determine the number of components using the expected number of values from the mesh
expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells())
nc = len(d.scalars)//expectedNumVal
valueType = ValueType.fromNumberOfComponents(nc)
values = [d.scalars[i*nc:i*nc+nc] for i in range(len(d.scalars))]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=unit, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def toVTK3(self, fileName, **kw):
"""
Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly.
:param fileName: output file name
:param ``**kw``: passed to :obj:`manyToVTK3`
"""
return self.manyToVTK3([self], fileName, **kw)
@staticmethod
def manyToVTK3(fields, fileName, ascii=False, compress=True):
"""
Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``).
All *fields* must be defined on the same mesh object; exception will be raised if this is not the case.
:param list of Field fields:
:param fileName: output file name
:param bool ascii: write numbers are ASCII in the XML-based VTU file (rather than base64-encoded binary in XML)
:param bool compress: apply compression to the data
"""
import vtk
if not fields:
raise ValueError('At least one field must be passed.')
# check if all fields are defined on the same mesh
if len(set([f.mesh for f in fields])) != 1:
raise RuntimeError(
                'Not all fields are sharing the same Mesh object (and cannot be saved to a single .vtu file)')
# convert mesh to VTK UnstructuredGrid
mesh = fields[0].getMesh()
vtkgrid = mesh.asVtkUnstructuredGrid()
# add fields as arrays
for f in fields:
arr = vtk.vtkDoubleArray()
arr.SetNumberOfComponents(f.getRecordSize())
arr.SetName(f.getFieldIDName())
assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled
if f.getFieldType() == FieldType.FT_vertexBased:
nn = mesh.getNumberOfVertices()
else:
nn = mesh.getNumberOfCells()
arr.SetNumberOfValues(nn)
for i in range(nn):
arr.SetTuple(i, f.giveValue(i))
if f.getFieldType() == FieldType.FT_vertexBased:
vtkgrid.GetPointData().AddArray(arr)
else:
vtkgrid.GetCellData().AddArray(arr)
# write the unstructured grid to file
writer = vtk.vtkXMLUnstructuredGridWriter()
if compress:
writer.SetCompressor(vtk.vtkZLibDataCompressor())
if ascii:
writer.SetDataModeToAscii()
writer.SetFileName(fileName)
# change between VTK5 and VTK6
if vtk.vtkVersion().GetVTKMajorVersion() == 6:
writer.SetInputData(vtkgrid)
else:
            writer.SetInput(vtkgrid)
writer.Write()
# finito
@staticmethod
def makeFromVTK3(fileName, units, time=0, forceVersion2=False):
"""
Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or ``.vtp`` with *forceVersion2*); the mesh is shared between fields.
``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not.
.. note:: Units are not supported when loading from VTK, all fields will have ``None`` unit assigned.
:param str fileName: VTK (``*.vtu``) file
:param PhysicalUnit units: units of read values
:param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered)
        :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, instead of ``vtk.vtkXMLGenericDataObjectReader``; this also assumes *fileName* ends with ``.vtk`` (not checked, but may cause an error).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
"""
import vtk
from .dataID import FieldID
# rr=vtk.vtkXMLUnstructuredGridReader()
if forceVersion2 or fileName.endswith('.vtk'):
rr = vtk.vtkGenericDataObjectReader()
else:
rr = vtk.vtkXMLGenericDataObjectReader()
rr.SetFileName(fileName)
rr.Update()
ugrid = rr.GetOutput()
if not isinstance(ugrid, vtk.vtkUnstructuredGrid):
raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % (
fileName, ugrid.__class__.__name__))
# import sys
# sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid)))
# make mesh -- implemented separately
mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid)
# fields which will be returned
ret = []
# get cell and point data
cd, pd = ugrid.GetCellData(), ugrid.GetPointData()
for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for idata in range(data.GetNumberOfArrays()):
                aname, arr = data.GetArrayName(idata), data.GetArray(idata)
nt = arr.GetNumberOfTuples()
if nt == 0:
raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname)
t0 = arr.GetTuple(0)
valueType = ValueType.fromNumberOfComponents(len(arr.GetTuple(0)))
# this will raise KeyError if fieldID with that name not defined
fid = FieldID[aname]
# get actual values as tuples
values = [arr.GetTuple(t) for t in range(nt)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=units, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def _sum(self, other, sign1, sign2):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to modify the field values.
"""
raise TypeError('Not supported')
def inUnitsOf(self, *units):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to use convertToUnits method
performing in place conversion.
"""
raise TypeError('Not supported')
# def __deepcopy__(self, memo):
# """ Deepcopy operatin modified not to include attributes starting with underscore.
# These are supposed to be the ones valid only to s specific copy of the receiver.
# An example of these attributes are _PyroURI (injected by Application),
# where _PyroURI contains the URI of specific object, the copy should receive
# its own URI
# """
# cls = self.__class__
# dpcpy = cls.__new__(cls)
#
# memo[id(self)] = dpcpy
# for attr in dir(self):
# if not attr.startswith('_'):
# value = getattr(self, attr)
# setattr(dpcpy, attr, copy.deepcopy(value, memo))
# return dpcpy
| lgpl-3.0 | 7,138,186,339,437,092,000 | 43.094008 | 381 | 0.597896 | false | 4.202324 | false | false | false |
unisport/thumblr | thumblr/tasks.py | 1 | 1681 |
from django.conf import settings
from celery import Celery, Task
from raven import Client
import usecases
client = Client(settings.SENTRY_DSN)
celery = Celery('tasks')
celery.conf.update(
AWS_ACCESS_KEY_ID=settings.AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY=settings.AWS_SECRET_ACCESS_KEY,
CELERY_TASK_SERIALIZER='json',
CELERY_ACCEPT_CONTENT=['json'],
CELERY_RESULT_SERIALIZER='json',
BROKER_URL="sqs://%s:%s@" % (settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY),
CELERY_RESULT_BACKEND="redis",
CELERY_TIMEZONE='Europe/Copenhagen',
BROKER_TRANSPORT_OPTIONS={'region': 'eu-west-1',
'polling_interval': 0.3,
'visibility_timeout': 3600,
'queue_name_prefix': 'catalog_products_'},
)
class ImagesCallbackTask(Task):
"""
Generic subclass for Product Image Processing tasks
so in case of of failure, a notification is sent to Sentry.
"""
# def on_success(self, retval, task_id, args, kwargs):
# pass
def on_failure(self, exc, task_id, args, kwargs, einfo):
# client.captureMessage('Task "%s" has failed miserably.' % task_id)
client.capture('raven.events.Message', message='Task "%s" has failed miserably.' % task_id,
data={},
extra={'exc': exc,
'Task ID': task_id,
'Args': args,
'Kwargs': kwargs,
'einfo': einfo
}
)
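# Sketch of intended use (task name and body are hypothetical):
#   @celery.task(base=ImagesCallbackTask)
#   def process_image(image_id):
#       ...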
usecases.add_image = celery.task(usecases.add_image) | mit | -463,160,966,042,746,240 | 33.326531 | 100 | 0.558001 | false | 3.803167 | false | false | false |
guaka/trust-metrics | trustlet/pymmetry/file_certs.py | 1 | 9725 | #!/usr/bin/env python
""" file_certs.py: File-based Trust Metric Profiles (example code)
Copyright (C) 2001 Luke Kenneth Casson Leighton <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
File-based Profiles on which certifications (also file-based) can
be stored and retrieved for evaluation by Trust Metrics.
...with NO LOCKING! ...yet.
unfortunately, type info of non-string profile names
is lost on the [very basic] file-format. so, whilst
the trust metric and net flow evaluation code couldn't
care less what the type of its nodes is, the file
storage does.
*shrug*. who wants to be a number, anyway.
WARNING: there is a lot of class-context overloading
in this demonstration code, particularly DictCertifications
and FileCertifications get reused rather inappropriately.
... but it will do, as a simple example. [i'll get round
to doing a SQL and an XML one, later, properly].
"""
from certs import DictCertifications, CertInfo
from profile import Profile
from string import join
from os import makedirs, path
# deal with having to store strings as text. *sigh*
def unsafe_str(s):
s = s.strip()
if s[0] != "'" and s[0] != '"':
# paranoia. don't want code from file evaluated!
# if someone edits a file and removes the first
# quote but not the second, TOUGH.
s = '"""'+s+'"""'
return eval(s)
# yes, we _do_ want the quotes.
# they get removed by unsafe_str, above, on retrieval.
def safe_str(s):
return repr(str(s))
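# Round trip, for illustration: safe_str('best') returns the quoted text 'best'
# (quotes included) for writing to file, and unsafe_str() evaluates that text
# back to the plain string.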
class FileCertifications(DictCertifications):
""" Certification file of format:
certname1: user1=level1, user2=level2, ...
certname2: user1=level1, user2=level2, ...
"""
def set_filename(self, file):
self.f = file
try:
p, f = path.split(file)
makedirs(p)
except:
pass
def __read_dict(self):
self.info = {}
try:
f = open(self.f,"rw")
except:
return
for l in f.readlines():
l = l.strip()
if len(l) == 0:
continue
[ftype, certs] = l.split(":")
ftype = unsafe_str(ftype)
certs = certs.split(",")
for cert in certs:
[fname, flevel] = cert.split("=")
l = unsafe_str(flevel)
fn = unsafe_str(fname)
DictCertifications.add(self, ftype, fn, l)
f.close()
def __write_dict(self):
f = open(self.f,"w")
for key in DictCertifications.cert_keys(self):
l = safe_str(key)+": "
certs = []
dict = DictCertifications.certs_by_type(self, key)
for c in dict.keys():
certs.append(safe_str(c)+"="+safe_str(dict[c]))
l += join(certs, ", ") + "\n"
f.write(l)
f.close()
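    # An illustrative file body produced by __write_dict (names and levels are
    # made up):
    #   'like': 'luke'='best', 'heather'='good'
    #   'hate': 'bob'="don't care"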
def cert_keys(self):
self.__read_dict()
return DictCertifications.cert_keys(self)
def certs_by_type(self, type):
self.__read_dict()
return DictCertifications.certs_by_type(self, type)
def cert_type_keys(self, type, name):
self.__read_dict()
return DictCertifications.certs_type_keys(self, type, name)
def add(self, type, name, level):
self.__read_dict()
DictCertifications.add(self, type, name, level)
self.__write_dict()
def remove(self, type, name):
self.__read_dict()
        DictCertifications.remove(self, type, name)
self.__write_dict()
def cert_level(self, type, name):
self.__read_dict()
return DictCertifications.cert_level(self, type, name)
class FileProfile(Profile):
def __init__(self, name, CertClass):
Profile.__init__(self, name, CertClass)
self._certs_by_subj.set_filename("users/"+str(name)+"/certs.subj")
self._certs_by_issuer.set_filename("users/"+str(name)+"/certs.issuer")
# overload meaning of FileCertifications here to store user-profile.
self.info = FileCertifications()
self.info.set_filename("users/"+str(name)+"/profile")
def set_filename(self, file):
self.info.set_filename(file)
def info_keys(self):
return self.info.cert_keys()
def infos_by_type(self, type):
return self.info.certs_by_type(type)
def info_type_keys(self, type, name):
        return self.info.cert_type_keys(type, name)
def add(self, type, name, level):
self.info.add(type, name, level)
def remove(self, type, name):
        self.info.remove(type, name)
def info_index(self, type, name):
return self.info.cert_level(type, name)
class FileCertInfo(CertInfo):
""" This is probably some of the clumsiest code ever written.
overload DictCertification - because it's been a really
good, lazy weekend, to store an unordered list (seeds),
an ordered list (levels) etc.
yuck. please, someone shoot me or do a better job,
_esp._ for example code.
"""
def cert_seeds(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an unordered list. argh.
d = d.certs_by_type("seeds")
return d.keys()
def cert_levels(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
dict = d.certs_by_type("levels")
# clumsy usage of a dictionary into an ordered list. argh.
keys = dict.keys()
l = [None] * len(keys)
for idx in keys:
l[int(idx)] = dict[idx]
return l
def cert_level_default(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("default level").keys()
return d
def cert_level_min(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("min level").keys()
return d
def cert_tmetric_type(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("type").keys()
return d
def add_cert_seed(self, idxn, seed):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an unordered list. argh.
return d.add("seeds", seed, None)
def add_cert_level(self, idxn, level, index):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an index-ordered list. argh.
return d.add("levels", index, level)
def set_cert_level_default(self, idxn, dflt_level):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("default level", dflt_level, None)
def set_cert_level_min(self, idxn, min_level):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("min level", min_level, None)
def set_cert_tmetric_type(self, idxn, type):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("type", type, None)
def test():
from profile import Profiles
from tm_calc import PymTrustMetric
from pprint import pprint
f = FileCertInfo()
f.add_cert_seed('like', '55')
f.add_cert_seed('like', 'luke')
f.add_cert_level('like', 'none', 0)
f.add_cert_level('like', "don't care", 1)
f.add_cert_level('like', 'good', 2)
f.add_cert_level('like', 'best', 3)
f.set_cert_level_default('like', "don't care")
f.set_cert_level_min('like', 'none')
f.set_cert_tmetric_type('like', 'to')
f.add_cert_seed('hate', 'heather')
f.add_cert_seed('hate', '10')
f.add_cert_level('hate', 'none', 0)
f.add_cert_level('hate', "don't care", 1)
f.add_cert_level('hate', 'dislike', 2)
f.add_cert_level('hate', 'looks CAN kill', 3)
f.set_cert_level_default('hate', "don't care")
f.set_cert_level_min('hate', 'none')
f.set_cert_tmetric_type('hate', 'to')
p = Profiles(FileProfile, FileCertifications)
r = p.add_profile('luke')
r.add("name", 0, "luke")
r.add("name", 1, "kenneth")
r.add("name", 2, "casson")
r.add("name", 3, "leighton")
r.add("info", 0, "likes python a lot - thinks it's really cool")
r.add("info", 1, "groks network traffic like he has a built-in headsocket")
p.add_profile('heather')
p.add_profile('bob')
p.add_profile('mary')
p.add_profile('lesser fleas')
p.add_profile('little fleas')
p.add_profile('fleas')
p.add_profile('robbie the old crock pony')
p.add_profile('tart the flat-faced persian cat')
p.add_profile('mo the mad orange pony')
p.add_profile('55')
p.add_profile('10')
p.add_profile('2')
p.add_profile('fleas ad infinitum')
p.add_cert('luke', 'like', 'heather', 'best')
p.add_cert('heather', 'like', 'luke', 'best')
p.add_cert('heather', 'like', 'robbie the old crock pony', 'best')
p.add_cert('heather', 'like', 'tart the flat-faced persian cat', 'best')
p.add_cert('heather', 'like', 'mo the mad orange pony', 'best' )
p.add_cert('bob', 'like', 'mary', 'good')
p.add_cert('bob', 'like', 'heather', 'good')
p.add_cert('mary', 'like', 'bob', 'good')
p.add_cert('fleas', 'like', 'little fleas', 'good')
p.add_cert('little fleas', 'like', 'lesser fleas', 'best')
p.add_cert('lesser fleas', 'like', 'fleas ad infinitum', 'best')
p.add_cert('robbie the old crock pony', 'like', 'fleas', 'best')
p.add_cert('55', 'like', '10', 'none')
p.add_cert('10', 'like', '2', 'best')
p.add_cert('heather', 'hate', 'bob', 'dislike' )
p.add_cert('heather', 'hate', 'fleas', 'looks CAN kill' )
p.add_cert('fleas', 'hate', 'mary', 'dislike')
p.add_cert('10', 'hate', '55', 'looks CAN kill')
t = PymTrustMetric(f, p)
r = t.tmetric_calc('like')
pprint(r)
r = t.tmetric_calc('like', ['heather'])
pprint(r)
r = t.tmetric_calc('hate')
pprint(r)
if __name__ == '__main__':
test()
| gpl-2.0 | -2,231,080,821,749,822,500 | 27.943452 | 77 | 0.668072 | false | 2.744074 | false | false | false |
natduca/ndbg | util/vec2.py | 1 | 3522 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
class vec2(object):
def __init__(self, opt_a=None,opt_b=None):
if opt_a != None and opt_b != None:
self.x = float(opt_a)
self.y = float(opt_b)
elif opt_a != None:
self.x = float(opt_a.x)
self.y = float(opt_a.y)
else:
self.x = 0
self.y = 0
def set(self,a,opt_b=None):
if opt_b != None:
self.x = float(a)
self.y = float(opt_b)
else:
self.x = float(a.x)
self.y = float(a.y)
def __str__(self):
return "(%f,%f)" % (self.x,self.y)
def vec2_add(a,b):
dst = vec2()
dst.x = a.x + b.x
dst.y = a.y + b.y
return dst
def vec2_accum(a,b):
a.x += b.x
a.y += b.y
return a
def vec2_sub(a,b):
dst = vec2()
dst.x = a.x - b.x
dst.y = a.y - b.y
return dst
def vec2_neg_accum(a,b):
a.x -= b.x
a.y -= b.y
return a
def vec2_scale(a,scale):
dst = vec2()
dst.x = a.x * scale
dst.y = a.y * scale
return dst
def vec2_scale_inplace(a,scale):
a.x *= scale
a.y *= scale
return a
def vec2_piecewise_mul(a,b):
dst = vec2()
dst.x = a.x * b.x
dst.y = a.y * b.y
return dst
def vec2_piecewise_div(a,b):
dst = vec2()
dst.x = a.x / b.x
dst.y = a.y / b.y
return dst
def vec2_dot(a,b):
return a.x * b.x + a.y * b.y
def vec2_length(a):
return math.sqrt(vec2_dot(a,a))
def vec2_length_squared(a):
return vec2_dot(a,a)
def vec2_normalize(a):
s = 1/vec2_length(a)
return vec2_scale(a,s)
def vec2_normalize_inplace(dst):
s = 1/vec2_length(dst)
dst.x *= s
dst.y *= s
return dst
def vec2_interp(a,b,factor):
delta = vec2_sub(b,a)
vec2_scale_inplace(delta,factor)
vec2_accum(delta,a)
return delta
def vec2_distance(a,b):
return vec2_length(vec2_sub(b,a))
class rect(object):
def __init__(self,opt_a=None,opt_b=None,centered=False):
if opt_a and opt_b:
self.pos = vec2(opt_a)
self.size = vec2(opt_b)
elif opt_a == None and opt_b == None:
self.pos = vec2(0,0)
self.size = vec2(0,0)
else:
raise Exception("Need two args or none")
if centered:
hsize = vec2_scale(self.size,0.5)
self.pos = vec2_sub(self.pos,hsize)
def contains(self,v):
return v.x >= self.pos.x and v.x < self.pos.x + self.size.x and v.y >= self.pos.y and v.y < self.pos.y + self.size.y
###########################################################################
class ivec2(object):
def __init__(self, opt_a=None,opt_b=None):
if opt_a != None and opt_b != None:
self.x = int(opt_a)
self.y = int(opt_b)
elif opt_a != None:
self.x = int(opt_a.x)
self.y = int(opt_a.y)
else:
self.x = 0
self.y = 0
def set(self,a,opt_b=None):
if opt_b != None:
self.x = int(a)
self.y = int(opt_b)
else:
self.x = int(a.x)
self.y = int(a.y)
def __str__(self):
return "(%i,%i)" % (self.x,self.y)
| apache-2.0 | 5,074,446,273,003,439,000 | 20.47561 | 121 | 0.561045 | false | 2.676292 | false | false | false |
KT26/PythonCourse | 8. Class/8.py | 1 | 1268 | # Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 24-04-17
# Time: 12:29 AM
# INHERITANCE
# We can create a new class, but instead of writing it from scratch, we can base it on an existing class.
# Lets understand inheritance better with an example
class Car():
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
self.mileage = 0
def get_descriptive_name(self):
full_name = self.make.title() + ' ' + self.model.title() + ' ' + str(self.year)
return full_name
def update_odometer(self, mileage):
self.mileage = mileage
# The class below is a child class derived from Car and has access to Car's variables as well as its methods
# The parent class name must appear in parentheses in the child class definition for inheritance to work
# The super() call gives the child class the parent's __init__(), so it inherits all of Car's attributes and methods
class ElectricCar(Car):
def __init__(self, make, model, year):
super().__init__(make, model, year)
my_car = ElectricCar('Tesla', 'Model S', '2017')
car = my_car.get_descriptive_name()
print(car)
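# As a further illustrative step (not part of the original lesson), a child
# class can also add methods of its own, e.g.:
# class ElectricCar(Car):
#     def describe_battery(self):
#         print("This car has a 70-kWh battery.")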
| mit | -6,550,018,117,725,046,000 | 31.368421 | 116 | 0.630126 | false | 3.612536 | false | false | false |
Faeriol/news-summarizer | summarizer.py | 1 | 3274 | import os
from goose3 import Goose
from selenium import webdriver
from selenium.common.exceptions import UnexpectedAlertPresentException, SessionNotCreatedException, WebDriverException
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
os.environ['MOZ_HEADLESS'] = '1' # Should be moved out
LANGUAGE = "english" # Should be config option
class NotEnoughContent(Exception):
def __init__(self, url: str) -> None:
super().__init__("Not enough content for: {}".format(url))
class InvalidContent(Exception):
def __init__(self, url: str) -> None:
super().__init__("Content appears invalid for: {}".format(url))
class BrowserSummarizer(object):
def __init__(self, language: str, sentence_count: int) -> None:
self.language = language
self.sentence_count = sentence_count
self.browser = None
self.goose = Goose({"enable_image_fetching": False})
self.stemmer = Stemmer(language)
self.tokenizer = Tokenizer(language)
self.summarizer = Summarizer(self.stemmer)
self.summarizer.stop_words = get_stop_words(language)
def init(self) -> None:
if self.browser:
self.done()
self.browser = webdriver.Firefox()
def __enter__(self):
self.init()
return self
def __exit__(self, *args):
self.done()
def _blank(self):
"""
Empty browser, do not kill instance
"""
try:
self.browser.get("about:blank")
except UnexpectedAlertPresentException:
            self.browser.switch_to.alert.dismiss()
def parse_url(self, url: str) -> (str, str):
"""
Parse retrieve the given url and parse it.
:param url: The URL to parse
:return: The resolved URL, the parsed content
"""
try:
self.browser.get(url)
except UnexpectedAlertPresentException:
            self.browser.switch_to.alert.dismiss()
self.browser.get(url)
except WebDriverException:
raise InvalidContent(url)
try: # Move around any alerts
            self.browser.switch_to.alert.dismiss()
except Exception:
pass
try:
contents = self.goose.extract(raw_html=self.browser.page_source)
cleaned_url = self.browser.current_url
except IndexError:
raise InvalidContent(url)
finally:
self._blank()
parser = PlaintextParser.from_string(contents.cleaned_text, self.tokenizer)
sentences = self.summarizer(parser.document, self.sentence_count)
if len(sentences) < self.sentence_count:
raise NotEnoughContent(url)
return cleaned_url, " ".join(str(sentence) for sentence in sentences)
def done(self) -> None:
self.browser.close()
try:
self.browser.quit()
except SessionNotCreatedException:
pass
self.browser = None
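# Minimal usage sketch (assumes Firefox/geckodriver are installed; the URL is a
# placeholder, not part of the original module):
# if __name__ == "__main__":
#     with BrowserSummarizer(LANGUAGE, sentence_count=3) as summarizer:
#         url, summary = summarizer.parse_url("https://example.com/article")
#         print(url)
#         print(summary)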
| mit | -2,505,183,108,841,201,700 | 32.408163 | 118 | 0.626145 | false | 4.082294 | false | false | false |
corerd/PyDomo | powerman/pwrmonitor.py | 1 | 6768 | #!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import logging
import json
import inspect
from sys import stderr
from time import strftime
from datetime import datetime
from os.path import dirname, join
from apiclient import errors
from traceback import format_exc
from powerman.upower import UPowerManager
from cloud.upload import upload_datastore
from cloud.googleapis.gmailapi import gmSend
from cloud.cloudcfg import ConfigDataLoad, checkDatastore
# Globals
VERSION = '1.0'
VERSION_DATE = '2019'
# Claud configuration file get from cloud package
DEFAULT_CFG_FILE = 'cloudcfg.json'
DEFAULT_CFG_FILE_PATH = join(dirname(inspect.getfile(ConfigDataLoad)),
DEFAULT_CFG_FILE)
# Power supply type IDs
PSU_UNKNOWN = -1
PSU_AC = 0
PSU_BATTERY = 1
# Power supply type string description
PSU_AC_DESC = "AC_ADAPTER"
PSU_BATTERY_DESC = "BATTERY"
# Files keeping power supply state
LOG_FILE = 'pwrmonitor-log.txt'
PSU_TYPE_FILE = 'pwrmonitor.json'
DEFAULT_PSU_CFG = \
{
'power-supply': 'UNKNOWN'
}
USAGE = '''Check power supply type and if it is switched to battery
then send an email alert message from the user's Gmail account.
Events are logged in CSV format: datetime;loglevel;message
Email address of the receiver and datastore path are taken from a configuration
file in JSON format.
If none is given, the configuration is read from the file:
%s
''' % DEFAULT_CFG_FILE_PATH
def print_error(msg):
print('%s;%s' % (strftime("%Y-%m-%d %H:%M:%S"), msg), file=stderr)
def psu_type_getFromCfg(cfg_data):
"""Get the power supply type from configuration data
Args:
cfg_data: PSU configuration data
Returns:
PSU_UNKNOWN
PSU_AC
PSU_BATTERY
"""
psu_type_desc = cfg_data['power-supply']
if psu_type_desc == PSU_BATTERY_DESC:
return PSU_BATTERY
elif psu_type_desc == PSU_AC_DESC:
return PSU_AC
return PSU_UNKNOWN
def psu_type_getFromDevice():
"""Get the power supply type from UPowerManager
Returns:
PSU_AC
PSU_BATTERY
"""
pwrMan = UPowerManager()
# Get the Devices List searching for a battery
battery_device = None
for dev in pwrMan.detect_devices():
if 'battery' in dev:
battery_device = dev
break
if not battery_device:
# no battery device found:
# power supply is external
return PSU_AC
if 'discharg' in pwrMan.get_state(battery_device).lower():
# The battery power allowd states:
# "Unknown"
# "Loading" (that is Charging)
# "Discharging"
# "Empty"
# "Fully charged"
# "Pending charge"
# "Pending discharge"
return PSU_BATTERY
return PSU_AC
def alert_send(to, message_text):
"""Send an alert email message from the user's account
to the email address get from the configuration file.
Args:
to: Email address of the receiver.
message_text: The text of the alert message.
Returns:
Success.
"""
subject = 'PSU Alert at ' + datetime.now().strftime("%d-%m-%Y %H:%M:%S")
success = -1
try:
gmSend(to, subject, message_text)
except errors.HttpError as e:
logging.error('HttpError occurred: %s' % e)
except Exception:
logging.error(format_exc())
else:
logging.info(message_text)
success = 0
return success
def main():
print('pwrmonitor v%s - (C) %s' % (VERSION, VERSION_DATE))
# get the configuration data
try:
cloud_cfg = ConfigDataLoad(DEFAULT_CFG_FILE_PATH)
except Exception as e:
print_error('cloud configuration: unable to load %s' % DEFAULT_CFG_FILE_PATH)
print_error('cloud configuration exception: %s' % type(e).__name__)
print_error('cloud configuration: %s' % str(e))
return -1
try:
log_file = join(cloud_cfg.data['datastore'], LOG_FILE)
except KeyError:
print_error("Keyword 'datastore' not found in file %s" %
DEFAULT_CFG_FILE_PATH)
return -1
try:
receiver_address = cloud_cfg.data['alert-receiver-address']
except KeyError:
print_error("Keyword 'alert-receiver-address' not found in file %s" %
DEFAULT_CFG_FILE_PATH)
return -1
# logger setup
if checkDatastore(log_file) is not True:
print_error("Cannot access %s directory" % cloud_cfg.data['datastore'])
return -1
logging.basicConfig(filename=log_file,
format='%(asctime)s;%(levelname)s;%(message)s',
level=logging.DEBUG)
# check PSU type
psu_switch2battery = 0
psu_cfg_file = join(cloud_cfg.data['datastore'], PSU_TYPE_FILE)
psu_cfg = ConfigDataLoad(psu_cfg_file, DEFAULT_PSU_CFG)
psu_type_prev = psu_type_getFromCfg(psu_cfg.data)
psu_type_current = psu_type_getFromDevice()
if psu_type_current != psu_type_prev:
if psu_type_current == PSU_BATTERY:
psu_type_desc = PSU_BATTERY_DESC
else:
psu_type_desc = PSU_AC_DESC
logging.info('power supply switched to {}'.format(psu_type_desc))
psu_cfg.data['power-supply'] = psu_type_desc
psu_cfg.update()
if psu_type_current == PSU_BATTERY:
psu_switch2battery = 1
logging.debug('send alert')
alert_send(receiver_address, 'AC power adapter has been unplugged.')
upload_datastore(cloud_cfg.data['datastore'])
return psu_switch2battery
if __name__ == "__main__":
exit(main())
| mit | -1,850,814,616,450,463,200 | 30.18894 | 85 | 0.655585 | false | 3.644588 | true | false | false |
jolyonb/edx-platform | common/lib/chem/chem/miller.py | 1 | 9303 | """ Calculation of Miller indices """
from __future__ import absolute_import
import decimal
import fractions as fr
import json
import math
import numpy as np
from six.moves import map
from six.moves import range
from functools import reduce
def lcm(a, b):
"""
Returns least common multiple of a, b
Args:
a, b: floats
Returns:
float
"""
return a * b / fr.gcd(a, b)
def segment_to_fraction(distance):
"""
Converts lengths of which the plane cuts the axes to fraction.
Tries convert distance to closest nicest fraction with denominator less or
equal than 10. It is
purely for simplicity and clearance of learning purposes. Jenny: 'In typical
courses students usually do not encounter indices any higher than 6'.
If distance is not a number (numpy nan), it means that plane is parallel to
axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is
returned
Generally (special cases):
a) if distance is smaller than some constant, i.g. 0.01011,
than fraction's denominator usually much greater than 10.
b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane,
But if he will slightly move the mouse and click on 0.65 -> it will be
(16,15,16) plane. That's why we are doing adjustments for points coordinates,
to the closest tick, tick + tick / 2 value. And now UI sends to server only
values multiple to 0.05 (half of tick). Same rounding is implemented for
unittests.
But if one will want to calculate miller indices with exact coordinates and
with nice fractions (which produce small Miller indices), he may want shift
to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero
in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin.
In this way he can receive nice small fractions. Also there is can be
degenerated case when S = (0.015, 0.012, >0.05) - if update S to (0, 0, >0.05) -
it is a line. This case should be considered separately. Small nice Miller
numbers and possibility to create very small segments can not be implemented
at same time).
Args:
distance: float distance that plane cuts on axis, it must not be 0.
Distance is multiple of 0.05.
Returns:
Inverted fraction.
0 / 1 if distance is nan
"""
if np.isnan(distance):
return fr.Fraction(0, 1)
else:
fract = fr.Fraction(distance).limit_denominator(10)
return fr.Fraction(fract.denominator, fract.numerator)
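# Illustrative values (chosen for this comment, not from the original tests):
# segment_to_fraction(0.5) gives Fraction(2, 1), since 1/2 is inverted, and
# segment_to_fraction(np.nan) gives Fraction(0, 1) for an axis the plane
# never cuts.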
def sub_miller(segments):
'''
Calculates Miller indices from segments.
Algorithm:
1. Obtain inverted fraction from segments
2. Find common denominator of inverted fractions
3. Lead fractions to common denominator and throws denominator away.
4. Return obtained values.
Args:
List of 3 floats, meaning distances that plane cuts on x, y, z axes.
Any float not equals zero, it means that plane does not intersect origin,
i. e. shift of origin has already been done.
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
'''
fracts = [segment_to_fraction(segment) for segment in segments]
common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
miller_indices = ([
fract.numerator * math.fabs(common_denominator) / fract.denominator
for fract in fracts
])
    return '(' + ','.join(map(str, list(map(decimal.Decimal, miller_indices)))) + ')'
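# For example (illustrative input), sub_miller([0.5, 0.5, 0.5]) inverts each
# segment to 2/1, the common denominator is 1, and the result is '(2,2,2)'.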
def miller(points):
"""
Calculates Miller indices from points.
Algorithm:
    1. Calculate normal vector to a plane that goes through all points.
2. Set origin.
3. Create Cartesian coordinate system (Ccs).
4. Find the lengths of segments of which the plane cuts the axes. Equation
of a line for axes: Origin + (Coordinate_vector - Origin) * parameter.
    5. If plane goes through Origin:
a) Find new random origin: find unit cube vertex, not crossed by a plane.
b) Repeat 2-4.
c) Fix signs of segments after Origin shift. This means to consider
original directions of axes. I.g.: Origin was 0,0,0 and became
new_origin. If new_origin has same Y coordinate as Origin, then segment
does not change its sign. But if new_origin has another Y coordinate than
origin (was 0, became 1), than segment has to change its sign (it now
lies on negative side of Y axis). New Origin 0 value of X or Y or Z
coordinate means that segment does not change sign, 1 value -> does
change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1
6. Run function that calculates miller indices from segments.
Args:
List of points. Each point is list of float coordinates. Order of
coordinates in point's list: x, y, z. Points are different!
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
"""
N = np.cross(points[1] - points[0], points[2] - points[0])
O = np.array([0, 0, 0])
P = points[0] # point of plane
Ccs = list(map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]]))
segments = ([
np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0
else np.nan for ort in Ccs
])
if any(x == 0 for x in segments): # Plane goes through origin.
vertices = [
# top:
np.array([1.0, 1.0, 1.0]),
np.array([0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 1.0]),
np.array([0.0, 1.0, 1.0]),
# bottom, except 0,0,0:
np.array([1.0, 0.0, 0.0]),
np.array([0.0, 1.0, 0.0]),
np.array([1.0, 1.0, 1.0]),
]
for vertex in vertices:
if np.dot(vertex - O, N) != 0: # vertex not in plane
new_origin = vertex
break
# obtain new axes with center in new origin
X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])
Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])
Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])
new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]
segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if
np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])
# fix signs of indices: 0 -> 1, 1 -> -1 (
segments = (1 - 2 * new_origin) * segments
return sub_miller(segments)
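# Illustrative call (points chosen for this comment): the plane through
# (1,0,0), (0,1,0) and (0,0,1) has normal (1,1,1) and cuts every axis at 1, so
# miller([np.array([1., 0., 0.]), np.array([0., 1., 0.]), np.array([0., 0., 1.])])
# returns '(1,1,1)'.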
def grade(user_input, correct_answer):
'''
Grade crystallography problem.
Returns true if lattices are the same and Miller indices are same or minus
same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only
on student's selection of origin.
Args:
user_input, correct_answer: json. Format:
user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"],
["0.78","1.00","0.00"],["0.00","1.00","0.72"]]}
correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'}
"lattice" is one of: "", "sc", "bcc", "fcc"
Returns:
True or false.
'''
def negative(m):
"""
Change sign of Miller indices.
Args:
m: string with meaning of Miller indices. E.g.:
(-6,3,-6) -> (6, -3, 6)
Returns:
String with changed signs.
"""
output = ''
i = 1
while i in range(1, len(m) - 1):
if m[i] in (',', ' '):
output += m[i]
elif m[i] not in ('-', '0'):
output += '-' + m[i]
elif m[i] == '0':
output += m[i]
else:
i += 1
output += m[i]
i += 1
return '(' + output + ')'
def round0_25(point):
"""
        Rounds point coordinates to the closest 0.05 value.
Args:
point: list of float coordinates. Order of coordinates: x, y, z.
Returns:
list of coordinates rounded to closes 0.5 value
"""
rounded_points = []
for coord in point:
base = math.floor(coord * 10)
fractional_part = (coord * 10 - base)
aliquot0_25 = math.floor(fractional_part / 0.25)
if aliquot0_25 == 0.0:
rounded_points.append(base / 10)
if aliquot0_25 in (1.0, 2.0):
rounded_points.append(base / 10 + 0.05)
if aliquot0_25 == 3.0:
rounded_points.append(base / 10 + 0.1)
return rounded_points
user_answer = json.loads(user_input)
if user_answer['lattice'] != correct_answer['lattice']:
return False
points = [list(map(float, p)) for p in user_answer['points']]
if len(points) < 3:
return False
    # round each point to the closest 0.05 value
points = [round0_25(point) for point in points]
points = [np.array(point) for point in points]
# print miller(points), (correct_answer['miller'].replace(' ', ''),
# negative(correct_answer['miller']).replace(' ', ''))
if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')):
return True
return False
| agpl-3.0 | 2,638,268,032,322,332,000 | 32.584838 | 122 | 0.5868 | false | 3.622664 | false | false | false |
s5brown/MLfeatures | assignment_1_Sebastian_Brown.py | 1 | 3964 | """Assignment 1."""
from assignment_1_eval import pairs
vowels = ['a', 'e', 'i', 'o', 'u']
es_sounds = ['ch', 's', 'z']
no_change = [
'economics', 'mathematics', 'statistics', 'luggage',
'baggage', 'furniture', 'information', 'gymnastics', 'news']
always_singular = ['fish', 'barracks', 'deer', 'sheep']
def pluralize(sg):
"""Return list of plural form(s) of input_word.
Building this function is the purpose of Assignment 1.
The most basic case is already provided.
"""
# print('Enter word to be made plural: ')
plurals = []
if sg in no_change:
plurals.append('')
elif sg in always_singular:
plurals.append(sg)
elif sg == 'tooth':
plurals.append('teeth')
elif sg == 'goose':
plurals.append('geese')
elif sg == 'child':
plurals.append('children')
elif sg == 'foot':
plurals.append('feet')
elif sg == 'man':
plurals.append('men')
elif sg == 'woman':
plurals.append('women')
elif sg == 'person':
plurals.append('people')
elif sg == 'mouse':
plurals.append('mice')
elif sg == 'corpus':
plurals.append(sg.replace(sg[-2:], 'ora'))
elif sg == 'genus':
plurals.append(sg.replace(sg[-2:], 'era'))
elif sg.endswith('a'):
plurals.append(sg + 'e')
plurals.append(sg + 's')
elif sg == 'crisis':
plurals.append('crises')
elif sg.endswith('us'):
plurals.append(sg.replace(sg[-2:], 'i'))
plurals.append(sg + 'es')
elif sg.endswith('ex'):
plurals.append(sg.replace(sg[-2:], 'ices'))
plurals.append(sg + 'es')
elif sg.endswith('x'):
plurals.append(sg.replace(sg[-1], 'ces'))
plurals.append(sg + 'es')
elif sg.endswith('um'):
plurals.append(sg.replace(sg[-2:], 'a'))
plurals.append(sg + 's')
elif sg.endswith('on'):
plurals.append(sg.replace(sg[-2:], 'a'))
elif sg.endswith('is'):
plurals.append(sg.replace(sg[-2:], 'es'))
elif sg.endswith('oo'):
plurals.append(sg + 's')
elif sg.endswith('o') and sg != 'auto':
plurals.append(sg + 'es')
plurals.append(sg + 's')
elif sg.endswith('y') and sg[-2] in vowels:
plurals.append(sg + 's')
elif sg.endswith('y'):
plurals.append(sg.replace(sg[-1], 'ies'))
# NOTE I had to add parentheses to the following two lines to make the interpreter keep reading the next line.
elif (sg.endswith(es_sounds[0]) or sg.endswith(es_sounds[1])
or sg.endswith(es_sounds[2])):
plurals.append(sg + 'es')
elif sg.endswith('f'):
plurals.append(sg.replace(sg[-1], 'ves'))
elif sg.endswith('fe'):
plurals.append(sg.replace(sg[-2:], 'ves'))
else:
plurals.append(sg + 's')
return plurals
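# A couple of illustrative calls (words chosen for this comment):
# pluralize('city') returns ['cities'] and pluralize('cactus') returns
# ['cacti', 'cactuses'], since several rules can yield more than one form.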
def singularize(sg):
    """Return list of singular form(s) of input_word.
    Building this function is the purpose of Assignment 1.
    This function is currently left as a stub and returns an empty list.
"""
# print("Enter word to be made singular: ")
plurals = []
return plurals
def evaluate(pl_func=pluralize, pair_data=pairs):
"""Evaluate the performance of pluralize function based on pairs data.
pl_func -- function that pluralizes input word (default=pluralize)
pair_data -- list of 2-tuples: [(sg1, pl1), (sg2, pl2),...] (default=pairs)
"""
total = len(pair_data)
# Determine how many lexemes have more than one plural form.
# duplicates = len(set([i for i, j in pair_data]))
correct = 0
for sg, pl in pair_data:
predicted_pl = pl_func(sg)
if pl == predicted_pl or pl in predicted_pl:
correct += 1
print('correct:', sg, predicted_pl, '({})'.format(pl), sep='\t')
else:
print('INcorrect:', sg, predicted_pl, '({})'.format(pl), sep='\t')
print('Your score:', correct, '/', total, '{:.2%}'.format(correct / total))
evaluate()
| gpl-3.0 | -5,668,059,858,905,396,000 | 31.760331 | 114 | 0.573411 | false | 3.222764 | false | false | false |
qtproject/pyside-pyside | tests/QtQml/registertype.py | 1 | 3725 | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
import unittest
import helper
from PySide2.QtCore import Property, QTimer, QUrl
from PySide2.QtGui import QGuiApplication, QPen, QColor, QPainter
from PySide2.QtQml import qmlRegisterType, ListProperty
from PySide2.QtQuick import QQuickView, QQuickItem, QQuickPaintedItem
class PieSlice (QQuickPaintedItem):
def __init__(self, parent = None):
QQuickPaintedItem.__init__(self, parent)
self._color = QColor()
self._fromAngle = 0
self._angleSpan = 0
def getColor(self):
return self._color
def setColor(self, value):
self._color = value
def getFromAngle(self):
return self._angle
def setFromAngle(self, value):
self._fromAngle = value
def getAngleSpan(self):
return self._angleSpan
def setAngleSpan(self, value):
self._angleSpan = value
color = Property(QColor, getColor, setColor)
fromAngle = Property(int, getFromAngle, setFromAngle)
angleSpan = Property(int, getAngleSpan, setAngleSpan)
def paint(self, painter):
global paintCalled
pen = QPen(self._color, 2)
painter.setPen(pen);
painter.setRenderHints(QPainter.Antialiasing, True);
painter.drawPie(self.boundingRect(), self._fromAngle * 16, self._angleSpan * 16);
paintCalled = True
class PieChart (QQuickItem):
def __init__(self, parent = None):
QQuickItem.__init__(self, parent)
self._name = ''
self._slices = []
def getName(self):
return self._name
def setName(self, value):
self._name = value
name = Property(str, getName, setName)
def appendSlice(self, _slice):
global appendCalled
_slice.setParentItem(self)
self._slices.append(_slice)
appendCalled = True
slices = ListProperty(PieSlice, append=appendSlice)
appendCalled = False
paintCalled = False
class TestQmlSupport(unittest.TestCase):
def testIt(self):
app = QGuiApplication([])
qmlRegisterType(PieChart, 'Charts', 1, 0, 'PieChart');
qmlRegisterType(PieSlice, "Charts", 1, 0, "PieSlice");
view = QQuickView()
view.setSource(QUrl.fromLocalFile(helper.adjust_filename('registertype.qml', __file__)))
view.show()
QTimer.singleShot(250, view.close)
app.exec_()
self.assertTrue(appendCalled)
self.assertTrue(paintCalled)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | 2,296,957,859,559,989,800 | 30.567797 | 96 | 0.651812 | false | 3.888309 | true | false | false |
bmd/twittrscrapr | twittrscrapr/scrapers/timelinescrapr.py | 1 | 1459 | import logging
from ..parsers import DictParser
from base_scraper import TwittrScrapr
log = logging.getLogger('scrapers.TimelineScrapr')
class TimelineScrapr(TwittrScrapr):
def __init__(self, api_keys, writer):
super(TimelineScrapr, self).__init__(api_keys, writer)
def _fetch_user_timeline(self, user):
finished_pagination = False
new_max = None
results = []
parser = DictParser()
while not finished_pagination:
self.check_rate_limit()
call_result = self.api.get_user_timeline(screen_name=user, count=200, include_rts=1, trim_user=True,
max_id=new_max)
if len(call_result) > 0:
results.extend([parser.parse(t, user=user) for t in call_result])
new_max = str(int(call_result[-1]['id_str']) - 1)
else:
finished_pagination = True
self.reset_time = self.api.get_lastfunction_header('x-rate-limit-reset')
self.calls_remaining = self.api.get_lastfunction_header('x-rate-limit-remaining')
return results
@TwittrScrapr.error_handler
def fetch_user_statuses(self, writer):
for user in self.scrape_queue:
log.info('Fetching tweets for {}'.format(user))
res = self._fetch_user_timeline(user)
log.info('Got {} tweets'.format(len(res)))
writer.writerows(res)
| mit | -2,145,231,821,339,842,000 | 33.738095 | 112 | 0.592872 | false | 3.741026 | false | false | false |
astroclark/BayesSpec | waveforms/waveforms2hdf5.py | 1 | 1186 | #!/usr/bin/env python
"""
waveforms2hdf5.py loops over the list of waveforms defined in this script and
dumps out an hdf5 file for the plus polarisation. The idea is to then compute
the Shannon entropy of the waveforms using Matlab's wentropy.m function.
"""
import h5py
import numpy as np
import pmns_utils
wfs='/Users/jclark/hmns_repo/results/penultimate_waveforms.txt'
waveform_list=np.loadtxt(wfs,dtype=str)
#waveform_list=['shen_135135_lessvisc','apr_135135']
h5_file=h5py.File('waveforms.hdf5','w')
h5_snr_file=h5py.File('snr.hdf5','w')
for waveform in waveform_list:
# Generate waveform instance
wf=pmns_utils.Waveform(waveform)
# Compute the time series & SNR
wf.make_wf_timeseries()
wf.compute_characteristics()
# Zoom in on signal
peak_idx=np.argmax(wf.hplus.data.data)
wf_start_idx=np.argwhere(abs(wf.hplus.data.data)>0)[0]
wf_end_idx=np.argwhere(abs(wf.hplus.data.data)>0)[-1]
wf_reduced = wf.hplus.data.data[wf_start_idx:wf_end_idx]
h5_file[waveform] = wf_reduced
h5_snr_file[waveform] = wf.snr_plus
#h5_file[waveform]=wf_reduced
#h5_file[waveform+'_snr']=wf.snr_plus
h5_file.close()
h5_snr_file.close()
| gpl-2.0 | -5,496,950,762,767,886,000 | 29.410256 | 78 | 0.713322 | false | 2.653244 | false | false | false |
Ziqi-Li/bknqgis | pandas/pandas/core/reshape/reshape.py | 1 | 45812 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import re
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
import pandas.core.dtypes.concat as _concat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.frame import _shared_docs
from pandas.util._decorators import Appender
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{}".format(name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
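# Illustrative example (arrays invented for this comment):
#   pivot_simple(np.array(['a', 'a', 'b']),
#                np.array(['x', 'y', 'x']),
#                np.array([1., 2., 3.]))
# returns a DataFrame indexed by ['a', 'b'] with columns ['x', 'y'], where the
# missing ('b', 'y') combination becomes NaN.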
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items,
fill_value=fill_value)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.loc[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
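# Illustrative behaviour of the renumbering above (frame invented for this
# comment): for a frame whose columns form a 3-level MultiIndex, calling
# stack_multiple(frame, level=[0, 2]) first stacks level 0; the old level 2
# then becomes level 1, so the remaining entry is decremented before the
# second stack.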
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
@Appender(_shared_docs['melt'] %
dict(caller='pd.melt(df, ',
versionadded="",
other='DataFrame.melt'))
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
# TODO: what about the existing index?
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(id_vars, list)):
raise ValueError('id_vars must be a list of tuples when columns'
' are a MultiIndex')
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(value_vars, list)):
raise ValueError('value_vars must be a list of tuples when'
' columns are a MultiIndex')
else:
value_vars = list(value_vars)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns
._get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2007], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
2 Red Sox 2008 545
3 Yankees 2008 526
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
    groups of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
    Each row of these wide variables is assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the subobservation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
    can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
Aone, Btwo,.., and you have an unrelated column Arating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j)
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
    With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack()
>>> w.columns = pd.Index(w.columns).str.join('')
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, stub, sep, suffix):
regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix)
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
value_name=stub.rstrip(sep), var_name=j)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
return newdf.set_index(i + [j])
if any(map(lambda s: s in df.columns.tolist(), stubnames)):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = list(map(lambda stub:
get_var_names(df, stub, sep, suffix), stubnames))
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
melted = []
for s, v in zip(stubnames, value_vars):
melted.append(melt_stub(df, s, i, j, v, sep))
melted = melted[0].join(melted[1:], how='outer')
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=np.uint8)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=np.uint8)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| gpl-2.0 | 8,329,274,033,396,292,000 | 33.239163 | 79 | 0.56005 | false | 3.675546 | false | false | false |
zackw/pelican | pelican/readers.py | 1 | 25554 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import logging
import os
import re
from collections import OrderedDict
import docutils
import docutils.core
import docutils.io
from docutils.writers.html4css1 import HTMLTranslator, Writer
import six
from six.moves.html_parser import HTMLParser
from pelican import rstdirectives # NOQA
from pelican import signals
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.utils import SafeDatetime, escape_html, get_date, pelican_open, \
posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, six.text_type):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
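# Illustrative examples (added for clarity, not part of the original module)
# of how the two supported list formats are canonicalized:
#
# >>> ensure_metadata_list('Jane Doe, John Doe')
# ['Jane Doe', 'John Doe']
# >>> ensure_metadata_list('Doe, Jane; Doe, John')
# ['Doe, Jane', 'Doe, John']
# >>> ensure_metadata_list(['  keep ', '', 'order', 'keep '])
# ['keep', 'order']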
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader(object):
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
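# Illustrative sketch (added for clarity, not part of the original module):
# a minimal custom reader. A subclass only needs ``file_extensions`` and a
# ``read()`` method returning (content, metadata); ``TxtReader`` is a
# hypothetical example, not a reader shipped with Pelican.
#
# class TxtReader(BaseReader):
#     enabled = True
#     file_extensions = ['txt']
#
#     def read(self, source_path):
#         with pelican_open(source_path) as text:
#             title = os.path.splitext(os.path.basename(source_path))[0]
#             metadata = {'title': self.process_metadata('title', title)}
#             return text, metadata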
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
Writer.__init__(self)
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
class FileInput(docutils.io.FileInput):
"""Patch docutils.io.FileInput to remove "U" mode in py3.
Universal newlines is enabled by default and "U" mode is deprecated
in py3.
"""
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs['mode'] = kwargs.get('mode', 'r').replace('U', '')
docutils.io.FileInput.__init__(self, *args, **kwargs)
def __init__(self, *args, **kwargs):
super(RstReader, self).__init__(*args, **kwargs)
def _parse_metadata(self, document):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'exit_status_level': 2,
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
source_class=self.FileInput,
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish(enable_exit_status=True)
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document)
metadata.setdefault('title', parts.get('title'))
return content, metadata
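# Illustrative sketch (added for clarity, not part of the original module):
# overriding the writer/translator used by RstReader, as its docstring
# describes. The ``Custom*`` class names are hypothetical placeholders.
#
# class CustomHTMLTranslator(PelicanHTMLTranslator):
#     pass  # override visit_*/depart_* methods as needed
#
# class CustomHTMLWriter(PelicanHTMLWriter):
#     def __init__(self):
#         Writer.__init__(self)
#         self.translator_class = CustomHTMLTranslator
#
# class CustomRstReader(RstReader):
#     writer_class = CustomHTMLWriter
#     field_body_translator_class = _FieldBodyTranslator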
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super(MarkdownReader, self).__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
# formatted metadata is special case and join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif name in METADATA_PROCESSORS:
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
try:
# Python 3.4+
HTMLParser.__init__(self, convert_charrefs=False)
except TypeError:
HTMLParser.__init__(self)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape_html(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape_html(tag))
for k, v in attrs:
result += ' ' + escape_html(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape_html(v, quote=False))
else:
result += '="{}"'.format(escape_html(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super(Readers, self).__init__(settings, cache_name,
caching_policy, load_policy,
)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
self.cache_data(path, (content, reader_metadata))
metadata.update(_filter_discardable_metadata(reader_metadata))
if content:
# find images with empty alt
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
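# Illustrative sketch (added for clarity, not part of the original module):
# the two customization points mentioned in the Readers docstring. The
# ``CustomReader``/``add_reader`` names are hypothetical.
#
# Via the READERS setting::
#
#     READERS = {'foo': CustomReader,   # handle *.foo files
#                'html': None}          # disable an existing reader
#
# Via the readers_init signal, from a plugin::
#
#     def add_reader(readers):
#         readers.reader_classes['foo'] = CustomReader
#
#     def register():
#         signals.readers_init.connect(add_reader)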
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], six.string_types):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = SafeDatetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get(
source_path, {}))
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': SafeDatetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| agpl-3.0 | 7,005,900,722,206,514,000 | 35.349929 | 79 | 0.559678 | false | 4.298402 | false | false | false |
Wikidata/StrepHit | strephit/commons/date_normalizer.py | 1 | 7746 | from __future__ import absolute_import
import yaml
import re
import os
import logging
logger = logging.getLogger(__name__)
class DateNormalizer(object):
"""
    finds matches in text strings using regular expressions and transforms them
according to a pattern transformation expression evaluated on the match
    the specifications are given in yaml format and allow defining meta functions
and meta variables as well as the pattern and transformation rules themselves.
meta variables will be placed inside patterns which use them in order to
make writing patterns easier. meta variables will be available to use from
inside the meta functions too as a dictionary named meta_vars
a pattern transformation expression is an expression which will be evaluated
if the corresponding regular expression matches. the pattern transformation
will have access to all the meta functions and meta variables defined and
to a variable named 'match' containing the regex match found
"""
def __init__(self, language=None, specs=None):
assert language or specs, 'please specify either one of the pre-set ' \
'languages or provide a custom rule set'
if specs is None:
path = os.path.join(os.path.dirname(__file__), 'resources',
'normalization_rules_%s.yml' % language)
with open(path) as f:
specs = yaml.load(f)
self._meta_init(specs)
basic_r = {name: pattern for name, pattern in self.meta_vars.iteritems()}
self.regexes = {}
for category, regexes in specs.iteritems():
regexes = sum((x.items() for x in regexes), [])
self.regexes[category] = [(re.compile(pattern.format(**basic_r)
.replace(' ', '\\s*'),
re.IGNORECASE), result)
for pattern, result in regexes]
def _meta_init(self, specs):
""" Reads the meta variables and the meta functions from the specification
:param dict specs: The specifications loaded from the file
:return: None
"""
# read meta variables and perform substitutions
self.meta_vars = {}
if '__meta_vars__' in specs:
for definition in specs.pop('__meta_vars__'):
var, value = definition.items()[0]
if isinstance(value, basestring):
self.meta_vars[var] = value.format(**self.meta_vars)
elif isinstance(value, dict):
self.meta_vars[var] = {
k: v.format(**self.meta_vars) for k, v in value.iteritems()
}
# compile meta functions in a dictionary
self.meta_funcs = {}
if '__meta_funcs__' in specs:
for f in specs.pop('__meta_funcs__'):
exec f in self.meta_funcs
# make meta variables available to the meta functions just defined
self.meta_funcs['__builtins__']['meta_vars'] = self.meta_vars
self.globals = self.meta_funcs
self.globals.update(self.meta_vars)
def normalize_one(self, expression, conflict='longest'):
""" Find the matching part in the given expression
:param str expression: The expression in which to search the match
:param str conflict: Whether to return the first match found or scan
through all the provided regular expressions and return the longest
or shortest part of the string matched by a regular expression.
Note that the match will always be the first one found in the string,
this parameter tells how to resolve conflicts when there is more than
one regular expression that returns a match. When more matches have
the same length the first one found counts
Allowed values are `first`, `longest` and `shortest`
:return: Tuple with (start, end), category, result
:rtype: tuple
Sample usage:
>>> from strephit.commons.date_normalizer import DateNormalizer
>>> DateNormalizer('en').normalize_one('Today is the 1st of June, 2016')
((13, 30), 'Time', {'month': 6, 'day': 1, 'year': 2016})
"""
best_match = None
expression = expression.lower()
for category, regexes in self.regexes.iteritems():
for regex, transform in regexes:
match = regex.search(expression)
if not match:
continue
elif conflict == 'first':
return self._process_match(category, transform, match, 0)
elif best_match is None or \
conflict == 'longest' and match.end() - match.start() > best_match[1] or \
conflict == 'shortest' and match.end() - match.start() < best_match[1]:
best_match = match, match.end() - match.start(), category, transform
if best_match is None:
return (-1, -1), None, None
else:
match, _, category, transform = best_match
return self._process_match(category, transform, match, 0)
def normalize_many(self, expression):
""" Find all the matching entities in the given expression expression
:param str expression: The expression in which to look for
:return: Generator of tuples (start, end), category, result
Sample usage:
>>> from pprint import pprint
>>> from strephit.commons.date_normalizer import DateNormalizer
>>> pprint(list(DateNormalizer('en').normalize_many('I was born on April 18th, '
... 'and today is April 18th, 2016!')))
[((14, 24), 'Time', {'day': 18, 'month': 4}),
((39, 55), 'Time', {'day': 18, 'month': 4, 'year': 2016})]
"""
# start matching from here, and move forward as new matches
# are found so to avoid overlapping matches and return
# the correct offset inside the original sentence
position = 0
expression = expression.lower()
for category, regexes in self.regexes.iteritems():
for regex, transform in regexes:
end = 0
for match in regex.finditer(expression[position:]):
yield self._process_match(category, transform, match, position)
end = max(end, match.end())
position += end
def _process_match(self, category, transform, match, first_position):
result = eval(transform, self.globals, {'match': match})
start, end = match.span()
return (first_position + start, first_position + end), category, result
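# Illustrative sketch (added for clarity, not part of the original module):
# a minimal custom rule set passed directly as ``specs``. The ``to_year``
# meta function and the pattern below are hypothetical; the real rule sets
# live in the normalization_rules_*.yml resources.
#
# >>> specs = {
# ...     '__meta_funcs__': [
# ...         'def to_year(text): return {"year": int(text)}',
# ...     ],
# ...     'Time': [
# ...         {r'in (\d\d\d\d)': 'to_year(match.group(1))'},
# ...     ],
# ... }
# >>> DateNormalizer(specs=specs).normalize_one('born in 1984')
# ((5, 12), 'Time', {'year': 1984})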
NORMALIZERS = {}
def normalize_numerical_fes(language, text):
""" Normalize numerical FEs in a sentence
"""
if language not in NORMALIZERS:
NORMALIZERS[language] = DateNormalizer(language)
normalizer = NORMALIZERS[language]
logger.debug('labeling and normalizing numerical FEs of language %s...', language)
count = 0
for (start, end), tag, norm in normalizer.normalize_many(text):
chunk = text[start:end]
logger.debug('Chunk [%s] normalized into [%s], tagged as [%s]' % (chunk, norm, tag))
# All numerical FEs are extra ones and their values are literals
fe = {
'fe': tag,
'chunk': chunk,
'type': 'extra',
'literal': norm,
'score': 1.0
}
count += 1
yield fe
logger.debug('found %d numerical FEs into "%s"', count, text)
| gpl-3.0 | 4,360,993,509,104,244,700 | 41.097826 | 98 | 0.590627 | false | 4.64946 | false | false | false |
FedericoRessi/networking-odl | networking_odl/ml2/network_topology.py | 1 | 12691 | # Copyright (c) 2015-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import importlib
import logging
import six
from six.moves.urllib import parse
from neutron.extensions import portbindings
from oslo_log import log
from oslo_serialization import jsonutils
from networking_odl.common import cache
from networking_odl.common import client
from networking_odl.common import utils
from networking_odl.common._i18n import _LI, _LW, _LE
LOG = log.getLogger(__name__)
class NetworkTopologyManager(object):
    # the first valid vif type will be chosen following the order
# on this list. This list can be modified to adapt to user preferences.
valid_vif_types = [
portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS]
# List of class names of registered implementations of interface
# NetworkTopologyParser
network_topology_parsers = [
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
def __init__(self, vif_details=None, client=None):
# Details for binding port
self._vif_details = vif_details or {}
# Rest client used for getting network topology from ODL
self._client = client or NetworkTopologyClient.create_client()
# Table of NetworkTopologyElement
self._elements_by_ip = cache.Cache(
self._fetch_and_parse_network_topology)
# Parsers used for processing network topology
self._parsers = list(self._create_parsers())
def bind_port(self, port_context):
"""Set binding for a valid segment
"""
host_name = port_context.host
elements = list()
try:
# Append to empty list to add as much elements as possible
# in the case it raises an exception
elements.extend(self._fetch_elements_by_host(host_name))
except Exception:
LOG.exception(
_LE('Error fetching elements for host %(host_name)r.'),
{'host_name': host_name}, exc_info=1)
if not elements:
            # If no network topology element could be found for the given
            # host then it uses the legacy OVS one, keeping the old
            # behaviour
LOG.warning(
_LW('Using legacy OVS network topology element for port '
'binding for host: %(host_name)r.'),
{'host_name': host_name})
# Imported here to avoid cyclic module dependencies
from networking_odl.ml2 import ovsdb_topology
elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
        # TODO(Federico Ressi): in the case there are more candidate virtual
        # switch instances for the same host it chooses one for binding the
        # port. As there isn't any known way to perform this selection it
        # selects a VIF type that is valid for all switches that have
        # been found. This has to be improved
for vif_type in self.valid_vif_types:
vif_type_is_valid_for_all = True
for element in elements:
if vif_type not in element.valid_vif_types:
# it is invalid for at least one element: discard it
vif_type_is_valid_for_all = False
break
if vif_type_is_valid_for_all:
# This is the best VIF type valid for all elements
LOG.debug(
"Found VIF type %(vif_type)r valid for all network "
"topology elements for host %(host_name)r.",
{'vif_type': vif_type, 'host_name': host_name})
for element in elements:
# It assumes that any element could be good for given host
                    # In most of the cases I expect exactly one element for
# every compute host
try:
return element.bind_port(
port_context, vif_type, self._vif_details)
except Exception:
LOG.exception(
_LE('Network topology element has failed binding '
'port:\n%(element)s'),
{'element': element.to_json()})
LOG.error(
_LE('Unable to bind port element for given host and valid VIF '
'types:\n'
'\thostname: %(host_name)s\n'
'\tvalid VIF types: %(valid_vif_types)s'),
{'host_name': host_name,
'valid_vif_types': ', '.join(self.valid_vif_types)})
        # TODO(Federico Ressi): should I raise an exception here?
def _create_parsers(self):
for parser_name in self.network_topology_parsers:
try:
yield NetworkTopologyParser.create_parser(parser_name)
except Exception:
LOG.exception(
_LE('Error initializing topology parser: %(parser_name)r'),
{'parser_name': parser_name})
def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
'''Yields all network topology elements referring to given host name
'''
host_addresses = [host_name]
try:
# It uses both compute host name and known IP addresses to
# recognize topology elements valid for given computed host
ip_addresses = utils.get_addresses_by_name(host_name)
except Exception:
ip_addresses = []
LOG.exception(
_LE('Unable to resolve IP addresses for host %(host_name)r'),
{'host_name': host_name})
else:
host_addresses.extend(ip_addresses)
yield_elements = set()
try:
for _, element in self._elements_by_ip.fetch_all(
host_addresses, cache_timeout):
# yields every element only once
if element not in yield_elements:
yield_elements.add(element)
yield element
except cache.CacheFetchError as error:
            # This error is expected in most cases because typically not
            # all host_addresses map to a network topology element.
if yield_elements:
                # As we need only one element for every host we ignore the
                # case in which other host addresses didn't map to any element
LOG.debug(
'Host addresses not found in networking topology: %s',
', '.join(error.missing_keys))
else:
LOG.exception(
_LE('No such network topology elements for given host '
'%(host_name)r and given IPs: %(ip_addresses)s.'),
{'host_name': host_name,
'ip_addresses': ", ".join(ip_addresses)})
error.reraise_cause()
def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least one
# of the addresses is not in the cache or it has expired.
# pylint: disable=unused-argument
LOG.info(_LI('Fetch network topology from ODL.'))
response = self._client.get()
response.raise_for_status()
network_topology = response.json()
if LOG.isEnabledFor(logging.DEBUG):
topology_str = jsonutils.dumps(
network_topology, sort_keys=True, indent=4,
separators=(',', ': '))
LOG.debug("Got network topology:\n%s", topology_str)
at_least_one_element_for_asked_addresses = False
for parser in self._parsers:
try:
for element in parser.parse_network_topology(network_topology):
if not isinstance(element, NetworkTopologyElement):
raise TypeError(
"Yield element doesn't implement interface "
"'NetworkTopologyElement': {!r}".format(element))
# the same element can be known by more host addresses
for host_address in element.host_addresses:
if host_address in addresses:
at_least_one_element_for_asked_addresses = True
yield host_address, element
except Exception:
LOG.exception(
_LE("Parser %(parser)r failed to parse network topology."),
{'parser': parser})
if not at_least_one_element_for_asked_addresses:
# this will mark entries for given addresses as failed to allow
# calling this method again as soon it is requested and avoid
# waiting for cache expiration
raise ValueError(
'No such topology element for given host addresses: {}'.format(
', '.join(addresses)))
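# Illustrative sketch (added for clarity, not part of the original module):
# registering an additional topology parser before instantiating the
# manager. The module path below is a hypothetical placeholder; the class it
# points to must implement NetworkTopologyParser.
#
# NetworkTopologyManager.network_topology_parsers.append(
#     'my_plugin.topology.MyTopologyParser')
# manager = NetworkTopologyManager(vif_details={'port_filter': True})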
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyParser(object):
@classmethod
def create_parser(cls, parser_class_name):
'''Creates a 'NetworkTopologyParser' of given class name.
'''
module_name, class_name = parser_class_name.rsplit('.', 1)
module = importlib.import_module(module_name)
clss = getattr(module, class_name)
if not issubclass(clss, cls):
raise TypeError(
"Class {class_name!r} of module {module_name!r} doesn't "
"implement 'NetworkTopologyParser' interface.".format(
class_name=class_name, module_name=module_name))
return clss()
@abc.abstractmethod
def parse_network_topology(self, network_topology):
'''Parses OpenDaylight network topology
Yields all network topology elements implementing
'NetworkTopologyElement' interface found in given network topology.
'''
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyElement(object):
@abc.abstractproperty
def host_addresses(self):
'''List of known host addresses of a single compute host
        Both host names and IP addresses are valid.
Neutron host controller must know at least one of these compute host
names or ip addresses to find this element.
'''
@abc.abstractproperty
def valid_vif_types(self):
'''Returns a tuple listing VIF types supported by the compute node
'''
@abc.abstractmethod
def bind_port(self, port_context, vif_type, vif_details):
'''Bind port context using given vif type and vit details
This method is expected to search for a valid segment and then
call following method:
from neutron.common import constants
from neutron.plugins.ml2 import driver_api
port_context.set_binding(
valid_segment[driver_api.ID], vif_type, vif_details,
status=constants.PORT_STATUS_ACTIVE)
'''
def to_dict(self):
cls = type(self)
return {
'class': cls.__module__ + '.' + cls.__name__,
'host_addresses': list(self.host_addresses),
'valid_vif_types': list(self.valid_vif_types)}
def to_json(self):
return jsonutils.dumps(
self.to_dict(), sort_keys=True, indent=4, separators=(',', ': '))
class NetworkTopologyClient(client.OpenDaylightRestClient):
_GET_ODL_NETWORK_TOPOLOGY_URL =\
'restconf/operational/network-topology:network-topology'
def __init__(self, url, username, password, timeout):
if url:
url = parse.urlparse(url)
port = ''
if url.port:
port = ':' + str(url.port)
topology_url = '{}://{}{}/{}'.format(
url.scheme, url.hostname, port,
self._GET_ODL_NETWORK_TOPOLOGY_URL)
else:
topology_url = None
super(NetworkTopologyClient, self).__init__(
topology_url, username, password, timeout)
| apache-2.0 | 132,776,603,059,931,380 | 38.659375 | 79 | 0.588527 | false | 4.560187 | false | false | false |
tayebzaidi/PPLL_Spr_16 | chat/client3.py | 1 | 1260 | from multiprocessing.connection import Client
from random import random
from time import sleep
from multiprocessing.connection import Listener
from multiprocessing import Process
local_listener = (('127.0.0.1', 5003),'secret client 3 password')
def client_listener():
cl = Listener(address=local_listener[0], authkey=local_listener[1])
print '.............client listener starting'
print '.............accepting conexions'
while True:
conn = cl.accept()
print '.............connection accepted from', cl.last_accepted
m = conn.recv()
print '.............message received from server', m
if __name__ == '__main__':
print 'trying to connect'
conn = Client(address=('127.0.0.1', 6000), authkey='secret password server')
conn.send(local_listener)
cl = Process(target=client_listener, args=())
cl.start()
connected = True
while connected:
        value = raw_input("'C' to stay connected, 'Q' to quit connection: ")
if value == 'Q':
connected = False
else:
print "continue connected"
conn.send("connected")
print "last message"
conn.send("quit")
conn.close()
cl.terminate()
print "end client"
| gpl-3.0 | 7,113,699,005,899,186,000 | 28.302326 | 80 | 0.605556 | false | 4.090909 | false | false | false |
laalaguer/gae-blog-module | gaesession/handlers.py | 1 | 7633 | import webapp2
from webapp2_extras import sessions
class MainHandler(webapp2.RequestHandler):
def get(self):
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
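# Illustrative sketch (added for clarity, not part of the original handlers):
# webapp2_extras sessions need a secret key in the application config; the
# key string below is a hypothetical placeholder.
#
# config = {
#     'webapp2_extras.sessions': {
#         'secret_key': 'replace-with-a-long-random-secret',
#     },
# }
# app = webapp2.WSGIApplication([('/', MainHandler)], config=config,
#                               debug=True)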
class MainHandlerWithArguments(webapp2.RequestHandler):
def get(self, photo_key): # even with arguments, we call with dispatch(self)
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import blobstore
class MyUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def my_post_dispatch(self, *args, **kwargs):
''' A Fake dispatch method that you want to call inside your Route()
Just an imitation of the webapp2 style dispatch() with limited functions
'''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'POST':
self.post(*args, **kwargs) # since webapp doesn't have dispatch() method like webapp2, we do it manually
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
print 'before decorated' # for your future use. you can write wrapper like 'user_required'
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def post(self):
# Get all the uploaded file info
myfiles = self.get_uploads('file') # this is a list of blob key info
# You do some operations on the myfiles, maybe transform them
# maybe associate them with other ndb entities in your database
# ...
# But we also want to manipulate with the session, RIGHT ???
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
        # Finally, delete the uploaded blobs, just in case you don't want to keep them.
[blobstore.delete(each.key()) for each in self.get_uploads('file')]
class ServeBlobHandler(blobstore_handlers.BlobstoreDownloadHandler):
''' Serve the images to the public '''
def my_get_dispatch(self, *args, **kwargs):
''' A Fake dispatch method that you want to call inside your Route()
Just an imitation of the webapp2 style dispatch() with limited functions
'''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'GET':
self.get(*args, **kwargs) # this is the real get method we want here
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
            print 'before decorated' # for future use; you could write a decorator such as 'user_required' this way
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def get(self, photo_key):
if not blobstore.get(photo_key):
self.error(404)
else:
self.send_blob(photo_key) | apache-2.0 | -2,670,067,092,621,807,600 | 45.266667 | 120 | 0.617844 | false | 3.975521 | false | false | false |
fluxcapacitor/pipeline | libs/pipeline_model/tensorflow/core/framework/tensor_slice_pb2.py | 1 | 4870 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor_slice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor_slice.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n,tensorflow/core/framework/tensor_slice.proto\x12\ntensorflow\"\x80\x01\n\x10TensorSliceProto\x12\x33\n\x06\x65xtent\x18\x01 \x03(\x0b\x32#.tensorflow.TensorSliceProto.Extent\x1a\x37\n\x06\x45xtent\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x06length\x18\x02 \x01(\x03H\x00\x42\x0c\n\nhas_lengthB2\n\x18org.tensorflow.frameworkB\x11TensorSliceProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
_TENSORSLICEPROTO_EXTENT = _descriptor.Descriptor(
name='Extent',
full_name='tensorflow.TensorSliceProto.Extent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='tensorflow.TensorSliceProto.Extent.start', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='length', full_name='tensorflow.TensorSliceProto.Extent.length', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='has_length', full_name='tensorflow.TensorSliceProto.Extent.has_length',
index=0, containing_type=None, fields=[]),
],
serialized_start=134,
serialized_end=189,
)
_TENSORSLICEPROTO = _descriptor.Descriptor(
name='TensorSliceProto',
full_name='tensorflow.TensorSliceProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='extent', full_name='tensorflow.TensorSliceProto.extent', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TENSORSLICEPROTO_EXTENT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=189,
)
_TENSORSLICEPROTO_EXTENT.containing_type = _TENSORSLICEPROTO
_TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'].fields.append(
_TENSORSLICEPROTO_EXTENT.fields_by_name['length'])
_TENSORSLICEPROTO_EXTENT.fields_by_name['length'].containing_oneof = _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length']
_TENSORSLICEPROTO.fields_by_name['extent'].message_type = _TENSORSLICEPROTO_EXTENT
DESCRIPTOR.message_types_by_name['TensorSliceProto'] = _TENSORSLICEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorSliceProto = _reflection.GeneratedProtocolMessageType('TensorSliceProto', (_message.Message,), dict(
Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict(
DESCRIPTOR = _TENSORSLICEPROTO_EXTENT,
__module__ = 'tensorflow.core.framework.tensor_slice_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto.Extent)
))
,
DESCRIPTOR = _TENSORSLICEPROTO,
__module__ = 'tensorflow.core.framework.tensor_slice_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto)
))
_sym_db.RegisterMessage(TensorSliceProto)
_sym_db.RegisterMessage(TensorSliceProto.Extent)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\021TensorSliceProtosP\001\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 6,296,427,293,374,174,000 | 35.343284 | 406 | 0.742094 | false | 3.27505 | false | true | false |
nrz/ylikuutio | external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/sensors/sensor.py | 2 | 15551 | # Lint as: python3
"""A sensor prototype class.
The concept is explained in: go/minitaur-gym-redesign-1.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Iterable, Optional, Sequence, Text, Tuple, Union
import gin
import gym
import numpy as np
from pybullet_envs.minitaur.robots import robot_base
from pybullet_envs.minitaur.robots import time_ordered_buffer
_ARRAY = Sequence[float]
_FloatOrArray = Union[float, _ARRAY]
_DataTypeList = Iterable[Any]
# For sensor with multiput outputs, key of the main observation in output dict.
MAIN_OBS_KEY = ""
# This allows referencing np.float32 in gin config files. For example:
# lidar_sensor.LidarSensor.dtype = @np.float32
gin.external_configurable(np.float32, module="np")
gin.external_configurable(np.float64, module="np")
gin.external_configurable(np.uint8, module="np")
# Observation blenders take a pair of low/high values. The low/high is measured
# by the latency of the observation. So the low value is actually newer in time
# and the high value older. The coeff in [0, 1] can be thought of as the distance
# between the low and high values, with 0 being 100% low value and 1 being 100%
# high value.
def linear_obs_blender(low_value: Any, high_value: Any, coeff: float):
"""Linear interpolation of low/high values based on coefficient value."""
return low_value * (1 - coeff) + high_value * coeff
def closest_obs_blender(low_value: Any, high_value: Any, coeff: float):
"""Choosing the high or low value based on coefficient value."""
return low_value if coeff < 0.5 else high_value
def newer_obs_blender(low_value: Any, unused_high_value: Any,
unused_coeff: float):
"""Always choosing low value, which is the newer value between low/high."""
return low_value
def older_obs_blender(unused_low_value: Any, high_value: Any,
unused_coeff: float):
"""Always choosing the high value, which is the older value between low/high."""
return high_value
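# Minimal usage sketch (values are assumptions): the buffer returns a newer "low"
# value and an older "high" value; coeff=0.25 weights them 75% newer / 25% older.
#   blended = linear_obs_blender(low_value, high_value, 0.25)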
@gin.configurable
class Sensor(object):
"""A prototype class of sensors."""
def __init__(
self,
name: Text,
sensor_latency: _FloatOrArray,
interpolator_fn: Any,
enable_debug_visualization: bool = False,
):
"""A basic constructor of the sensor.
We do not provide a robot instance during __init__, as robot instances may
be reloaded/recreated during the simulation.
Args:
name: the name of the sensor
sensor_latency: There are two ways to use this expected sensor latency.
For both methods, the latency should be in the same unit as the sensor
data timestamp. 1. As a single float number, the observation will be a
1D array. For real robots, this should be set to 0.0. 2. As an array of
floats, the observation will be a 2D array based on how long the history
need to be. Thus, [0.0, 0.1, 0.2] is a history length of 3. Observations
are stacked on a new axis appended after existing axes.
interpolator_fn: Function that controls how to interpolate the two values
        that are returned from the time ordered buffer.
enable_debug_visualization: Whether to draw debugging visualization.
"""
self._robot = None
self._name = name
# Observation space will be implemented by derived classes.
self._observation_space = None
self._sensor_latency = sensor_latency
self._single_latency = True if isinstance(sensor_latency,
(float, int)) else False
self._enable_debug_visualization = enable_debug_visualization
if not self._is_valid_latency():
raise ValueError("sensor_latency is expected to be a non-negative number "
"or a non-empty list of non-negative numbers.")
self._interpolator_fn = interpolator_fn or newer_obs_blender
self._axis = -1
timespan = sensor_latency if self._single_latency else max(sensor_latency)
self._observation_buffer = time_ordered_buffer.TimeOrderedBuffer(
max_buffer_timespan=timespan)
def _is_valid_latency(self):
if self._single_latency:
return self._sensor_latency >= 0
if self._sensor_latency:
return all(value >= 0 for value in self._sensor_latency)
return False
def get_name(self) -> Text:
return self._name
@property
def is_single_latency(self) -> bool:
return self._single_latency
@property
def observation_space(self) -> gym.spaces.Space:
return self._observation_space
@property
def enable_debug_visualization(self):
return self._enable_debug_visualization
@enable_debug_visualization.setter
def enable_debug_visualization(self, enable):
self._enable_debug_visualization = enable
def get_observation_datatype(self):
"""Returns the data type for the numpy structured array.
It is recommended to define a list of tuples: (name, datatype, shape)
Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html
Ex:
return [('motor_angles', np.float64, (8, ))] # motor angle sensor
return [('IMU_x', np.float64), ('IMU_z', np.float64), ] # IMU
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
datatype: a list of data types.
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def get_lower_bound(self):
"""Returns the lower bound of the observation.
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
lower_bound: the lower bound of sensor values in np.array format
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def get_upper_bound(self):
"""Returns the upper bound of the observation.
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
upper_bound: the upper bound of sensor values in np.array format
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def _get_original_observation(self) -> Tuple[float, Any]:
"""Gets the non-modified observation.
    Different from get_observation, which can pollute the sensor data with
    noise and latency, this method shall return the best effort measurements of
    the sensor. For simulated robots, this will return the clean data. For real
    robots, just return the measurements as is. All inherited classes shall
    implement this method.
Returns:
The timestamp and the original sensor measurements.
Raises:
NotImplementedError for the base class.
"""
    raise NotImplementedError("Not implemented for base class.")
def get_observation(self):
"""Returns the observation data.
Returns:
observation: the observed sensor values in np.array format
"""
obs = self._observation_buffer.get_delayed_value(self._sensor_latency)
if self._single_latency:
if isinstance(self._observation_space, gym.spaces.Dict):
return self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff)
else:
return np.asarray(
self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff))
else:
if isinstance(self._observation_space, gym.spaces.Dict):
# interpolate individual sub observation
interpolated = [
self._interpolator_fn(data.value_0, data.value_1, data.coeff)
for data in obs
]
stacked_per_sub_obs = {}
for k in interpolated[0]:
stacked_per_sub_obs[k] = np.stack(
np.asarray([d[k] for d in interpolated]), axis=self._axis)
return stacked_per_sub_obs
else:
obs = np.asarray([
self._interpolator_fn(data.value_0, data.value_1, data.coeff)
for data in obs
])
return np.stack(obs, axis=self._axis)
def set_robot(self, robot: robot_base.RobotBase):
"""Set a robot instance."""
self._robot = robot
def get_robot(self):
"""Returns the robot instance."""
return self._robot
def on_reset(self, env):
"""A callback function for the reset event.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
self._observation_buffer.reset()
self.on_new_observation()
def on_step(self, env):
"""A callback function for the control step event.
Args:
env: the environment who invokes this callback function.
"""
pass
def visualize(self):
"""Visualizes the sensor information."""
pass
def on_new_observation(self):
"""A callback for each observation received.
To be differentiated from on_step, which will be called only once per
    control step (i.e. env.step), this API will be called every time in the
substep/action repeat loop, when new observations are expected. Each derived
sensor class should implement this API by implementing:
my_obs = call env/robot api to get the observation
self._observation_buffer.add(my_obs)
"""
timestamp, obs = self._get_original_observation()
if self._enable_debug_visualization:
self.visualize()
self._observation_buffer.add(timestamp, obs)
def on_terminate(self, env):
"""A callback function for the terminate event.
Args:
env: the environment who invokes this callback function.
"""
pass
def _stack_space(self,
space: Union[gym.spaces.Box, gym.spaces.Dict],
dtype: np.dtype = None) -> Any:
"""Returns stacked version of observation space.
This stacks a gym.spaces.Box or gym.spaces.Dict action space based on the
length of the sensor latency and the axis for stacking specified in the
sensor. A gym.spaces.Box is just stacked, but a gym.spaces.Dict is
recursively stacked, preserving its dictionary structure while stacking
any gym.spaces.Box contained within. For example, the input action space:
gym.spaces.Dict({
'space_1': gym.spaces.Box(low=0, high=10, shape=(1,)),
'space_2': gym.spaces.Dict({
'space_3': gym.spaces.Box(low=0, high=10, shape=(2,)),
}),
}))
would be converted to the following if sensor latency was [0, 1]:
gym.spaces.Dict({
'space_1': gym.spaces.Box(low=0, high=10, shape=(1, 2)),
'space_2': gym.spaces.Dict({
'space_3': gym.spaces.Box(low=0, high=10, shape=(2, 2)),
}),
}))
Args:
space: A gym.spaces.Dict or gym.spaces.Box to be stacked.
dtype: Datatype for the stacking.
Returns:
stacked_space: A stacked version of the action space.
"""
if self._single_latency:
return space
# Allow sensors such as last_action_sensor to override the dtype.
dtype = dtype or space.dtype
if isinstance(space, gym.spaces.Box):
return self._stack_space_box(space, dtype)
elif isinstance(space, gym.spaces.Dict):
return self._stack_space_dict(space, dtype)
else:
raise ValueError(f"Space {space} is an unsupported type.")
def _stack_space_box(self, space: gym.spaces.Box,
dtype: np.dtype) -> gym.spaces.Box:
"""Returns stacked version of a box observation space.
This stacks a gym.spaces.Box action space based on the length of the sensor
latency and the axis for stacking specified in the sensor.
Args:
space: A gym.spaces.Box to be stacked.
dtype: Datatype for the stacking
Returns:
stacked_space: A stacked version of the gym.spaces.Box action space.
"""
length = len(self._sensor_latency)
stacked_space = gym.spaces.Box(
low=np.repeat(
np.expand_dims(space.low, axis=self._axis), length,
axis=self._axis),
high=np.repeat(
np.expand_dims(space.high, axis=self._axis),
length,
axis=self._axis),
dtype=dtype)
return stacked_space
def _stack_space_dict(self, space: gym.spaces.Dict,
dtype: np.dtype) -> gym.spaces.Dict:
"""Returns stacked version of a dict observation space.
This stacks a gym.spaces.Dict action space based on the length of the sensor
latency and the recursive structure of the gym.spaces.Dict itself.
Args:
space: A gym.spaces.Dict to be stacked.
dtype: Datatype for the stacking.
Returns:
stacked_space: A stacked version of the dictionary action space.
"""
return gym.spaces.Dict([
(k, self._stack_space(v, dtype)) for k, v in space.spaces.items()
])
def _encode_obs_dict_keys(self, obs_dict):
"""Encodes sub obs keys of observation dict or observsation space dict."""
return {encode_sub_obs_key(self, k): v for k, v in obs_dict.items()}
class BoxSpaceSensor(Sensor):
"""A prototype class of sensors with Box shapes."""
def __init__(self,
name: Text,
shape: Tuple[int, ...],
lower_bound: _FloatOrArray = -np.pi,
upper_bound: _FloatOrArray = np.pi,
dtype=np.float64) -> None:
"""Constructs a box type sensor.
Will be deprecated (b/150818246) once we switch to gym spaces.
Args:
name: the name of the sensor
shape: the shape of the sensor values
lower_bound: the lower_bound of sensor value, in float or np.array.
upper_bound: the upper_bound of sensor value, in float or np.array.
dtype: data type of sensor value
"""
super(BoxSpaceSensor, self).__init__(
name=name, sensor_latency=0.0, interpolator_fn=newer_obs_blender)
self._shape = shape
self._dtype = dtype
if isinstance(lower_bound, float):
self._lower_bound = np.full(shape, lower_bound, dtype=dtype)
else:
self._lower_bound = np.array(lower_bound)
if isinstance(upper_bound, float):
self._upper_bound = np.full(shape, upper_bound, dtype=dtype)
else:
self._upper_bound = np.array(upper_bound)
def set_robot(self, robot):
# Since all old robot class do not inherit from RobotBase, we can enforce
# the checking here.
if isinstance(robot, robot_base.RobotBase):
raise ValueError(
"Cannot use new robot interface RobotBase with old sensor calss.")
self._robot = robot
def get_shape(self) -> Tuple[int, ...]:
return self._shape
def get_dimension(self) -> int:
return len(self._shape)
def get_dtype(self):
return self._dtype
def get_observation_datatype(self) -> _DataTypeList:
"""Returns box-shape data type."""
return [(self._name, self._dtype, self._shape)]
def get_lower_bound(self) -> _ARRAY:
"""Returns the computed lower bound."""
return self._lower_bound
def get_upper_bound(self) -> _ARRAY:
"""Returns the computed upper bound."""
return self._upper_bound
def get_observation(self) -> np.ndarray:
return np.asarray(self._get_observation(), dtype=self._dtype)
def _get_original_observation(self) -> Tuple[float, Any]:
# Maintains compatibility with the new sensor classes."""
raise NotImplementedError("Not implemented for this class.")
def on_new_observation(self):
# Maintains compatibility with the new sensor classes."""
pass
def encode_sub_obs_key(s: Sensor, sub_obs_name: Optional[Text]):
"""Returns a sub observation key for use in observation dictionary."""
if sub_obs_name == MAIN_OBS_KEY:
return s.get_name()
else:
return f"{s.get_name()}/{sub_obs_name}"
| agpl-3.0 | 3,822,107,709,921,278,000 | 33.481153 | 82 | 0.663301 | false | 3.854027 | false | false | false |
winklerand/pandas | asv_bench/benchmarks/replace.py | 1 | 2171 | from .pandas_vb_common import *
class replace_fillna(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_fillna(self):
self.ts.fillna(0.0, inplace=True)
class replace_large_dict(object):
goal_time = 0.2
def setup(self):
self.n = (10 ** 6)
self.start_value = (10 ** 5)
self.to_rep = {i: self.start_value + i for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
def time_replace_large_dict(self):
self.s.replace(self.to_rep, inplace=True)
class replace_convert(object):
goal_time = 0.5
def setup(self):
self.n = (10 ** 3)
self.to_ts = {i: pd.Timestamp(i) for i in range(self.n)}
self.to_td = {i: pd.Timedelta(i) for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)),
'B': np.random.randint(self.n, size=(10 ** 3))})
def time_replace_series_timestamp(self):
self.s.replace(self.to_ts)
def time_replace_series_timedelta(self):
self.s.replace(self.to_td)
def time_replace_frame_timestamp(self):
self.df.replace(self.to_ts)
def time_replace_frame_timedelta(self):
self.df.replace(self.to_td)
class replace_replacena(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
self.ts.replace(np.nan, 0.0, inplace=True)
| bsd-3-clause | 1,624,599,673,181,421,300 | 30.014286 | 91 | 0.587748 | false | 3.132756 | false | false | false |
sebastiaangroot/kmaldetect | tools/build/gen_syscall_table.py | 1 | 1860 | """
A simple Python script to generate a shell function that takes the name of a syscall as input and translates it to the number corresponding to that syscall.
This function is used in the sig_gen.sh script, used to generate an application signature for detection in kmaldetect.
Keep in mind that the '\n' characters used here will be translated to your OS's newline convention.
"""
import sys
import getopt
def gen_function(content, f):
f.write('function get_syscall_index\n')
f.write('{\n')
f.write('\tcase $1 in\n')
for line in content:
if line.startswith('#define __NR_') and line.find('stub_') == -1:
if line[9:].find('\t') != -1:
				num = line[line.find('\t', line.find('__NR_')):].lstrip('\t').strip() #num = the characters after the tab / whitespace characters, after the __NR_ prefix
				name = line[line.find('__NR_') + 5:].split('\t')[0] #name = the characters after the __NR_ prefix but before the tab / whitespace characters
elif line[9:].find(' ') != -1:
num = line[line.find(' ', line.find('__NR_')):].lstrip(' ').strip()
name = line[line.find('__NR_') + 5:].split(' ')[0]
			else: #There has to be a space or tab after the #define __NR_xxx. This was not the case, so continue the for loop
continue
f.write('\t\t\'' + name + '\')\n')
f.write('\t\t\treturn ' + num + '\n')
f.write('\t\t\t;;\n')
f.write('\tesac\n')
f.write('}\n')
infile = '' # path to the unistd_xx.h header
outfile = '' # path to the outfile, which will be filled with a .sh function for the use in sig_gen.sh
content = '' # content of infile
opts, args = getopt.getopt(sys.argv[1:], 'i:o:', ['infile=', 'outfile='])
for o, a in opts:
if o in ('--infile', '-i'):
infile = a
elif o in ('--outfile', '-o'):
outfile = a
with open(infile, 'r') as f:
content = f.readlines()
f = open(outfile, 'a')
gen_function(content, f)
f.flush()
f.close() | gpl-2.0 | 9,037,986,725,254,694,000 | 38.595745 | 152 | 0.634409 | false | 3.059211 | false | false | false |
lijoantony/django-oscar-api | oscarapi/basket/operations.py | 1 | 3871 | "This module contains operations on baskets and lines"
from django.conf import settings
from oscar.core.loading import get_model, get_class
from oscar.core.utils import get_default_currency
from oscar.core.prices import Price
__all__ = (
'apply_offers',
'assign_basket_strategy',
'prepare_basket',
'get_basket',
'get_basket_id_from_session',
'get_anonymous_basket',
'get_user_basket',
'store_basket_in_session',
'request_contains_basket',
'flush_and_delete_basket',
'request_contains_line',
'save_line_with_default_currency',
)
Basket = get_model('basket', 'Basket')
Applicator = get_class('offer.utils', 'Applicator')
Selector = None
def apply_offers(request, basket):
"Apply offers and discounts to cart"
if not basket.is_empty:
Applicator().apply(request, basket)
def assign_basket_strategy(basket, request):
# fixes too early import of Selector
    # TODO: check if this is still true, now that the basket models no longer
# require this module to be loaded.
global Selector
if hasattr(request, 'strategy'):
basket.strategy = request.strategy
else: # in management commands, the request might not be available.
if Selector is None:
Selector = get_class('partner.strategy', 'Selector')
basket.strategy = Selector().strategy(
request=request, user=request.user)
apply_offers(request, basket)
return basket
def prepare_basket(basket, request):
assign_basket_strategy(basket, request)
store_basket_in_session(basket, request.session)
return basket
def get_basket(request, prepare=True):
"Get basket from the request."
if request.user.is_authenticated():
basket = get_user_basket(request.user)
else:
basket = get_anonymous_basket(request)
if basket is None:
basket = Basket.objects.create()
basket.save()
return prepare_basket(basket, request) if prepare else basket
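# Hypothetical usage from a view (names are assumptions):
#   basket = get_basket(request)   # fetches, prepares and stores the basket in the session
#   if request_contains_basket(request, basket):
#       ...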
def get_basket_id_from_session(request):
return request.session.get(settings.OSCAR_BASKET_COOKIE_OPEN)
def editable_baskets():
return Basket.objects.filter(status__in=["Open", "Saved"])
def get_anonymous_basket(request):
"Get basket from session."
basket_id = get_basket_id_from_session(request)
try:
basket = editable_baskets().get(pk=basket_id)
except Basket.DoesNotExist:
basket = None
return basket
def get_user_basket(user):
"get basket for a user."
try:
basket, __ = editable_baskets().get_or_create(owner=user)
except Basket.MultipleObjectsReturned:
# Not sure quite how we end up here with multiple baskets.
# We merge them and create a fresh one
old_baskets = list(editable_baskets().filter(owner=user))
basket = old_baskets[0]
for other_basket in old_baskets[1:]:
basket.merge(other_basket, add_quantities=False)
return basket
def store_basket_in_session(basket, session):
session[settings.OSCAR_BASKET_COOKIE_OPEN] = basket.pk
session.save()
def request_contains_basket(request, basket):
if basket.can_be_edited:
if request.user.is_authenticated():
return request.user == basket.owner
return get_basket_id_from_session(request) == basket.pk
return False
def flush_and_delete_basket(basket, using=None):
"Delete basket and all lines"
basket.flush()
basket.delete(using)
def request_contains_line(request, line):
basket = get_basket(request, prepare=False)
if basket and basket.pk == line.basket.pk:
return request_contains_basket(request, basket)
return False
def save_line_with_default_currency(line, *args, **kwargs):
if not line.price_currency:
line.price_currency = get_default_currency()
return line.save(*args, **kwargs)
| bsd-3-clause | 7,321,325,571,453,484,000 | 27.463235 | 72 | 0.675794 | false | 3.722115 | false | false | false |
mtils/ems | ems/qt/graphics/scene_manager.py | 1 | 7140 |
from ems.typehint import accepts
from ems.qt.event_hook_proxy import SignalEventHookProxy
from ems.qt import QtWidgets, QtGui, QtCore, QtPrintSupport
from ems.qt.graphics.graphics_scene import GraphicsScene, BackgroundCorrector
from ems.qt.graphics.graphics_widget import GraphicsWidget
from ems.qt.graphics.storage.interfaces import SceneStorageManager
from ems.qt.graphics.tool import GraphicsTool
from ems.qt.graphics.tool import GraphicsToolDispatcher
from ems.qt.graphics.text_tool import TextTool
from ems.qt.graphics.pixmap_tool import PixmapTool
from ems.qt.graphics.interfaces import Finalizer
from ems.qt.graphics.page_item import PageItemHider, PageItem
Qt = QtCore.Qt
QObject = QtCore.QObject
QRectF = QtCore.QRectF
pyqtProperty = QtCore.pyqtProperty
pyqtSlot = QtCore.pyqtSlot
QWidget = QtWidgets.QWidget
QVBoxLayout = QtWidgets.QVBoxLayout
QToolBar = QtWidgets.QToolBar
QSlider = QtWidgets.QSlider
QAction = QtWidgets.QAction
QKeySequence = QtGui.QKeySequence
QPrintPreviewDialog = QtPrintSupport.QPrintPreviewDialog
QPainter = QtGui.QPainter
class SceneManager(QObject):
def __init__(self, parent=None, storageManager=None):
super(SceneManager, self).__init__(parent)
self._scene = None
self._widget = None
self._tools = None
self._storageManager = None
self._importStorageManager = None
self._loadAction = None
self._saveAction = None
self._importAction = None
self._exportAction = None
self._actions = []
self._finalizers = [BackgroundCorrector(), PageItemHider()]
if storageManager:
self.setStorageManager(storageManager)
def actions(self):
if not self._actions:
self._populateActions()
return self._actions
def getScene(self):
if not self._scene:
self._scene = GraphicsScene()
self._scene.deleteRequested.connect(self.deleteIfWanted)
return self._scene
scene = pyqtProperty(GraphicsScene, getScene)
def getWidget(self):
if not self._widget:
self._widget = GraphicsWidget(scene=self.scene, tools=self.tools)
self._addActionsToWidget(self._widget)
self._widget.printPreviewRequested.connect(self.showPrintPreviewDialog)
return self._widget
widget = pyqtProperty(GraphicsWidget, getWidget)
def getTools(self):
if not self._tools:
self._tools = self._createTools()
return self._tools
tools = pyqtProperty(GraphicsTool, getTools)
def load(self, *args):
if self._storageManager:
return self._storageManager.load()
def save(self, *args):
if self._storageManager:
return self._storageManager.save()
def importScene(self, *args):
if self._importStorageManager:
return self._importStorageManager.load()
def exportScene(self, *args):
if self._importStorageManager:
return self._importStorageManager.save()
def getStorageManager(self):
return self._storageManager
@pyqtSlot(SceneStorageManager)
def setStorageManager(self, storageManager):
self._storageManager = storageManager
self._storageManager.setScene(self.scene)
self._storageManager.setTools(self.tools)
storageManager = pyqtProperty(SceneStorageManager, getStorageManager, setStorageManager)
def getImportStorageManager(self):
return self._importStorageManager
def setImportStorageManager(self, storageManager):
self._importStorageManager = storageManager
self._importStorageManager.setScene(self.scene)
self._importStorageManager.setTools(self.tools)
importStorageManager = pyqtProperty(SceneStorageManager, getImportStorageManager, setImportStorageManager)
@property
def loadAction(self):
if self._loadAction:
return self._loadAction
self._loadAction = QAction('Load', self.getWidget(), shortcut = QKeySequence.Open)
self._loadAction.triggered.connect(self.load)
return self._loadAction
@property
def saveAction(self):
if self._saveAction:
return self._saveAction
self._saveAction = QAction('Save', self.getWidget(), shortcut = QKeySequence.Save)
self._saveAction.triggered.connect(self.save)
return self._saveAction
@property
def importAction(self):
if self._importAction:
return self._importAction
self._importAction = QAction('Import', self.getWidget())
self._importAction.triggered.connect(self.importScene)
return self._importAction
@property
def exportAction(self):
if self._exportAction:
return self._exportAction
self._exportAction = QAction('Export', self.getWidget())
self._exportAction.triggered.connect(self.exportScene)
return self._exportAction
def printScene(self, printer, painter=None):
painter = painter if isinstance(painter, QPainter) else QPainter(printer)
for finalizer in self._finalizers:
finalizer.toFinalized(self.scene)
pageItem = self._findPageItem()
if pageItem:
self.scene.render(painter, QRectF(), pageItem.boundingRect())
else:
self.scene.render(painter)
for finalizer in self._finalizers:
finalizer.toEditable(self.scene)
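    # Hypothetical direct use outside the preview dialog ('manager' is an assumed SceneManager instance):
    #   printer = QtPrintSupport.QPrinter()
    #   manager.printScene(printer)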
def showPrintPreviewDialog(self):
margin = 30
parent = self.getWidget()
self.printPrvDlg = QPrintPreviewDialog(parent)
self.printPrvDlg.setWindowTitle(u'Druckvorschau')
self.printPrvDlg.paintRequested.connect(self.printScene)
self.printPrvDlg.resize(parent.width()-margin, parent.height()-margin)
self.printPrvDlg.show()
def deleteIfWanted(self):
items = self.scene.selectedItems()
if not len(items):
return
for item in items:
self.scene.removeItem(item)
@accepts(Finalizer)
def addFinalizer(self, finalizer):
self._finalizers.append(finalizer)
def hasFinalizer(self, finalizer):
return finalizer in self._finalizers
def finalizer(self, cls):
for finalizer in self._finalizers:
if isinstance(finalizer, cls):
return finalizer
def _createTools(self):
tools = GraphicsToolDispatcher(self)
tools.setScene(self.scene)
textTool = TextTool()
tools.addTool(textTool)
pixmapTool = PixmapTool()
tools.addTool(pixmapTool)
return tools
def _populateActions(self):
if self._actions:
return
self._actions.append(self.loadAction)
self._actions.append(self.saveAction)
self._actions.append(self.importAction)
self._actions.append(self.exportAction)
def _addActionsToWidget(self, widget):
for action in self.actions():
widget.addAction(action)
def _findPageItem(self):
for item in self.scene.items():
if isinstance(item, PageItem):
return item | mit | 2,377,236,452,932,278,300 | 32.683962 | 110 | 0.67605 | false | 4.170561 | false | false | false |
pgmillon/ansible | lib/ansible/modules/database/postgresql/postgresql_tablespace.py | 1 | 16280 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts
U(https://www.postgresql.org/docs/current/sql-createtablespace.html),
U(https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).
version_added: '2.8'
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
      I(state=absent) is mutually exclusive with I(location), I(owner), I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When reset is passed as an option's value, if the option was set previously, it will be removed
U(https://www.postgresql.org/docs/current/sql-altertablespace.html).
type: dict
rename_to:
description:
- New name of the tablespace.
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as an its owner
postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
description: List of queries that was tried to be executed.
returned: always
type: str
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
from psycopg2 import __version__ as PSYCOPG2_VERSION
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class PgTablespace(object):
"""Class for working with PostgreSQL tablespaces.
Args:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
Attrs:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
exists (bool) -- flag the tablespace exists in the DB or not
owner (str) -- tablespace owner
location (str) -- path to the tablespace directory in the file system
executed_queries (list) -- list of executed queries
new_name (str) -- new name for the tablespace
opt_not_supported (bool) -- flag indicates a tablespace option is supported or not
"""
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.owner = ''
self.settings = {}
self.location = ''
self.executed_queries = []
self.new_name = ''
self.opt_not_supported = False
# Collect info:
self.get_info()
def get_info(self):
"""Get tablespace information."""
# Check that spcoptions exists:
opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'", add_to_executed=False)
# For 9.1 version and earlier:
location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spclocation'", add_to_executed=False)
if location:
location = 'spclocation'
else:
location = 'pg_tablespace_location(t.oid)'
if not opt:
self.opt_not_supported = True
query = ("SELECT r.rolname, (SELECT Null), %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
else:
query = ("SELECT r.rolname, t.spcoptions, %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
res = exec_sql(self, query, add_to_executed=False)
if not res:
self.exists = False
return False
if res[0][0]:
self.exists = True
self.owner = res[0][0]
if res[0][1]:
# Options exist:
for i in res[0][1]:
i = i.split('=')
self.settings[i[0]] = i[1]
if res[0][2]:
# Location exists:
self.location = res[0][2]
def create(self, location):
"""Create tablespace.
Return True if success, otherwise, return False.
args:
location (str) -- tablespace directory path in the FS
"""
query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
return exec_sql(self, query, ddl=True)
def drop(self):
"""Drop tablespace.
Return True if success, otherwise, return False.
"""
return exec_sql(self, "DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
def set_owner(self, new_owner):
"""Set tablespace owner.
Return True if success, otherwise, return False.
args:
new_owner (str) -- name of a new owner for the tablespace"
"""
if new_owner == self.owner:
return False
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
return exec_sql(self, query, ddl=True)
def rename(self, newname):
"""Rename tablespace.
Return True if success, otherwise, return False.
args:
newname (str) -- new name for the tablespace"
"""
query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
self.new_name = newname
return exec_sql(self, query, ddl=True)
def set_settings(self, new_settings):
"""Set tablespace settings (options).
If some setting has been changed, set changed = True.
After all settings list is handling, return changed.
args:
new_settings (list) -- list of new settings
"""
# settings must be a dict {'key': 'value'}
if self.opt_not_supported:
return False
changed = False
# Apply new settings:
for i in new_settings:
if new_settings[i] == 'reset':
if i in self.settings:
changed = self.__reset_setting(i)
self.settings[i] = None
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
return changed
def __reset_setting(self, setting):
"""Reset tablespace setting.
Return True if success, otherwise, return False.
args:
setting (str) -- string in format "setting_name = 'setting_value'"
"""
query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
def __set_setting(self, setting):
"""Set tablespace setting.
Return True if success, otherwise, return False.
args:
setting (str) -- string in format "setting_name = 'setting_value'"
"""
query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
tablespace=dict(type='str', aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
location=dict(type='path', aliases=['path']),
owner=dict(type='str'),
set=dict(type='dict'),
rename_to=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
tablespace = module.params["tablespace"]
state = module.params["state"]
location = module.params["location"]
owner = module.params["owner"]
rename_to = module.params["rename_to"]
settings = module.params["set"]
if state == 'absent' and (location or owner or rename_to or settings):
        module.fail_json(msg="state=absent is mutually exclusive with location, "
"owner, rename_to, and set")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Change autocommit to False if check_mode:
if module.check_mode:
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=False)
else:
db_connection.set_isolation_level(READ_COMMITTED)
# Set defaults:
autocommit = False
changed = False
##############
# Create PgTablespace object and do main job:
tblspace = PgTablespace(module, cursor, tablespace)
# If tablespace exists with different location, exit:
if tblspace.exists and location and location != tblspace.location:
module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
# Create new tablespace:
if not tblspace.exists and state == 'present':
if rename_to:
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
if not location:
module.fail_json(msg="'location' parameter must be passed with "
"state=present if the tablespace doesn't exist")
# Because CREATE TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.create(location)
# Drop non-existing tablespace:
elif not tblspace.exists and state == 'absent':
# Nothing to do:
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
# Drop existing tablespace:
elif tblspace.exists and state == 'absent':
# Because DROP TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.drop()
# Rename tablespace:
elif tblspace.exists and rename_to:
if tblspace.name != rename_to:
changed = tblspace.rename(rename_to)
if state == 'present':
# Refresh information:
tblspace.get_info()
# Change owner and settings:
if state == 'present' and tblspace.exists:
if owner:
changed = tblspace.set_owner(owner)
if settings:
changed = tblspace.set_settings(settings)
tblspace.get_info()
# Rollback if it's possible and check_mode:
if not autocommit:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
tablespace=tblspace.name,
owner=tblspace.owner,
queries=tblspace.executed_queries,
options=tblspace.settings,
location=tblspace.location,
)
if state == 'present':
kw['state'] = 'present'
if tblspace.new_name:
kw['newname'] = tblspace.new_name
elif state == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,845,519,248,354,585,000 | 31.047244 | 120 | 0.615602 | false | 3.92952 | false | false | false |
inspirehep/invenio-formatter | invenio_formatter/models.py | 1 | 1670 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database cache for formatter."""
from invenio_ext.sqlalchemy import db
class Bibfmt(db.Model):
"""Represent a Bibfmt record."""
__tablename__ = 'bibfmt'
id_bibrec = db.Column(
db.MediumInteger(8, unsigned=True),
nullable=False,
server_default='0',
primary_key=True,
autoincrement=False)
format = db.Column(
db.String(10),
nullable=False,
server_default='',
primary_key=True,
index=True)
kind = db.Column(
db.String(10),
nullable=False,
server_default='',
index=True
)
last_updated = db.Column(
db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00',
index=True)
value = db.Column(db.iLargeBinary)
needs_2nd_pass = db.Column(db.TinyInteger(1), server_default='0')
__all__ = ('Bibfmt', )
| gpl-2.0 | -1,471,349,621,209,438,200 | 26.377049 | 74 | 0.651497 | false | 3.8041 | false | false | false |
Polytechnique-org/xorgauth | xorgauth/accounts/migrations/0012_make_user_names_blank.py | 1 | 1028 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-04 13:41
from __future__ import unicode_literals
from django.db import migrations, models
import xorgauth.utils.fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_make_user_ids_blank'),
]
operations = [
migrations.AlterField(
model_name='user',
name='firstname',
field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='first name'),
),
migrations.AlterField(
model_name='user',
name='lastname',
field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='sex',
field=models.CharField(blank=True, choices=[('male', 'Male'), ('female', 'Female')], max_length=6,
null=True, verbose_name='sex'),
),
]
| agpl-3.0 | 2,887,847,014,832,465,000 | 31.125 | 110 | 0.577821 | false | 4.063241 | false | false | false |
Souloist/Audio-Effects | Effects/Amplitude_Modulation/AM_example.py | 1 | 1911 | # Play a wave file with amplitude modulation.
# Assumes wave file is mono.
# This implementation reads and plays a one frame (sample) at a time (no blocking)
"""
Read a signal from a wave file, do amplitude modulation, play to output
Original: pyrecplay_modulation.py by Gerald Schuller, October 2013
Modified to read a wave file - Ivan Selesnick, September 2015
"""
# f0 = 0 # Normal audio
f0 = 400 # 'Duck' audio
import pyaudio
import struct
import wave
import math
# Open wave file (mono)
input_wavefile = 'author.wav'
# input_wavefile = 'sin01_mono.wav'
# input_wavefile = 'sin01_stereo.wav'
wf = wave.open( input_wavefile, 'rb')
RATE = wf.getframerate()
WIDTH = wf.getsampwidth()
LEN = wf.getnframes()
CHANNELS = wf.getnchannels()
print 'The sampling rate is {0:d} samples per second'.format(RATE)
print 'Each sample is {0:d} bytes'.format(WIDTH)
print 'The signal is {0:d} samples long'.format(LEN)
print 'The signal has {0:d} channel(s)'.format(CHANNELS)
# Open audio stream
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(WIDTH),
channels = 1,
rate = RATE,
input = False,
output = True)
print('* Playing...')
# Loop through wave file
for n in range(0, LEN):
# Get sample from wave file
input_string = wf.readframes(1)
# Convert binary string to tuple of numbers
input_tuple = struct.unpack('h', input_string)
# (h: two bytes per sample (WIDTH = 2))
# Use first value (of two if stereo)
input_value = input_tuple[0]
# Amplitude modulation (f0 Hz cosine)
output_value = input_value * math.cos(2*math.pi*f0*n/RATE)
# Convert value to binary string
    output_string = struct.pack('h', int(output_value))  # cast to int: packing a float with 'h' is deprecated
# Write binary string to audio output stream
stream.write(output_string)
print('* Done')
stream.stop_stream()
stream.close()
p.terminate()
| mit | -6,821,632,004,709,263,000 | 26.695652 | 82 | 0.6719 | false | 3.255537 | false | false | false |
mattpitkin/GraWIToNStatisticsLectures | figures/scripts/pvalue.py | 1 | 1242 | #!/usr/bin/env python
"""
Make plots showing how to calculate the p-value
"""
import matplotlib.pyplot as pl
from scipy.stats import norm
from scipy.special import erf
import numpy as np
mu = 0. # the mean, mu
sigma = 1. # standard deviation
x = np.linspace(-4, 4, 1000) # x
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(7,4), dpi=100)
# value of x for calculating p-value
Z = 1.233
y = norm.pdf(x, mu, sigma)
# plot pdfs
pl.plot(x, y, 'r')
pl.plot([-Z, -Z], [0., np.max(y)], 'k--')
pl.plot([Z, Z], [0., np.max(y)], 'k--')
pl.fill_between(x, np.zeros(len(x)), y, where=x<=-Z, facecolor='green', interpolate=True, alpha=0.6)
pl.fill_between(x, np.zeros(len(x)), y, where=x>=Z, facecolor='green', interpolate=True, alpha=0.6)
pvalue = 1.-erf(Z/np.sqrt(2.))
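# Equivalent two-sided tail probability via the normal survival function
# (kept as a comment; it should match pvalue above):
#   pvalue_alt = 2. * norm.sf(Z, mu, sigma)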
ax = pl.gca()
ax.set_xlabel('$Z$', fontsize=14)
ax.set_ylabel('$p(Z)$', fontsize=14)
ax.set_xlim(-4, 4)
ax.grid(True)
ax.text(Z+0.1, 0.3, '$Z_{\\textrm{obs}} = 1.233$', fontsize=16)
ax.text(-3.6, 0.31, '$p$-value$= %.2f$' % pvalue, fontsize=18,
bbox={'facecolor': 'none', 'pad':12, 'ec': 'r'})
fig.subplots_adjust(bottom=0.15)
pl.savefig('../pvalue.pdf')
pl.show()
| mit | 2,189,113,514,410,378,800 | 22.884615 | 100 | 0.625604 | false | 2.425781 | false | false | false |
mozvip/Sick-Beard | sickbeard/logger.py | 1 | 6374 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import threading
import logging
import sickbeard
from sickbeard import classes
# number of log files to keep
NUM_LOGS = 3
# log size in bytes
LOG_SIZE = 10000000 # 10 megs
ERROR = logging.ERROR
WARNING = logging.WARNING
MESSAGE = logging.INFO
DEBUG = logging.DEBUG
reverseNames = {u'ERROR': ERROR,
u'WARNING': WARNING,
u'INFO': MESSAGE,
u'DEBUG': DEBUG}
class SBRotatingLogHandler(object):
def __init__(self, log_file, num_files, num_bytes):
self.num_files = num_files
self.num_bytes = num_bytes
self.log_file = log_file
self.cur_handler = None
self.writes_since_check = 0
self.log_lock = threading.Lock()
def initLogging(self, consoleLogging=True):
self.log_file = os.path.join(sickbeard.LOG_DIR, self.log_file)
self.cur_handler = self._config_handler()
logging.getLogger('sickbeard').addHandler(self.cur_handler)
logging.getLogger('subliminal').addHandler(self.cur_handler)
# define a Handler which writes INFO messages or higher to the sys.stderr
if consoleLogging:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
# add the handler to the root logger
logging.getLogger('sickbeard').addHandler(console)
logging.getLogger('subliminal').addHandler(console)
logging.getLogger('sickbeard').setLevel(logging.DEBUG)
logging.getLogger('subliminal').setLevel(logging.ERROR)
def _config_handler(self):
"""
Configure a file handler to log at file_name and return it.
"""
file_handler = logging.FileHandler(self.log_file)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%b-%d %H:%M:%S'))
return file_handler
def _log_file_name(self, i):
"""
        Returns a numbered log file name depending on i. If i == 0 it just uses the base log name, otherwise it appends
        the number to the extension (blah.log.3 for i == 3)
        i: Log number to use
"""
return self.log_file + ('.' + str(i) if i else '')
def _num_logs(self):
"""
Scans the log folder and figures out how many log files there are already on disk
Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
"""
cur_log = 0
while os.path.isfile(self._log_file_name(cur_log)):
cur_log += 1
return cur_log - 1
def _rotate_logs(self):
sb_logger = logging.getLogger('sickbeard')
subli_logger = logging.getLogger('subliminal')
# delete the old handler
if self.cur_handler:
self.cur_handler.flush()
self.cur_handler.close()
sb_logger.removeHandler(self.cur_handler)
subli_logger.removeHandler(self.cur_handler)
# rename or delete all the old log files
for i in range(self._num_logs(), -1, -1):
cur_file_name = self._log_file_name(i)
try:
if i >= NUM_LOGS:
os.remove(cur_file_name)
else:
os.rename(cur_file_name, self._log_file_name(i+1))
except WindowsError:
pass
# the new log handler will always be on the un-numbered .log file
new_file_handler = self._config_handler()
self.cur_handler = new_file_handler
sb_logger.addHandler(new_file_handler)
subli_logger.addHandler(new_file_handler)
def log(self, toLog, logLevel=MESSAGE):
with self.log_lock:
# check the size and see if we need to rotate
if self.writes_since_check >= 10:
if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) >= LOG_SIZE:
self._rotate_logs()
self.writes_since_check = 0
else:
self.writes_since_check += 1
meThread = threading.currentThread().getName()
message = meThread + u" :: " + toLog
out_line = message.encode('utf-8')
sb_logger = logging.getLogger('sickbeard')
try:
if logLevel == DEBUG:
sb_logger.debug(out_line)
elif logLevel == MESSAGE:
sb_logger.info(out_line)
elif logLevel == WARNING:
sb_logger.warning(out_line)
elif logLevel == ERROR:
sb_logger.error(out_line)
# add errors to the UI logger
classes.ErrorViewer.add(classes.UIError(message))
else:
sb_logger.log(logLevel, out_line)
except ValueError:
pass
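# A minimal usage sketch of the rotating handler above, assuming sickbeard.LOG_DIR is
# already configured elsewhere; the log file name is hypothetical. Defined but never called.
def _example_logger_usage():
    example_handler = SBRotatingLogHandler('example.log', NUM_LOGS, LOG_SIZE)
    example_handler.initLogging(consoleLogging=True)
    example_handler.log(u"Something worth recording", WARNING)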
sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE)
def log(toLog, logLevel=MESSAGE):
sb_log_instance.log(toLog, logLevel) | gpl-3.0 | -7,407,349,847,844,336,000 | 32.464865 | 118 | 0.568089 | false | 4.067645 | false | false | false |
c2corg/v6_api | c2corg_api/search/mappings/image_mapping.py | 1 | 1144 | from c2corg_api.models.image import IMAGE_TYPE, Image
from c2corg_api.search.mapping import SearchDocument, BaseMeta
from c2corg_api.search.mapping_types import QueryableMixin, QEnumArray, \
QInteger, QDate
class SearchImage(SearchDocument):
class Meta(BaseMeta):
doc_type = IMAGE_TYPE
activities = QEnumArray(
'act', model_field=Image.activities)
categories = QEnumArray(
'cat', model_field=Image.categories)
image_type = QEnumArray(
'ityp', model_field=Image.image_type)
elevation = QInteger(
'ialt', range=True)
date_time = QDate('idate', 'date_time')
FIELDS = [
'activities', 'categories', 'image_type', 'elevation', 'date_time'
]
@staticmethod
def to_search_document(document, index):
search_document = SearchDocument.to_search_document(document, index)
if document.redirects_to:
return search_document
SearchDocument.copy_fields(
search_document, document, SearchImage.FIELDS)
return search_document
SearchImage.queryable_fields = QueryableMixin.get_queryable_fields(SearchImage)
| agpl-3.0 | 6,425,518,314,371,161,000 | 29.105263 | 79 | 0.681818 | false | 3.726384 | false | false | false |
ar4s/django | django/forms/widgets.py | 1 | 32506 | """
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
from itertools import chain
import warnings
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import conditional_escape, format_html
from django.utils.translation import ugettext_lazy
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import datetime_safe, formats, six
from django.utils.six.moves.urllib.parse import urljoin
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput',
'EmailInput', 'URLInput', 'NumberInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput',
'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget',
'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget',
)
MEDIA_TYPES = ('css','js')
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name, None))
# Any leftover attributes must be invalid.
# if media_attrs != {}:
# raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [format_html('<script type="text/javascript" src="{0}"></script>', self.absolute_path(path)) for path in self._js]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[
[format_html('<link href="{0}" type="text/css" media="{1}" rel="stylesheet" />', self.absolute_path(path), medium)
for path in self._css[medium]]
for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
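# A minimal sketch of combining Media objects; the asset paths are hypothetical.
# Defined but never called.
def _example_media_usage():
    css_media = Media(css={'all': ('pretty.css',)})
    js_media = Media(js=('animations.js', 'actions.js'))
    combined = css_media + js_media
    return combined['js'].render()  # renders only the <script> tags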
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend == True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"Metaclass for classes that can have media definitions"
def __new__(cls, name, bases, attrs):
new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
    needs_multipart_form = False # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
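# A minimal sketch of declaring static assets on a widget through an inner "class Media"
# (collected by MediaDefiningClass above); the widget and file names are hypothetical.
class _ExampleCalendarWidget(Widget):
    class Media:
        css = {'all': ('calendar.css',)}
        js = ('calendar.js',)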
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{0} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value: value=None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
is_hidden = True
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id', None)
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{0} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name, None)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = '%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
url_markup_template = '<a href="{0}">{1}</a>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
substitutions['initial'] = format_html(self.url_markup_template,
value.url,
force_text(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{0}>\r\n{1}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATE_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_date(value)
return value.strftime(self.format)
return value
class DateTimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateTimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_datetime(value)
return value.strftime(self.format)
return value
class TimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(TimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('TIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
return value.strftime(self.format)
return value
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{0} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
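# A minimal sketch of the check_test hook: the checkbox derives its checked state from
# the value being rendered; the field name and the "on"-only rule are hypothetical.
def _example_checkbox_check_test():
    checkbox = CheckboxInput(check_test=lambda value: value == 'on')
    return checkbox.render('subscribed', 'on')  # output includes checked="checked"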
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{0}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value == None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{0}"{1}>{2}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{0}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value, None)
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{0}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{0}"', self.id_for_label)
else:
label_for = ''
return format_html('<label{0}>{1} {2}</label>', label_for, self.tag(), self.choice_label)
def is_checked(self):
return self.value == self.choice_value
def tag(self):
final_attrs = dict(self.attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{0} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class RadioInput(RadioChoiceInput):
def __init__(self, *args, **kwargs):
msg = "RadioInput has been deprecated. Use RadioChoiceInput instead."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
super(RadioInput, self).__init__(*args, **kwargs)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
        choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id', None)
start_tag = format_html('<ul id="{0}">', id_) if id_ else '<ul>'
output = [start_tag]
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple,list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{0}'.format(i)
sub_ul_renderer = ChoiceFieldRenderer(name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html('<li>{0}{1}</li>', choice_value,
sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html('<li>{0}</li>', force_text(w)))
output.append('</ul>')
return mark_safe('\n'.join(output))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
        # The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different than other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
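# A minimal sketch of a custom MultiWidget subclass: it only needs to choose its
# sub-widgets and implement decompress(); the phone-number layout is hypothetical
# (SplitDateTimeWidget below is the in-tree example).
class _ExamplePhoneWidget(MultiWidget):
    def __init__(self, attrs=None):
        widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
        super(_ExamplePhoneWidget, self).__init__(widgets, attrs)
    def decompress(self, value):
        return value.split('-', 1) if value else [None, None]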
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
is_hidden = True
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
widget.is_hidden = True
| bsd-3-clause | -8,474,089,965,727,551,000 | 35.813137 | 130 | 0.596628 | false | 4.139837 | false | false | false |
gift-surg/GIFT-Grab | src/tests/blackmagic/stereo_capture.py | 1 | 4325 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Example demonstrating how stereo video frames can be captured
using a frame grabber card that supports this feature.
"""
import time
import cv2
import numpy as np
from pygiftgrab import (IObserver, VideoSourceFactory,
ColourSpace, Device, VideoFrame)
class StereoFrameSaver(IObserver):
"""
Simple class that demonstrates how mono and stereo frames,
and their respective parameters can be queried and the actual
frame data can be saved using the GIFT-Grab stereo API.
"""
def __init__(self):
super(StereoFrameSaver, self).__init__()
self.current = 0
def update(self, frame):
self.current += 1
# 4 is the number of variations of stereo/mono
# calls to the data method, using it here as well to
# avoid flooding the user's terminal
if self.current <= 4:
# display number of stereo frames, should be 2
# for this device
print(
'Got {} stereo frames'.format(
frame.stereo_count()
)
)
# display length of data of each stereo frame,
# each stereo frame should consist of same number
# of bytes for this device
print(
'Stereo data length (bytes):\n'
'\tdata_length(): {}\n'
'\tdata_length(0): {}\n'
'\tdata_length(1): {}\n'.format(
frame.data_length(), frame.data_length(0),
frame.data_length(1)
)
)
frame_shape = (frame.rows(), frame.cols(), 4)
# the slicing below, i.e. [:, :, :3], is due to OpenCV's
# imwrite expecting BGR data, so we strip out the alpha
# channel of each frame when saving it
if self.current == 1:
# all three calls below save the same frame,
# that is the first of the two stereo frames
cv2.imwrite(
'mono-frame.data.png',
np.reshape(frame.data(), frame_shape)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-False.png',
np.reshape(frame.data(False), frame_shape)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-False-0.png',
np.reshape(frame.data(False, 0), frame_shape)[:, :, :3]
)
elif self.current == 2:
# the two calls below save the two stereo frames,
# however the data needs to be reshaped, as the
# call to the data method yields a flat NumPy array
cv2.imwrite(
'stereo-frame.data-False-0.png',
np.reshape(frame.data(False, 0), frame_shape)[:, :, :3]
)
cv2.imwrite(
'stereo-frame.data-False-1.png',
np.reshape(frame.data(False, 1), frame_shape)[:, :, :3]
)
elif self.current == 3:
# the two calls below save the two stereo frames,
# without the need for reshaping the data, as the
# call to the data method already yields a
# structured NumPy array
cv2.imwrite(
'mono-frame.data-True.png',
frame.data(True)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-True-0.png',
frame.data(True, 0)[:, :, :3]
)
elif self.current == 4:
# the two calls below save the two stereo frames,
# without the need for reshaping the data, as the
# call to the data method already yields a
# structured NumPy array
cv2.imwrite(
'stereo-frame.data-True-0.png',
frame.data(True, 0)[:, :, :3]
)
cv2.imwrite(
'stereo-frame.data-True-1.png',
frame.data(True, 1)[:, :, :3]
)
if __name__ == '__main__':
sfac = VideoSourceFactory.get_instance()
source = sfac.get_device(
Device.DeckLink4KExtreme12G, ColourSpace.BGRA
)
saver = StereoFrameSaver()
source.attach(saver)
time.sleep(2) # operate pipeline for 2 sec
source.detach(saver)
| bsd-3-clause | -5,324,610,510,568,604,000 | 32.269231 | 71 | 0.52 | false | 4.080189 | false | false | false |
arcade-lab/tia-infrastructure | tools/simulator/system.py | 1 | 9352 | """
Top-level system wrapper.
"""
import re
import sys
import pandas as pd
from simulator.exception import SimulatorException
class System:
"""
A system class to wrap a collection of processing and memory elements as well as the channels through which they
communicate.
"""
def __init__(self):
"""
Empty system.
"""
# Start at the zeroth cycle, and initialize system elements as empty lists to allow for appends.
self.cycle = 0
self.processing_elements = []
self.memories = []
self.buffers = []
# Add hierarchical elements for easier access.
self.quartets = []
self.blocks = []
self.arrays = []
# --- Time-stepping Method ---
def iterate(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace):
"""
Move ahead one clock cycle, period or whatever you want to call it (this is a functional simulator).
:param interactive: waiting on the user at each cycle
:param show_processing_elements: showing processing element information
:param show_memories: showing memory element information
:param show_buffers: showing channel information
:return: whether the system has halted
"""
# Initially, assume the system is halting this cycle.
halt = True
# Print out a debug header, if requested.
if interactive or show_processing_elements or show_memories or show_buffers:
print(f"\n--- Cycle: {self.cycle} ---\n")
# Perform local processing element operations.
if show_processing_elements:
print("Processing Elements\n")
for processing_element in self.processing_elements:
processing_element.iterate(show_processing_elements, keep_execution_trace)
for processing_element in self.processing_elements:
halt &= processing_element.core.halt_register # Only halt if all processing elements have halted.
# Perform memory operations.
if show_memories:
print("Memories\n")
for memory in self.memories:
memory.iterate(show_memories)
# Commit all pending buffer transactions.
if show_buffers:
print("Buffers\n")
for buffer in self.buffers:
buffer.commit(show_buffers)
halt &= buffer.empty # Only halt the system if all buffers are empty.
# Move time forward assuming we are not halting.
if not halt:
self.cycle += 1
# Return whether we should halt.
return halt
# --- Display Methods ---
def halt_message(self):
"""
Print a message showing the state of the system upon halting.
"""
# Formatted message.
print(f"\n--- System halted after {self.cycle} cycles. ---\n")
print("Final Memory Layout\n")
for memory in self.memories:
print(f"name: {memory.name}")
print("contents:")
i = 0
while i < 10:
if i < len(memory.contents):
print(f"0x{memory.contents[i]:08x}")
else:
break
i += 1
if len(memory.contents) > 10:
print("...\n")
else:
print("bound\n")
def interrupted_message(self):
"""
Print a message showing the state of the system upon being interrupted by the user in a simulation.
:param self: system wrapper
"""
# Formatted message.
print(f"\n--- System interrupted after {self.cycle} cycles. ---\n")
print("Final Memory Layout\n")
for memory in self.memories:
print(f"name: {memory.name}")
print("contents:")
i = 0
while i < 10:
if i < len(memory.contents):
print(f"0x{memory.contents[i]:08x}")
else:
break
i += 1
if len(memory.contents) > 10:
print("...\n")
else:
print("bound\n")
# --- Top-level Methods ---
def register(self, element):
"""
Register a functional unit (processing element, memory, etc.) with the event loop.
:param element: functional unit
"""
# Make sure the functional unit has a special registration method.
registration_operation = getattr(element, "_register")
if not callable(registration_operation):
exception_string = f"The functional unit of type {type(element)} does not have internal system " \
+ f"registration method."
raise SimulatorException(exception_string)
# Call the functional unit's internal method.
element._register(self)
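    # A registerable element only needs a _register(system) hook that appends itself to
    # the appropriate list, e.g. a hypothetical buffer could be written as:
    #   class ExampleFifo:
    #       def _register(self, system):
    #           system.buffers.append(self)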
def finalize(self):
"""
Alphabetize components in the event loop for clean debug output and make sure all processing elements are
indexed.
"""
# The numerical strings are the ones we care about.
def natural_number_sort_key(entity):
name = entity.name
key_string_list = re.findall(r"(\d+)", name)
if len(key_string_list) > 0:
return [int(key_string) for key_string in key_string_list]
else:
return []
# Sort all the entities.
self.processing_elements = sorted(self.processing_elements, key=natural_number_sort_key)
for i, processing_element in enumerate(self.processing_elements):
if processing_element.name != f"processing_element_{i}":
exception_string = f"Missing processing element {i}."
raise SimulatorException(exception_string)
self.memories = sorted(self.memories, key=natural_number_sort_key)
self.buffers = sorted(self.buffers, key=natural_number_sort_key)
def run(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace):
"""
Execute until the system halts or a user issues an interrupt or writes an EOF.
:param interactive: whether to wait for user input on each cycle
:param show_processing_elements: whether to show processing element status each cycle
:param show_memories: whether to show a summary of the memory contents each cycle
:param show_buffers: whether to show channel state each cycle
:param keep_execution_trace: whether to keep a running log of executed instructions on each processing element
:return: whether the system has halted and whether it was interrupted
"""
# Simple event/read-evaluate loop.
halt = False
interrupted = False
while True:
try:
if interactive:
if self.cycle > 0:
                        user_input = input("Press [Enter] to continue. Type \"exit\", or use [Ctrl-C] or [Ctrl-D] to "
+ "exit.\n").strip()
if user_input == "exit":
break
elif user_input != "":
print(f"Unrecognized command: {user_input}.", file=sys.stderr)
halt = self.iterate(interactive,
show_processing_elements,
show_memories,
show_buffers,
keep_execution_trace)
if halt:
self.halt_message()
break
except (KeyboardInterrupt, EOFError):
interrupted = True
self.interrupted_message()
break
# Return the status flags.
return halt, interrupted
def reset_processing_elements(self):
"""
Reset all the processing elements in a system.
"""
# Use the reset() methods built in to the processing elements.
for processing_element in self.processing_elements:
processing_element.reset()
def reset_memories(self):
"""
Reset all the memories in a system.
"""
# Use the reset() methods built in to the memories.
for memory in self.memories:
memory.reset()
def reset_buffers(self):
"""
Reset all the buffers in a system.
"""
# Use the buffers' own reset() methods.
for buffer in self.buffers:
buffer.reset()
def reset(self):
"""
Reset all the processing elements, memories and buffers.
"""
# Just wrap our own methods.
self.reset_processing_elements()
self.reset_memories()
self.reset_buffers()
@property
def processing_element_traces(self):
# Return a dictionary of execution traces.
return {processing_element.name: processing_element.core.execution_trace
for processing_element in self.processing_elements}
@property
def processing_element_traces_as_data_frame(self):
# For convenient CSV output and analysis.
return pd.DataFrame(self.processing_element_traces)
| mit | 3,835,425,596,161,252,000 | 34.558935 | 118 | 0.572391 | false | 4.752033 | false | false | false |
Mariaanisimova/pythonintask | IVTp/2014/Shcherbakov_R_A/task_12_22.py | 1 | 1719 | # Task 12. Variant 22.
# Develop a "tic-tac-toe" game. (see M. Dawson, Programming in Python,
# ch. 6).
# Shcherbakov R.A.
# 22.05.2016
print("""
Welcome to the tic-tac-toe game
to make a move, enter a number from 0 to 8
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8""")
doska=["-","-","-","-","-","-","-","-","-"]
bol=True
wins=False
schet=0
def disp(doska):
print("\n\t"+doska[0]+" | "+doska[1]+" | "+doska[2]+"\n\t---------"+
"\n\t"+doska[3]+" | "+doska[4]+" | "+doska[5]+"\n\t---------"+
"\n\t"+doska[6]+" | "+doska[7]+" | "+doska[8]+"\n\t---------")
def win(doska):
twin=((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))
for row in twin:
if doska[row[0]]==doska[row[1]]==doska[row[2]]!="-":
return True
while wins!=True:
if(schet==5):
break
if(bol):
        n1=input("\nPlayer 1's move: ")
if(doska[int(n1)]=="-"):
doska[int(n1)]="X"
disp(doska)
bol=False
wins=win(doska)
schet+=1
else:
            print("Occupied")
else:
        n2=input("\nPlayer 2's move: ")
if(doska[int(n2)]=="-"):
doska[int(n2)]="O"
disp(doska)
bol=True
wins=win(doska)
else:
            print("Occupied")
if(wins and bol):
    print("Player 2 wins")
elif(wins and not bol):
    print("Player 1 wins")
else:
    print("Draw")
input("Ok") | apache-2.0 | 7,851,925,064,230,096,000 | 25.137931 | 76 | 0.446205 | false | 2.092541 | false | false | false |
muff1nman/duplicity | duplicity/manifest.py | 1 | 16791 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import log
from duplicity import globals
from duplicity import util
class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name), util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def to_string(self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
# Get file changed list
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
filecount = 0
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
assert filecount == len(self.files_changed)
next_vi_string_regexp = re.compile("(^|\\n)(volume\\s.*?)"
"(\\nvolume\\s|$)", re.I | re.S)
starting_s_index = 0
highest_vol = 0
latest_vol = 0
while 1:
match = next_vi_string_regexp.search(s[starting_s_index:])
if not match:
break
vi = VolumeInfo().from_string(match.group(2))
self.add_volume_info(vi)
highest_vol = max(highest_vol, vi.volume_number)
latest_vol = vi.volume_number
starting_s_index += match.end(2)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
log.Notice(_("Manifests not equal because volume lists differ"))
return False
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
"""
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata()
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
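# A minimal sketch of the volume lookup used above: a VolumeInfo records the index range
# a volume covers, and contains() checks a prefix against it; the paths are hypothetical.
# Defined but never called.
def _example_volume_lookup():
    vi = VolumeInfo().set_info(1, ("home", "alice"), None, ("home", "mallory"), None)
    return vi.contains(("home", "bob"))  # True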
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
            if field_name == "volume":  # field names are lowercased above
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
indicies.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
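# A minimal sketch of the quoting round trip; the path is hypothetical and Unquote below
# reverses Quote. Defined but never called.
def _example_quote_roundtrip():
    quoted = Quote("dir name/file")  # -> '"dir\x20name/file"'
    return Unquote(quoted)  # -> "dir name/file"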
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list)
| gpl-2.0 | -1,987,972,922,932,362,800 | 32.183794 | 128 | 0.546602 | false | 4.071532 | false | false | false |