column             dtype     values / range
repo_name          string    lengths 5 to 92
path               string    lengths 4 to 221
copies             string    19 distinct values
size               string    lengths 4 to 6
content            string    lengths 766 to 896k
license            string    15 distinct values
hash               int64     -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean          float64   6.51 to 99.9
line_max           int64     32 to 997
alpha_frac         float64   0.25 to 0.96
autogenerated      bool      1 class
ratio              float64   1.5 to 13.6
config_test        bool      2 classes
has_no_keywords    bool      2 classes
few_assignments    bool      1 class
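
As a rough sketch of how rows with this schema might be consumed, assuming the dump has been exported to a JSON Lines file with these exact column names (the file name code_rows.jsonl, the export format, and the filter thresholds below are illustrative assumptions, not part of this dump), the quality columns can be used to select rows:

import json

# Hypothetical file name: the dump does not say where the rows are stored,
# so a JSON Lines export using the column names above is assumed.
DUMP_PATH = "code_rows.jsonl"


def iter_rows(path):
    """Yield one dict per JSON Lines record with the columns listed above."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)


def keep(row):
    """Illustrative filter on the quality columns; thresholds are assumptions."""
    return (
        not row["autogenerated"]             # drop auto-generated files
        and not row["config_test"]           # drop config/test files
        and float(row["alpha_frac"]) >= 0.5  # mostly alphabetic content
        and int(row["line_max"]) <= 200      # no extremely long lines
    )


if __name__ == "__main__":
    selected = [row for row in iter_rows(DUMP_PATH) if keep(row)]
    for row in selected[:5]:
        print(row["repo_name"], row["path"], row["license"], row["size"])
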
repo_name:        kawashiro/dewyatochka2
path:             src/dewyatochka/plugins/cool_story/parser/_base.py
copies:           1
size:             4448
content:
# -*- coding: UTF-8 """ Common parsers logic Classes ======= AbstractParser -- Abstract parser Functions ========= parse_multiline_html -- Join html paragraphs collection into one multi line string Attributes ========== RawPost -- Raw post immutable structure """ import re from collections import namedtuple from functools import reduce from abc import ABCMeta, abstractmethod, abstractproperty from lxml.html import HtmlElement, tostring from pyquery import PyQuery from dewyatochka.core.utils.http import WebClient __all__ = ['AbstractParser', 'RawPost', 'parse_multiline_html'] # Raw post immutable structure (id: int, title: str, text: str, tags: frozenset) RawPost = namedtuple('RawPost', ('id', 'source', 'title', 'text', 'tags')) # Regexp to extract text from raw post html code __post_new_line_regexp = re.compile(r'<br\s*/?>', re.I) __post_sanitize_regexp = re.compile(r'<.*?>') def parse_multiline_html(paragraphs) -> str: """ Join html paragraphs collection into one multi line string :param iterable paragraphs: Paragraphs HTML nodes list :return: """ return '\n'.join( filter( None, map( lambda line: __post_sanitize_regexp.sub(r'', line).strip(), reduce( lambda msg_lines, lines: msg_lines + lines, [__post_new_line_regexp.split(tostring(line, encoding='unicode')) for line in paragraphs] ) ) ) ) class AbstractParser(metaclass=ABCMeta): """ Parser implementation Each parser is an iterable object that yields posts beginning from the last and ending on the first post """ def __init__(self): """ Init parser object, define mandatory attributes """ self.__client = None @abstractmethod def _parse_post(self, html_element: HtmlElement) -> RawPost: """ Parse post html element :param HTMLElement html_element: :return RawPost: """ pass @abstractmethod def _parse_posts_collection(self, html: PyQuery) -> list: """ Get posts HTMLElement[] collection :param PyQuery html: Page PyQuery object :return list: """ pass @abstractmethod def _parse_pages_collection(self, html: PyQuery) -> list: """ Get pages urls for indexation :param PyQuery html: Page PyQuery object :return list: """ pass def parse_page_html(self, html) -> list: """ Parse page's html code and get stories list :param str|PyQuery html: Page html code or PyQuery object :return list: """ html_doc = html if isinstance(html, PyQuery) else PyQuery(html) return [self._parse_post(post) for post in self._parse_posts_collection(html_doc)] def parse_page_url(self, page: str) -> list: """ Get stories from page by page url :param str page: Page url :return list: """ return self.parse_page_html(self._client.get(page)) @property def _web_host(self) -> str: """ Remote server hostname, normally same as hostname :return str: """ return self.name @abstractproperty def name(self) -> str: """ Get unique name :return str: """ pass @property def _client(self) -> WebClient: """ Get web client instance :return WebClient: """ if self.__client is None: # noinspection PyTypeChecker self.__client = WebClient(self._web_host) return self.__client def __iter__(self, start_page='') -> RawPost: """ Yields all the posts found beginning from the page specified :param str start_page: Page url (e.g. "/20131117") or empty to start from beginning :return RawPost: """ posts = [] pages_links = [start_page or '/'] while True: while pages_links: current_page = pages_links.pop(0) html_doc = self._client.get(current_page) posts = self.parse_page_html(html_doc) if posts: pages_links = self._parse_pages_collection(html_doc) break if not posts: raise StopIteration() while posts: yield posts.pop(0)
license:          gpl-3.0
hash:             -971,577,892,149,413,400
line_mean:        26.288344
line_max:         91
alpha_frac:       0.582284
autogenerated:    false
ratio:            4.260536
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        Luxoft/SDLP2
path:             SDL_Core/tools/InterfaceGenerator/generator/parsers/RPCBase.py
copies:           1
size:             26596
content:
"""RPC XML base parser. Contains base parser for SDLRPC v1/v2 and JSON RPC XML format. """ import collections import xml.etree.ElementTree from generator import Model class ParseError(Exception): """Parse error. This exception is raised when XML contains errors and can't be parsed. """ pass class Parser(object): """RPC XML Parser base. This class must not be used directly. One of its subclasses must be used instead. """ def __init__(self): """Constructor.""" self._types = {} self._enums = collections.OrderedDict() self._structs = collections.OrderedDict() self._functions = collections.OrderedDict() self._params = {} def parse(self, filename): """Parse XML. Returns an instance of generator.Model.Interface containing parsed interface or raises ParseError if input XML contains errors and can't be parsed. Keyword arguments: filename -- name of input XML file. """ tree = xml.etree.ElementTree.parse(filename) root = tree.getroot() self._enums = self._initialize_enums() self._structs = collections.OrderedDict() self._functions = collections.OrderedDict() self._params = {} self._types = dict(self._enums.items()) self._parse_root(root) return Model.Interface(enums=self._enums, structs=self._structs, functions=self._functions, params=self._params) def _initialize_enums(self): """Initialize enums. The default implementation returns an OrderedDict with two empty enums: "FunctionID" and "messageType". Required for formats where these enums must be generated automatically according to the declared in the XML functions. These enums are filled during the parsing of the functions. """ return collections.OrderedDict( [("FunctionID", Model.Enum(name="FunctionID")), ("messageType", Model.Enum(name="messageType"))]) def _check_enum_name(self, enum): """Check enum name. This method is called to check whether the newly parsed enum's name conflicts with some predefined enum. This implementation raises an error if enum name is one of the predefined enums "FunctionID" or "messageType" which must not be declared explicitly in the XML. """ if enum.name in ["FunctionID", "messageType"]: raise ParseError( "Enum '" + enum.name + "' is generated automatically in SDLRPCV1 and" " must not be declared in xml file") def _check_function_param_name(self, function_param_name): """Check function param name. This method is called to check whether the newly parsed function parameter name conflicts with some predefined name. This implementation doesn't check anything because there is no predefined names in base RPC XML. """ pass def _parse_root(self, root): """Parse root XML element. Default implementation parses root as interface element without a prefix. Keyword arguments: root -- root element. """ self._parse_interface(root, "") def _parse_interface(self, interface, prefix): """Parse interface element. Keyword arguments: interface -- interface element. prefix -- string prefix for all types of the interface. 
""" if interface.tag != "interface": raise ParseError("Invalid interface tag: " + interface.tag) params, subelements, attrib = self._parse_base_item(interface, "") for param in ["description", "design_description", "todos"]: if 0 != len(params[param]): attrib[param] = "\n".join(params[param]) if 0 != len(params["issues"]): attrib["issues"] = "\n".join(i.value for i in params["issues"]) self._params = dict( self._params.items() + [(prefix + p[0], p[1]) for p in attrib.items()]) for element in subelements: if element.tag == "enum": enum = self._parse_enum(element, prefix) self._check_enum_name(enum) self._add_item(self._enums, enum) self._add_type(enum) elif element.tag == "struct": struct = self._parse_struct(element, prefix) self._add_item(self._structs, struct) self._add_type(struct) elif element.tag == "function": function = self._parse_function(element, prefix) self._add_item(self._functions, function, (function.function_id, function.message_type)) else: raise ParseError("Unexpected element: " + element.tag) @staticmethod def _add_item(items, item, key=None): """Add new item in the items dictionary with given key. Performs additional check for presence in the dictionary and throws ParseError exception if key already exist. """ if key is None: key = item.name if key in items: raise ParseError(type(item).__name__ + " '" + str(key) + "' is declared more than once") items[key] = item def _add_type(self, _type): """Add new type in the internal types dictionary. Performs additional check for presence type with same name in the dictionary and throws ParseError exception if key already exist. """ if _type.name in self._types: raise ParseError("Type '" + _type.name + "' is declared as both struct and enum") self._types[_type.name] = _type def _parse_enum(self, element, prefix): """Parse element as enumeration. Returns an instance of generator.Model.Enum """ params, subelements, attributes = \ self._parse_base_item(element, prefix) internal_scope = None scope = None for attribute in attributes: if attribute == "internal_scope": internal_scope = attributes[attribute] elif attribute == "scope": scope = attributes[attribute] else: raise ParseError("Unexpected attribute '" + attribute + "' in enum '" + params["name"] + "'") params["internal_scope"] = internal_scope params["scope"] = scope elements = collections.OrderedDict() for subelement in subelements: if subelement.tag == "element": self._add_item(elements, self._parse_enum_element(subelement)) else: raise ParseError("Unexpected element '" + subelement.tag + "' in enum '" + params["name"] + "'") params["elements"] = elements # Magic usage is correct # pylint: disable=W0142 return Model.Enum(**params) def _parse_struct(self, element, prefix): """Parse element as structure. Returns an instance of generator.Model.Struct """ params, subelements, attrib = self._parse_base_item(element, prefix) scope = None for attribute in attrib: if attribute == "scope": scope = attrib[attribute] else: raise ParseError("Unexpected attribute '" + attribute + "' in struct '" + params["name"] + "'") params["scope"] = scope members = collections.OrderedDict() for subelement in subelements: if subelement.tag == "param": self._add_item(members, self._parse_param(subelement, prefix)) else: raise ParseError("Unexpected subelement '" + subelement.name + "' in struct '" + params["name"] + "'") params["members"] = members # Magic usage is correct # pylint: disable=W0142 return Model.Struct(**params) def _parse_function(self, element, prefix): """Parse element as function. 
Returns an instance of generator.Model.Function """ params, subelements, attributes = \ self._parse_base_item(element, prefix) function_id, message_type = self._parse_function_id_type( params["name"], attributes) scope = None for attribute in attributes: if attribute == "scope": scope = attributes[attribute] else: raise ParseError("Unexpected attribute '" + attribute + "' in function '" + params["name"] + "'") params["function_id"] = function_id params["message_type"] = message_type params["scope"] = scope function_params = collections.OrderedDict() for subelement in subelements: if subelement.tag == "param": function_param = self._parse_function_param(subelement, prefix) self._check_function_param_name(function_param.name) if function_param.name in function_params: raise ParseError("Parameter '" + function_param.name + "' is specified more than once" + " for function '" + params["name"] + "'") function_params[function_param.name] = function_param else: raise ParseError("Unexpected subelement '" + subelement.tag + "' in function '" + params["name"] + "'") params["params"] = function_params # Magic usage is correct # pylint: disable=W0142 return Model.Function(**params) def _parse_function_id_type(self, function_name, attrib): """Parse function id and message type according to XML format. This implementation takes function name as function id and extracts attribute "messagetype" as message type and searches them in enums "FunctionID" and "messageType" adding the missing elements if necessary. Returns function id and message type as an instances of EnumElement. """ if "messagetype" not in attrib: raise ParseError("No messagetype specified for function '" + function_name + "'") function_id = self._provide_enum_element_for_function( "FunctionID", function_name) message_type = self._provide_enum_element_for_function( "messageType", self._extract_attrib(attrib, "messagetype")) return function_id, message_type def _provide_enum_element_for_function(self, enum_name, element_name): """Provide enum element for functions. Search an element in an enum and add it if it is missing. Returns EnumElement. """ if enum_name not in self._types: raise ParseError("Enum '" + enum_name + "' is not initialized") enum = self._types[enum_name] if not isinstance(enum, Model.Enum): raise ParseError("'" + enum_name + "' is not an enum") if element_name not in enum.elements: enum.elements[element_name] = Model.EnumElement(name=element_name) return enum.elements[element_name] def _parse_base_item(self, element, prefix): """Parse element as base item. 
Returns an params, sub-elements and attributes of the element """ params = {} description = [] design_description = [] issues = [] todos = [] subelements = [] if "name" not in element.attrib: raise ParseError("Name is not specified for " + element.tag) params["name"] = prefix + element.attrib["name"] attrib = dict(element.attrib.items()) del attrib["name"] params["platform"] = self._extract_attrib(attrib, "platform") for subelement in element: if subelement.tag == "description": description.append(self._parse_simple_element(subelement)) elif subelement.tag == "designdescription": design_description.append( self._parse_simple_element(subelement)) elif subelement.tag == "todo": todos.append(self._parse_simple_element(subelement)) elif subelement.tag == "issue": issues.append(self._parse_issue(subelement)) else: subelements.append(subelement) params["description"] = description params["design_description"] = design_description params["issues"] = issues params["todos"] = todos return params, subelements, attrib @staticmethod def _parse_simple_element(element): """Parse element as simple element and returns it's text. Element is simple when it contains no subelements and attributes. Returns element text if present or empty string if not """ if len(element) != 0: raise ParseError("Unexpected subelements in '" + element.tag + "'") if len(element.attrib) != 0: raise ParseError("Unexpected attributes in '" + element.tag + "'") return element.text if element.text is not None else "" @staticmethod def _parse_issue(element): """Parse element as issue. Issue must not contain subelements and attributes. Returns an instance of generator.Model.Issue """ if len(element) != 0: raise ParseError("Unexpected subelements in issue") if "creator" not in element.attrib: raise ParseError("No creator in issue") if len(element.attrib) != 1: raise ParseError("Unexpected attributes in issue") return Model.Issue( creator=element.attrib["creator"], value=element.text if element.text is not None else "") def _parse_enum_element(self, element): """Parse element as element of enumeration. Returns an instance of generator.Model.EnumElement """ params, subelements, attributes = self._parse_base_item(element, "") if len(subelements) != 0: raise ParseError("Unexpected subelements in enum element") self._ignore_attribute(attributes, "hexvalue") internal_name = None value = None for attribute in attributes: if attribute == "internal_name": internal_name = attributes[attribute] elif attribute == "value": try: value = int(attributes[attribute]) except: raise ParseError("Invalid value for enum element: '" + attributes[attribute] + "'") else: raise ParseError("Unexpected attribute '" + attribute + "' in enum element") params["internal_name"] = internal_name params["value"] = value # Magic usage is correct # pylint: disable=W0142 return Model.EnumElement(**params) def _parse_param(self, element, prefix): """Parse element as structure parameter. Returns an instance of generator.Model.Param """ params, subelements, attrib = \ self._parse_param_base_item(element, prefix) if len(attrib) != 0: raise ParseError("""Unknown attribute(s) {0} in param {1} """.format(attrib, params["name"])) if len(subelements) != 0: raise ParseError("Unknown subelements in param '" + params["name"] + "'") # Magic usage is correct # pylint: disable=W0142 return Model.Param(**params) def _parse_function_param(self, element, prefix): """Parse element as function parameter. 
Returns an instance of generator.Model.FunctionParam """ params, subelements, attrib = \ self._parse_param_base_item(element, prefix) default_value = None default_value_string = self._extract_attrib(attrib, "defvalue") if default_value_string is not None: param_type = params["param_type"] if type(param_type) is Model.Boolean: default_value = \ self._get_bool_from_string(default_value_string) elif type(param_type) is Model.Integer: try: default_value = int(default_value_string) except: raise ParseError("Invalid value for integer: '" + default_value_string + "'") elif type(param_type) is Model.Double: try: default_value = float(default_value_string) except: raise ParseError("Invalid value for float: '" + default_value_string + "'") elif type(param_type) is Model.String: default_value = default_value_string elif type(param_type) is Model.Enum or \ type(param_type) is Model.EnumSubset: if type(param_type) is Model.EnumSubset: allowed_elements = param_type.allowed_elements else: allowed_elements = param_type.elements if default_value_string not in allowed_elements: raise ParseError("Default value '" + default_value_string + "' for parameter '" + params["name"] + "' is not a member of " + type(param_type).__name__ + "'" + params["name"] + "'") default_value = allowed_elements[default_value_string] else: raise ParseError("Default value specified for " + type(param_type).__name__) params["default_value"] = default_value if len(attrib) != 0: raise ParseError("Unexpected attributes in parameter '" + params["name"] + "'") if len(subelements) != 0: raise ParseError("Unexpected subelements in parameter '" + params["name"] + "'") # Magic usage is correct # pylint: disable=W0142 return Model.FunctionParam(**params) def _parse_param_base_item(self, element, prefix): """Parse base param items. Returns params, other subelements and attributes. 
""" params, subelements, attrib = self._parse_base_item(element, "") params["is_mandatory"] = self._extract_optional_bool_attrib( attrib, "mandatory", True) scope = self._extract_attrib(attrib, "scope") if scope is not None: params["scope"] = scope self._ignore_attribute(attrib, "defvalue") param_type = None type_name = self._extract_attrib(attrib, "type") if type_name is None: raise ParseError("Type is not specified for parameter '" + params["name"] + "'") if type_name == "Boolean": param_type = Model.Boolean() elif type_name == "Integer" or \ type_name == "Float": min_value = self._extract_optional_number_attrib( attrib, "minvalue", int if type_name == "Integer" else float) max_value = self._extract_optional_number_attrib( attrib, "maxvalue", int if type_name == "Integer" else float) param_type = \ (Model.Integer if type_name == "Integer" else Model.Double)( min_value=min_value, max_value=max_value) elif type_name == "String": min_length = self._extract_optional_number_attrib( attrib, "minlength") # if minlength is not defined default value is 1 if min_length is None: min_length = 1 max_length = self._extract_optional_number_attrib( attrib, "maxlength") param_type = Model.String(min_length=min_length, max_length=max_length) else: if 1 == type_name.count("."): custom_type_name = type_name.replace(".", "_") else: custom_type_name = prefix + type_name if custom_type_name in self._types: param_type = self._types[custom_type_name] else: raise ParseError("Unknown type '" + type_name + "'") if self._extract_optional_bool_attrib(attrib, "array", False): min_size = self._extract_optional_number_attrib(attrib, "minsize") max_size = self._extract_optional_number_attrib(attrib, "maxsize") param_type = Model.Array(element_type=param_type, min_size=min_size, max_size=max_size) base_type = \ param_type.element_type if isinstance(param_type, Model.Array) \ else param_type other_subelements = [] for subelement in subelements: if subelement.tag == "element": if type(base_type) is not Model.Enum and \ type(base_type) is not Model.EnumSubset: raise ParseError("Elements specified for parameter '" + params["name"] + "' of type " + type(base_type).__name__) if type(base_type) is Model.Enum: base_type = Model.EnumSubset( name=params["name"], enum=base_type, description=params["description"], design_description=params["design_description"], issues=params["issues"], todos=params["todos"], allowed_elements={}) if "name" not in subelement.attrib: raise ParseError( "Element name is not specified for parameter '" + params["name"] + "'") element_name = subelement.attrib["name"] if len(subelement.attrib) != 1: raise ParseError("Unexpected attributes for element '" + element_name + "' of parameter '" + params["name"]) if len(subelement.getchildren()) != 0: raise ParseError("Unexpected subelements for element '" + element_name + "' of parameter '" + params["name"]) if element_name in base_type.allowed_elements: raise ParseError("Element '" + element_name + "' is specified more than once for" + " parameter '" + params["name"] + "'") if element_name not in base_type.enum.elements: raise ParseError("Element '" + element_name + "' is not a member of enum '" + base_type.enum.name + "'") base_type.allowed_elements[element_name] = \ base_type.enum.elements[element_name] else: other_subelements.append(subelement) if isinstance(param_type, Model.Array): param_type.element_type = base_type else: param_type = base_type params["param_type"] = param_type return params, other_subelements, attrib def _extract_optional_bool_attrib(self, 
attrib, name, default): """Extract boolean attribute with given name. Returns value of the attribute. """ value = self._extract_attrib(attrib, name) if value is None: value = default else: value = self._get_bool_from_string(value) return value def _extract_optional_number_attrib(self, attrib, name, _type=int): """Extract number attribute with given name. Returns value of the attribute. """ value = self._extract_attrib(attrib, name) if value is not None: try: value = _type(value) except: raise ParseError("Invlaid value for " + _type.__name__ + ": '" + value + "'") return value @staticmethod def _extract_attrib(attrib, name): """Extract attribute with given name. Returns value of the attribute. """ value = None if name in attrib: value = attrib[name] del attrib[name] return value @staticmethod def _get_bool_from_string(bool_string): """Convert string representation of boolean to real bool value. Returns converted value. """ value = None if bool_string in ['0', 'false']: value = False elif bool_string in ['1', 'true']: value = True else: raise ParseError("Invalid value for bool: '" + bool_string + "'") return value def _ignore_attribute(self, attrib, name): """To be called when attribute is meaningless in terms of code generation but it's presence is not issue. Removes this attribute from attribute list. """ if name in attrib: del attrib[name] print ("Ignoring attribute '" + name + "'") return True
license:          lgpl-2.1
hash:             -8,461,146,256,486,704,000
line_mean:        34.795424
line_max:         83
alpha_frac:       0.537487
autogenerated:    false
ratio:            4.926097
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        valeros/platformio
path:             platformio/builder/scripts/frameworks/energia.py
copies:           1
size:             2038
content:
# Copyright 2014-2016 Ivan Kravets <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Energia Energia Wiring-based framework enables pretty much anyone to start easily creating microcontroller-based projects and applications. Its easy-to-use libraries and functions provide developers of all experience levels to start blinking LEDs, buzzing buzzers and sensing sensors more quickly than ever before. http://energia.nu/reference/ """ from os.path import join from SCons.Script import DefaultEnvironment env = DefaultEnvironment() env.Replace( PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-energia${PLATFORM[2:]}") ) ENERGIA_VERSION = int( open(join(env.subst("$PLATFORMFW_DIR"), "version.txt")).read().replace(".", "").strip()) # include board variant env.VariantDirWrap( join("$BUILD_DIR", "FrameworkEnergiaVariant"), join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}") ) env.Append( CPPDEFINES=[ "ARDUINO=101", "ENERGIA=%d" % ENERGIA_VERSION ], CPPPATH=[ join("$BUILD_DIR", "FrameworkEnergia"), join("$BUILD_DIR", "FrameworkEnergiaVariant") ] ) if env.get("BOARD_OPTIONS", {}).get("build", {}).get("core") == "lm4f": env.Append( LINKFLAGS=["-Wl,--entry=ResetISR"] ) # # Target: Build Core Library # libs = [] libs.append(env.BuildLibrary( join("$BUILD_DIR", "FrameworkEnergia"), join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}") )) env.Append(LIBS=libs)
license:          apache-2.0
hash:             4,873,976,666,905,393,000
line_mean:        26.540541
line_max:         79
alpha_frac:       0.694799
autogenerated:    false
ratio:            3.483761
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        mdaif/olympia
path:             apps/compat/views.py
copies:           1
size:             7432
content:
import json import re from django import http from django.db.models import Count from django.shortcuts import redirect, render from django.views.decorators.csrf import csrf_exempt from tower import ugettext as _ import amo import amo.utils from addons.decorators import owner_or_unlisted_reviewer from amo.decorators import post_required from amo.utils import urlparams from amo.urlresolvers import reverse from addons.models import Addon from search.utils import floor_version from versions.compare import version_dict as vdict, version_int as vint from .models import CompatReport, AppCompat, CompatTotals from .forms import AppVerForm, CompatForm def index(request, version=None): template = 'compat/index.html' COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id] compat_dict = dict((v['main'], v) for v in COMPAT) if not COMPAT: return render(request, template, {'results': False}) if version not in compat_dict: return http.HttpResponseRedirect(reverse('compat.index', args=[COMPAT[0]['main']])) qs = AppCompat.search() binary = None initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'} initial.update(request.GET.items()) form = CompatForm(initial) if request.GET and form.is_valid(): if form.cleaned_data['appver']: app, ver = form.cleaned_data['appver'].split('-') if int(app) != request.APP.id or ver != version: new = reverse('compat.index', args=[ver], add_prefix=False) url = '/%s%s' % (amo.APP_IDS[int(app)].short, new) type_ = form.cleaned_data['type'] or None return http.HttpResponseRedirect(urlparams(url, type=type_)) if form.cleaned_data['type'] != 'all': binary = form.cleaned_data['type'] == 'binary' compat, app = compat_dict[version], str(request.APP.id) compat_queries = ( ('prev', qs.query(**{ 'top_95.%s.%s' % (app, vint(compat['previous'])): True, 'support.%s.max__gte' % app: vint(compat['previous'])})), ('top_95', qs.query(**{'top_95_all.%s' % app: True})), ('all', qs), ) compat_levels = [(key, version_compat(queryset, compat, app, binary)) for key, queryset in compat_queries] usage_addons, usage_total = usage_stats(request, compat, app, binary) return render(request, template, {'version': version, 'usage_addons': usage_addons, 'usage_total': usage_total, 'compat_levels': compat_levels, 'form': form, 'results': True, 'show_previous': request.GET.get('previous')}) def version_compat(qs, compat, app, binary): facets = [] for v, prev in zip(compat['versions'], (None,) + compat['versions']): d = {'from': vint(v)} if prev: d['to'] = vint(prev) facets.append(d) # Pick up everything else for an Other count. facets.append({'to': vint(compat['versions'][-1])}) facet = {'range': {'support.%s.max' % app: facets}} if binary is not None: qs = qs.query(binary=binary) qs = qs.facet(by_status=facet) result = qs[:0].raw() total_addons = result['hits']['total'] ranges = result['facets']['by_status']['ranges'] titles = compat['versions'] + (_('Other'),) faceted = [(v, r['count']) for v, r in zip(titles, ranges)] return total_addons, faceted def usage_stats(request, compat, app, binary=None): # Get the list of add-ons for usage stats. 
qs = AppCompat.search().order_by('-usage.%s' % app).values_dict() if request.GET.get('previous'): qs = qs.filter(**{ 'support.%s.max__gte' % app: vint(compat['previous'])}) else: qs = qs.filter(**{'support.%s.max__gte' % app: 0}) if binary is not None: qs = qs.filter(binary=binary) addons = amo.utils.paginate(request, qs) for obj in addons.object_list: obj['usage'] = obj['usage'][app] obj['max_version'] = obj['max_version'][app] return addons, CompatTotals.objects.get(app=app).total @csrf_exempt @post_required def incoming(request): # Turn camelCase into snake_case. def snake_case(s): return re.sub('[A-Z]+', '_\g<0>', s).lower() try: data = [(snake_case(k), v) for k, v in json.loads(request.body).items()] except Exception: return http.HttpResponseBadRequest() # Build up a new report. report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', '')) fields = CompatReport._meta.get_all_field_names() for key, value in data: if key in fields: setattr(report, key, value) else: return http.HttpResponseBadRequest() report.save() return http.HttpResponse(status=204) def reporter(request): query = request.GET.get('guid') if query: qs = None if query.isdigit(): qs = Addon.with_unlisted.filter(id=query) if not qs: qs = Addon.with_unlisted.filter(slug=query) if not qs: qs = Addon.with_unlisted.filter(guid=query) if not qs and len(query) > 4: qs = CompatReport.objects.filter(guid__startswith=query) if qs: guid = qs[0].guid addon = Addon.with_unlisted.get(guid=guid) if addon.is_listed or owner_or_unlisted_reviewer(request, addon): return redirect('compat.reporter_detail', guid) addons = (Addon.with_unlisted.filter(authors=request.user) if request.user.is_authenticated() else []) return render(request, 'compat/reporter.html', dict(query=query, addons=addons)) def reporter_detail(request, guid): try: addon = Addon.with_unlisted.get(guid=guid) except Addon.DoesNotExist: addon = None name = addon.name if addon else guid qs = CompatReport.objects.filter(guid=guid) if (addon and not addon.is_listed and not owner_or_unlisted_reviewer(request, addon)): # Not authorized? Let's pretend this addon simply doesn't exist. name = guid qs = CompatReport.objects.none() form = AppVerForm(request.GET) if request.GET and form.is_valid() and form.cleaned_data['appver']: # Apply filters only if we have a good app/version combination. app, ver = form.cleaned_data['appver'].split('-') app = amo.APP_IDS[int(app)] ver = vdict(floor_version(ver))['major'] # 3.6 => 3 # Ideally we'd have a `version_int` column to do strict version # comparing, but that's overkill for basic version filtering here. qs = qs.filter(app_guid=app.guid, app_version__startswith=str(ver) + '.') works_ = dict(qs.values_list('works_properly').annotate(Count('id'))) works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)} works_properly = request.GET.get('works_properly') if works_properly: qs = qs.filter(works_properly=works_properly) reports = amo.utils.paginate(request, qs.order_by('-created'), 100) return render(request, 'compat/reporter_detail.html', dict(reports=reports, works=works, works_properly=works_properly, name=name, guid=guid, form=form))
license:          bsd-3-clause
hash:             887,170,371,341,290,900
line_mean:        37.507772
line_max:         78
alpha_frac:       0.606432
autogenerated:    false
ratio:            3.659281
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        ajhager/copycat
path:             copycat/workspace/bond.py
copies:           1
size:             13667
content:
# Copyright (c) 2007-2017 Joseph Hager. # # Copycat is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License, # as published by the Free Software Foundation. # # Copycat is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Copycat; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Bond""" import math import copycat.toolbox as toolbox from copycat.workspace import Structure, Mapping class Bond(Structure): """Bond Attributes: bond_category: direction_category: from_object: to_object: bond_facet: Which facet is being related. from_object_descriptor: to_object_descriptor:""" def __init__(self, workspace, from_object, to_object, bond_category, bond_facet, from_object_descriptor, to_object_descriptor): """Initialize Bond.""" super(Bond, self).__init__() self.workspace = workspace self.slipnet = self.workspace.slipnet if from_object.left_string_position < to_object.left_string_position: self.direction_category = self.slipnet.plato_right self.left_object = from_object self.right_object = to_object else: self.direction_category = self.slipnet.plato_left self.left_object = to_object self.right_object = from_object if bond_category == self.slipnet.plato_sameness: self.direction_category = None self.left_string_position = min(from_object.left_string_position, to_object.left_string_position) self.right_string_position = max(from_object.right_string_position, to_object.right_string_position) self.proposal_level = None self.string = from_object.string self.structure_category = Bond self.bond_category = bond_category self.bond_facet = bond_facet self.from_object = from_object self.to_object = to_object self.from_object_descriptor = from_object_descriptor self.to_object_descriptor = to_object_descriptor def __eq__(self, other): """Return True if this and the given bond represent the same bond.""" if other is None or not isinstance(other, Bond): return False return all([self.from_object == other.from_object, self.to_object == other.to_object, self.bond_category == other.bond_category, self.direction_category == other.direction_category]) def __hash__(self): return hash((self.from_object, self.to_object, self.bond_category, self.direction_category)) def calculate_external_strength(self): """Return the bond's external strength.""" return self.local_support() def calculate_internal_strength(self): """Bonds between objects of the same type are stronger than bonds between different types. Letter category bonds are stronger than other types of bonds. 
A more general mechanism is needed.""" if type(self.from_object) is type(self.to_object): member_compatibility_factor = 1.0 else: member_compatibility_factor = .7 if self.bond_facet == self.slipnet.plato_letter_category: bond_facet_factor = 1.0 else: bond_facet_factor = .7 degree_of_association = self.bond_category.bond_degree_of_association() return min(100, round(member_compatibility_factor * \ bond_facet_factor * degree_of_association)) def choose_left_neighbor(self): """Return one of the left neighbors of the bond chosen by salience.""" if self.is_leftmost_in_string(): return None left_neighbors = [] for left_neighbor_object in self.left_object.all_left_neighbors(): left1 = left_neighbor_object.string_number left2 = self.left_object.string_number possible_left_neighbor = self.string.left_right_bonds.get((left1, left2)) if possible_left_neighbor != None: left_neighbors.append(possible_left_neighbor) saliences = [neighbor.salience() for neighbor in left_neighbors] return toolbox.weighted_select(saliences, left_neighbors) def choose_right_neighbor(self): """Return one of the right neighbors of the bond chosen by salience.""" if self.is_rightmost_in_string(): return None right_neighbors = [] for right_neighbor_object in self.right_object.all_right_neighbors(): right1 = self.right_object.string_number right2 = right_neighbor_object.string_number possible_right_neighbor = self.string.left_right_bonds.get((right1, right2)) if possible_right_neighbor != None: right_neighbors.append(possible_right_neighbor) saliences = [neighbor.salience() for neighbor in right_neighbors] return toolbox.weighted_select(saliences, right_neighbors) def happiness(self): """Return the happiness of the bond.""" if self.group != None: return self.group.total_strength return 0 def unhappiness(self): """Return the unhappiness of the bond.""" return 100 - self.happiness() def salience(self): """Return the salience of the bond.""" return round(toolbox.average(self.importance(), self.unhappiness())) def has_members(self, object1, object2): ''' Return True of the two objects are the objects in this bond. ''' objects = [self.from_object, self.to_object] return object1 in objects and object2 in objects def importance(self): """Sameness bonds are more important than other bonds of other categories.""" if self.bond_category == self.slipnet.plato_sameness: return 100 return 50 def incompatible_bonds(self): """Return the bonds that are incompatible with the bond.""" return list(set([self.left_object.right_bond, self.right_object.left_bond]) - set([None])) def incompatible_correspondences(self): """Return the correspondences that are incompatible with this bond. This only applies to directed bonds and to correspondences between objects at the edges of strings. 
E.g., in "abc -> abd, pqrs -> ?, if there is a correspondence between the "a" and the "p" (with concept mapping "leftmost -> leftmost"), and a right going succesor bond from the "a" to the "b" in "abc", then the correspondence will be incompatible with a left going predecessor bond from the "q" to the "p" in "pqrs", because the correspondence would then imply both "leftmost -> leftmost" (the letters) and "right -> left (the bonds.)""" incompatible_correspondences = [] if self.is_leftmost_in_string(): correspondence = self.left_object.correspondence if not correspondence: return [] other_object = correspondence.other_object(self.left_object) elif self.is_rightmost_in_string(): correspondence = self.right_object.correspondence if not correspondence: return [] other_object = correspondence.other_object(self.right_object) else: return [] plato_string_position_category = self.slipnet.plato_string_position_category string_position_category_mapping = None for mapping in correspondence.get_concept_mappings(): if mapping.description_type1 == plato_string_position_category: string_position_category_mapping = mapping if string_position_category_mapping is None: return [] if other_object.is_leftmost_in_string(): other_bond = other_object.right_bond elif other_object.is_rightmost_in_string(): other_bond = other_object.left_bond else: return [] if not other_bond: return [] if other_bond.direction_category is None: return [] mapping = Mapping(self.workspace, self.slipnet.plato_direction_category, self.slipnet.plato_direction_category, self.direction_category, other_bond.direction_category, None, None) if mapping.is_incompatible_concept_mapping(string_position_category_mapping): incompatible_correspondences.append(correspondence) return incompatible_correspondences def is_in_group(self, group): """Return True if the bond is in the given group.""" objects = group.objects return self.from_object in objects and self.to_object in objects def is_proposed(self): """Return True if proposal level is less than the level for built structures.""" return self.proposal_level < self.workspace.built def is_leftmost_in_string(self): """Return True if the bond is on the left edge of the string.""" return self.left_string_position == 0 def is_rightmost_in_string(self): """Return True if the bond is on the right edge of the string.""" return self.right_string_position == self.string.length - 1 def flipped_version(self): """Return the flipped version of this bond. For example, if the bond is a successor bond going to the right, returns a predecessor bond going to the left using the same two objects.""" category = self.slipnet.get_related_node(self.bond_category, self.slipnet.plato_opposite) flipped_bond = Bond(self.workspace, self.to_object, self.from_object, category, self.bond_facet, self.to_object_descriptor, self.from_object_descriptor) flipped_bond.proposal_level = self.proposal_level return flipped_bond def letter_span(self): """Return the number of letters spanned by the bond. This is 2 if the objects are not groups; otherwise it is the sum of the lengths of the groups.""" return self.from_object.letter_span() + self.to_object.letter_span() def local_density(self): """Return a rough measure of the density in the string of bonds of the same bond category and direction category as the given bond. 
This method is used in calculating the external strength of a bond.""" def calc(direction): """Inner calculation.""" slot_sum = 0 support_sum = 0 method_name = 'choose_%s_neighbor' % direction last_object = {'left': self.left_object, 'right': self.right_object}[direction] next_object = getattr(last_object, method_name)() while next_object: slot_sum += 1 first = next_object.string_number last = last_object.string_number bond = self.string.left_right_bonds.get((first, last)) if bond: if bond.bond_category == self.bond_category and \ bond.direction_category == self.direction_category: support_sum += 1 last_object = next_object next_object = getattr(next_object, method_name)() return slot_sum, support_sum slot_sum, support_sum = map(sum, zip(calc('left'), calc('right'))) if slot_sum == 0: return 100 return round(100 * (support_sum / float(slot_sum))) def local_support(self): """Return this bond's local support in the string.""" number = self.number_of_local_supporting_bonds() if number == 0: return 0 density = self.local_density() adjusted_density = 100 * (math.sqrt(density / 100.0)) number_factor = min(1, .6 ** (1.0 / number ** 3)) return round(adjusted_density * number_factor) def number_of_local_supporting_bonds(self): """Return the number of supporting bonds in the given bond's string. Looks at all the other bonds in the string, counting bonds of the same bond category and direction category. Does not take distance into account; all qualifying bonds in the string are counted the same.""" number_of_supporting_bonds = 0 letter_distance = self.workspace.letter_distance bonds = self.string.get_bonds() if self in bonds: bonds.remove(self) for bond in self.string.get_bonds(): if all([letter_distance(self.left_object, bond.left_object) != 0, letter_distance(self.right_object, bond.right_object) != 0, bond.bond_category == self.bond_category, bond.direction_category == self.direction_category]): number_of_supporting_bonds += 1 return number_of_supporting_bonds
license:          gpl-2.0
hash:             -5,235,821,656,455,039,000
line_mean:        42.113565
line_max:         88
alpha_frac:       0.615497
autogenerated:    false
ratio:            4.257632
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        bopen/mariobros
path:             mariobros/mario.py
copies:           1
size:             11792
content:
# -*- coding: utf-8 -*- # python 2 support via python-future from __future__ import absolute_import, division, print_function, unicode_literals from builtins import bytes, dict, int, str import atexit import collections import distutils.spawn import importlib import logging import re import shlex import subprocess import sys import uuid import future.utils import luigi from luigi.contrib.s3 import S3Target import mako.template LOGGER = logging.getLogger('luigi-interface') TEMPLATE = """% for var_def, val_def in default_namespace.items(): % if var_def not in ['action_template', 'sources_repls', 'target_pattern']: ${var_def} = ${val_def} %endif % endfor % if default_namespace['target_pattern']: ${default_namespace['target_pattern']}: ${default_namespace['sources_repls']} ${default_namespace['action_template']} % endif % for task_name, section_namespace in section_namespaces.items(): % if task_name != 'DEFAULT': [${task_name}] %for var, val in section_namespaces[task_name].items(): % if var not in ['action_template', 'sources_repls', 'target_pattern']: ${var} = ${val} % endif % endfor ${section_namespace['target_pattern']}: ${section_namespace['sources_repls']} ${section_namespace['action_template']} % endif % endfor """ def pretty_unicode(obj): """Filter to pretty print iterables.""" if not isinstance(obj, (str, bytes)): try: return ' '.join(str(item) for item in obj) except TypeError: pass return str(obj) class ExistingFile(luigi.ExternalTask): """Define Luigi External Task class for existing files requires.""" target = luigi.Parameter() def output(self): return luigi.LocalTarget(self.target) class ReRuleTask(luigi.Task): """Define Luigi task class through regular expression. """ # target_pattern = '' # sources_repls = [] # action_namespace = {} # action_template = '' # SHALL = '/bin/bash' @staticmethod def factory( name, target_pattern, sources_repls=(), action_template='', action_namespace={}, priority=0, worker_timeout=None, resources={}, disabled=False, dry_run_suffix='', SHELL='/bin/bash'): """Create Luigi task class. :param str name: Task name. :param str target_pattern: Target pattern. :param list sources_repls: List of source replacements. :param str action_template: Action template. :param dict action_namespace: Action namespace. :param int priority: Priority Luigi task metadata. :param int worker_timeout: Worker timeout Luigi task metadata. :param dict resources: Resources Luigi task metadata. :param bool disabled: Disabled Luigi task metadata. :param unicode dry_run_suffix: Suffix to be added to file created during dry run. :rtype: subclass_of_ReRuleTask """ # FIXME: move class init code to init method? if not target_pattern.startswith('(?<![^/])'): target_pattern = '(?<![^/])' + target_pattern if not target_pattern.endswith('$'): target_pattern += '$' if action_template.strip() == '': action_template = 'echo "${SOURCES} -> ${TARGET}"' _target_pattern = re.compile(target_pattern) return type(future.utils.native_str(name), (ReRuleTask,), locals()) @classmethod def match(cls, target): """Perform target matching. :rtype: bool """ return bool(cls._target_pattern.search(target)) target = luigi.Parameter() def render_sources(self): """Perform rendering of the sources. :rtype: str """ return tuple(self._target_pattern.sub(repl, self.target) for repl in self.sources_repls) def render_action(self): """Perform rendering of the action. 
:rtype: str """ sources = self.render_sources() match = self._target_pattern.match(self.target) target_namespace = dict(TARGET=self.target, SOURCES=sources, MATCH=match) return render_template( self.action_template, target_namespace, default_namespace=self.action_namespace ) def render_shell(self): sources = self.render_sources() match = self._target_pattern.match(self.target) target_namespace = dict(TARGET=self.target, SOURCES=sources, MATCH=match) return render_template( self.SHELL, target_namespace, default_namespace=self.action_namespace ) def output(self): """ The output that this Task produces. See :ref:`Task.output` :rtype: luigi.LocalTarget """ if self.target.startswith('s3://'): return S3Target(self.target) else: return luigi.LocalTarget(self.target + self.dry_run_suffix) def requires(self): """ The Tasks that this Task depends on. See :ref:`Task.requires` :rtype: list """ required = [] for source in self.render_sources(): for task_rule in ReRuleTask.__subclasses__(): if task_rule.match(source): required.append(task_rule(target=source)) break else: required.append(ExistingFile(source)) return required def run(self): """ The task run method, to be overridden in a subclass. See :ref:`Task.run` """ action = self.render_action() if self.dry_run_suffix: # log intended command line but touch the dry_run target instead LOGGER.info(action) action = 'touch ' + self.target + self.dry_run_suffix # register the dry_run target removal at program exit atexit.register(self.remove_dry_run_file) args = ['/bin/bash', '-c', action] else: shell = self.render_shell() args = shlex.split(shell) + ['-c', action] # be sure to use the abspath of the executable based on the PATH environment variable args[0] = distutils.spawn.find_executable(args[0]) LOGGER.info('COMMAND: {}'.format(args)) subprocess.check_call(args) def remove_dry_run_file(self): """Remove files generated by dry run process.""" subprocess.call('rm -f ' + self.target + self.dry_run_suffix, shell=True) def render_template(template, local_namespace, default_namespace={}): """Return the rendered template merging local and default namespaces. :param unicode template: Template. :param dict local_namespace: Local namespace. :param dict default_namespace: Default namespace. :rtype: str """ namespace = default_namespace.copy() namespace.update(local_namespace) if 'IMPORT_MODULES' in namespace: import_modules = namespace['IMPORT_MODULES'].split() namespace.update({name: importlib.import_module(name) for name in import_modules}) template_object = mako.template.Template( template, strict_undefined=True, imports=['from mariobros.mario import pretty_unicode'], # enable the filter default_filters=['pretty_unicode'], ) return template_object.render(**namespace) def render_namespace(namespace, default_namespace={}, skip_names=('action_template', 'SHELL')): """Return Render section namespaces with default section namespaces also. :param dict namespace: Section namespace. :param dict default_namespace: default section namespace. :param list skip_names: Namespace names to skip in the render process. 
:rtype: dict """ torender_namespace = {k: v for k, v in namespace.items() if k not in skip_names} rendered_namespace = {k: v for k, v in namespace.items() if k in skip_names} while len(torender_namespace): loop = True for key, value_template in list(torender_namespace.items()): try: value = render_template(value_template, rendered_namespace, default_namespace) torender_namespace.pop(key) rendered_namespace[key] = value loop = False except NameError: pass if loop: raise NameError("Can't render: {!r}".format(torender_namespace)) return collections.OrderedDict((k, rendered_namespace[k]) for k in namespace) def register_tasks(namespaces, default_namespace={}, dry_run_suffix=''): """Return a Luigi task class after parsed Luigi task metadata. :param dict namespaces: Task namespaces. :param dict default_namespace: Default namespaces. :param unicode dry_run_suffix: Suffix to be added to file created during dry run. :rtype: iterable """ for task_name, namespace in namespaces.items(): action_namespace = default_namespace.copy() action_namespace.update(namespace) task_keys = ['target_pattern', 'sources_repls', 'action_template', 'SHELL'] task_namespace = {k: action_namespace[k] for k in task_keys if k in action_namespace} task_namespace['sources_repls'] = task_namespace['sources_repls'].split() # luigi attributes task_namespace['resources'] = {k.partition('_')[2]: int(v) for k, v in namespace.items() if k.startswith('RESOURCES_')} task_namespace.update( {k: int(namespace[k]) for k in ['priority', 'disabled', 'worker_timeout'] if k in namespace}) yield ReRuleTask.factory( task_name, dry_run_suffix=dry_run_suffix, action_namespace=action_namespace, **task_namespace ) def print_namespaces(default_namespace, section_namespaces): """Print namespaces with the MarioFile format. :param dict default_namespace: Default namespace dictionary. :param dict section_namespaces: Section namespaces dictionary. :return: str """ template = mako.template.Template(TEMPLATE) namespaces = template.render( default_namespace=default_namespace, section_namespaces=section_namespaces ) return namespaces def render_config(section_namespaces): """Parse and render a MarioFile. :param dict section_namespaces: Section namespaces dictionary. :return: (dict, dict, dict) """ default_namespace = render_namespace(section_namespaces['DEFAULT']) rendered_namespaces = collections.OrderedDict( (k, render_namespace(v, default_namespace)) for k, v in section_namespaces.items() ) return default_namespace, rendered_namespaces def mario(rendered_namespaces, default_namespace, targets=('DEFAULT',), dry_run=False): """Generate Luigi tasks' file from MarioFile and Luigi template file :param dict rendered_namespaces: Rendered namespaces dictionary. :param dict default_namespace: Default namespace dictionary. :param iterable targets: List of targets. :param bool dry_run: Dry run flag. :rtype : iterable """ # ensure '.' is present in sys.path so 'IMPORT_MODULES = local_module' works if '.' not in sys.path: sys.path.append('.') dry_run_suffix = '-dry_run-' + str(uuid.uuid4()) if dry_run else '' rendered_namespaces = collections.OrderedDict(reversed(list(rendered_namespaces.items()))) tasks = list(register_tasks( rendered_namespaces, default_namespace=default_namespace, dry_run_suffix=dry_run_suffix )) target_tasks = [] for target in targets: for task_rule in tasks: if task_rule.match(target): target_tasks.append(task_rule(target=target)) break return target_tasks
license:          apache-2.0
hash:             -5,749,971,859,850,435,000
line_mean:        34.841945
line_max:         99
alpha_frac:       0.6356
autogenerated:    false
ratio:            4.215946
config_test:      false
has_no_keywords:  false
few_assignments:  false

repo_name:        anbangleo/NlsdeWeb
path:             Python-3.6.0/Lib/test/test_mailbox.py
copies:           2
size:             92803
content:
import os import sys import time import stat import socket import email import email.message import re import io import tempfile from test import support import unittest import textwrap import mailbox import glob class TestBase: all_mailbox_types = (mailbox.Message, mailbox.MaildirMessage, mailbox.mboxMessage, mailbox.MHMessage, mailbox.BabylMessage, mailbox.MMDFMessage) def _check_sample(self, msg): # Inspect a mailbox.Message representation of the sample message self.assertIsInstance(msg, email.message.Message) self.assertIsInstance(msg, mailbox.Message) for key, value in _sample_headers.items(): self.assertIn(value, msg.get_all(key)) self.assertTrue(msg.is_multipart()) self.assertEqual(len(msg.get_payload()), len(_sample_payloads)) for i, payload in enumerate(_sample_payloads): part = msg.get_payload(i) self.assertIsInstance(part, email.message.Message) self.assertNotIsInstance(part, mailbox.Message) self.assertEqual(part.get_payload(), payload) def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): support.rmtree(target) elif os.path.exists(target): support.unlink(target) class TestMailbox(TestBase): maxDiff = None _factory = None # Overridden by subclasses to reuse tests _template = 'From: foo\n\n%s\n' def setUp(self): self._path = support.TESTFN self._delete_recursively(self._path) self._box = self._factory(self._path) def tearDown(self): self._box.close() self._delete_recursively(self._path) def test_add(self): # Add copies of a sample message keys = [] keys.append(self._box.add(self._template % 0)) self.assertEqual(len(self._box), 1) keys.append(self._box.add(mailbox.Message(_sample_message))) self.assertEqual(len(self._box), 2) keys.append(self._box.add(email.message_from_string(_sample_message))) self.assertEqual(len(self._box), 3) keys.append(self._box.add(io.BytesIO(_bytes_sample_message))) self.assertEqual(len(self._box), 4) keys.append(self._box.add(_sample_message)) self.assertEqual(len(self._box), 5) keys.append(self._box.add(_bytes_sample_message)) self.assertEqual(len(self._box), 6) with self.assertWarns(DeprecationWarning): keys.append(self._box.add( io.TextIOWrapper(io.BytesIO(_bytes_sample_message)))) self.assertEqual(len(self._box), 7) self.assertEqual(self._box.get_string(keys[0]), self._template % 0) for i in (1, 2, 3, 4, 5, 6): self._check_sample(self._box[keys[i]]) _nonascii_msg = textwrap.dedent("""\ From: foo Subject: Falinaptár házhozszállítással. Már rendeltél? 
0 """) def test_add_invalid_8bit_bytes_header(self): key = self._box.add(self._nonascii_msg.encode('latin-1')) self.assertEqual(len(self._box), 1) self.assertEqual(self._box.get_bytes(key), self._nonascii_msg.encode('latin-1')) def test_invalid_nonascii_header_as_string(self): subj = self._nonascii_msg.splitlines()[1] key = self._box.add(subj.encode('latin-1')) self.assertEqual(self._box.get_string(key), 'Subject: =?unknown-8bit?b?RmFsaW5hcHThciBo4Xpob3pzeuFsbO104XNz' 'YWwuIE3hciByZW5kZWx06Ww/?=\n\n') def test_add_nonascii_string_header_raises(self): with self.assertRaisesRegex(ValueError, "ASCII-only"): self._box.add(self._nonascii_msg) self._box.flush() self.assertEqual(len(self._box), 0) self.assertMailboxEmpty() def test_add_that_raises_leaves_mailbox_empty(self): def raiser(*args, **kw): raise Exception("a fake error") support.patch(self, email.generator.BytesGenerator, 'flatten', raiser) with self.assertRaises(Exception): self._box.add(email.message_from_string("From: Alphöso")) self.assertEqual(len(self._box), 0) self._box.close() self.assertMailboxEmpty() _non_latin_bin_msg = textwrap.dedent("""\ From: [email protected] To: báz Subject: Maintenant je vous présente mon collègue, le pouf célèbre \tJean de Baddie Mime-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit Да, они летят. """).encode('utf-8') def test_add_8bit_body(self): key = self._box.add(self._non_latin_bin_msg) self.assertEqual(self._box.get_bytes(key), self._non_latin_bin_msg) with self._box.get_file(key) as f: self.assertEqual(f.read(), self._non_latin_bin_msg.replace(b'\n', os.linesep.encode())) self.assertEqual(self._box[key].get_payload(), "Да, они летят.\n") def test_add_binary_file(self): with tempfile.TemporaryFile('wb+') as f: f.write(_bytes_sample_message) f.seek(0) key = self._box.add(f) self.assertEqual(self._box.get_bytes(key).split(b'\n'), _bytes_sample_message.split(b'\n')) def test_add_binary_nonascii_file(self): with tempfile.TemporaryFile('wb+') as f: f.write(self._non_latin_bin_msg) f.seek(0) key = self._box.add(f) self.assertEqual(self._box.get_bytes(key).split(b'\n'), self._non_latin_bin_msg.split(b'\n')) def test_add_text_file_warns(self): with tempfile.TemporaryFile('w+') as f: f.write(_sample_message) f.seek(0) with self.assertWarns(DeprecationWarning): key = self._box.add(f) self.assertEqual(self._box.get_bytes(key).split(b'\n'), _bytes_sample_message.split(b'\n')) def test_add_StringIO_warns(self): with self.assertWarns(DeprecationWarning): key = self._box.add(io.StringIO(self._template % "0")) self.assertEqual(self._box.get_string(key), self._template % "0") def test_add_nonascii_StringIO_raises(self): with self.assertWarns(DeprecationWarning): with self.assertRaisesRegex(ValueError, "ASCII-only"): self._box.add(io.StringIO(self._nonascii_msg)) self.assertEqual(len(self._box), 0) self._box.close() self.assertMailboxEmpty() def test_remove(self): # Remove messages using remove() self._test_remove_or_delitem(self._box.remove) def test_delitem(self): # Remove messages using __delitem__() self._test_remove_or_delitem(self._box.__delitem__) def _test_remove_or_delitem(self, method): # (Used by test_remove() and test_delitem().) 
key0 = self._box.add(self._template % 0) key1 = self._box.add(self._template % 1) self.assertEqual(len(self._box), 2) method(key0) self.assertEqual(len(self._box), 1) self.assertRaises(KeyError, lambda: self._box[key0]) self.assertRaises(KeyError, lambda: method(key0)) self.assertEqual(self._box.get_string(key1), self._template % 1) key2 = self._box.add(self._template % 2) self.assertEqual(len(self._box), 2) method(key2) self.assertEqual(len(self._box), 1) self.assertRaises(KeyError, lambda: self._box[key2]) self.assertRaises(KeyError, lambda: method(key2)) self.assertEqual(self._box.get_string(key1), self._template % 1) method(key1) self.assertEqual(len(self._box), 0) self.assertRaises(KeyError, lambda: self._box[key1]) self.assertRaises(KeyError, lambda: method(key1)) def test_discard(self, repetitions=10): # Discard messages key0 = self._box.add(self._template % 0) key1 = self._box.add(self._template % 1) self.assertEqual(len(self._box), 2) self._box.discard(key0) self.assertEqual(len(self._box), 1) self.assertRaises(KeyError, lambda: self._box[key0]) self._box.discard(key0) self.assertEqual(len(self._box), 1) self.assertRaises(KeyError, lambda: self._box[key0]) def test_get(self): # Retrieve messages using get() key0 = self._box.add(self._template % 0) msg = self._box.get(key0) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.get_payload(), '0\n') self.assertIsNone(self._box.get('foo')) self.assertIs(self._box.get('foo', False), False) self._box.close() self._box = self._factory(self._path) key1 = self._box.add(self._template % 1) msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.get_payload(), '1\n') def test_getitem(self): # Retrieve message using __getitem__() key0 = self._box.add(self._template % 0) msg = self._box[key0] self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.get_payload(), '0\n') self.assertRaises(KeyError, lambda: self._box['foo']) self._box.discard(key0) self.assertRaises(KeyError, lambda: self._box[key0]) def test_get_message(self): # Get Message representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) msg0 = self._box.get_message(key0) self.assertIsInstance(msg0, mailbox.Message) self.assertEqual(msg0['from'], 'foo') self.assertEqual(msg0.get_payload(), '0\n') self._check_sample(self._box.get_message(key1)) def test_get_bytes(self): # Get bytes representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) self.assertEqual(self._box.get_bytes(key0), (self._template % 0).encode('ascii')) self.assertEqual(self._box.get_bytes(key1), _bytes_sample_message) def test_get_string(self): # Get string representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) self.assertEqual(self._box.get_string(key0), self._template % 0) self.assertEqual(self._box.get_string(key1).split('\n'), _sample_message.split('\n')) def test_get_file(self): # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) with self._box.get_file(key0) as file: data0 = file.read() with self._box.get_file(key1) as file: data1 = file.read() self.assertEqual(data0.decode('ascii').replace(os.linesep, '\n'), self._template % 0) self.assertEqual(data1.decode('ascii').replace(os.linesep, '\n'), _sample_message) def test_get_file_can_be_closed_twice(self): # Issue 11700 key = self._box.add(_sample_message) f = self._box.get_file(key) f.close() f.close() def 
test_iterkeys(self): # Get keys using iterkeys() self._check_iteration(self._box.iterkeys, do_keys=True, do_values=False) def test_keys(self): # Get keys using keys() self._check_iteration(self._box.keys, do_keys=True, do_values=False) def test_itervalues(self): # Get values using itervalues() self._check_iteration(self._box.itervalues, do_keys=False, do_values=True) def test_iter(self): # Get values using __iter__() self._check_iteration(self._box.__iter__, do_keys=False, do_values=True) def test_values(self): # Get values using values() self._check_iteration(self._box.values, do_keys=False, do_values=True) def test_iteritems(self): # Get keys and values using iteritems() self._check_iteration(self._box.iteritems, do_keys=True, do_values=True) def test_items(self): # Get keys and values using items() self._check_iteration(self._box.items, do_keys=True, do_values=True) def _check_iteration(self, method, do_keys, do_values, repetitions=10): for value in method(): self.fail("Not empty") keys, values = [], [] for i in range(repetitions): keys.append(self._box.add(self._template % i)) values.append(self._template % i) if do_keys and not do_values: returned_keys = list(method()) elif do_values and not do_keys: returned_values = list(method()) else: returned_keys, returned_values = [], [] for key, value in method(): returned_keys.append(key) returned_values.append(value) if do_keys: self.assertEqual(len(keys), len(returned_keys)) self.assertEqual(set(keys), set(returned_keys)) if do_values: count = 0 for value in returned_values: self.assertEqual(value['from'], 'foo') self.assertLess(int(value.get_payload()), repetitions) count += 1 self.assertEqual(len(values), count) def test_contains(self): # Check existence of keys using __contains__() self.assertNotIn('foo', self._box) key0 = self._box.add(self._template % 0) self.assertIn(key0, self._box) self.assertNotIn('foo', self._box) key1 = self._box.add(self._template % 1) self.assertIn(key1, self._box) self.assertIn(key0, self._box) self.assertNotIn('foo', self._box) self._box.remove(key0) self.assertNotIn(key0, self._box) self.assertIn(key1, self._box) self.assertNotIn('foo', self._box) self._box.remove(key1) self.assertNotIn(key1, self._box) self.assertNotIn(key0, self._box) self.assertNotIn('foo', self._box) def test_len(self, repetitions=10): # Get message count keys = [] for i in range(repetitions): self.assertEqual(len(self._box), i) keys.append(self._box.add(self._template % i)) self.assertEqual(len(self._box), i + 1) for i in range(repetitions): self.assertEqual(len(self._box), repetitions - i) self._box.remove(keys[i]) self.assertEqual(len(self._box), repetitions - i - 1) def test_set_item(self): # Modify messages using __setitem__() key0 = self._box.add(self._template % 'original 0') self.assertEqual(self._box.get_string(key0), self._template % 'original 0') key1 = self._box.add(self._template % 'original 1') self.assertEqual(self._box.get_string(key1), self._template % 'original 1') self._box[key0] = self._template % 'changed 0' self.assertEqual(self._box.get_string(key0), self._template % 'changed 0') self._box[key1] = self._template % 'changed 1' self.assertEqual(self._box.get_string(key1), self._template % 'changed 1') self._box[key0] = _sample_message self._check_sample(self._box[key0]) self._box[key1] = self._box[key0] self._check_sample(self._box[key1]) self._box[key0] = self._template % 'original 0' self.assertEqual(self._box.get_string(key0), self._template % 'original 0') self._check_sample(self._box[key1]) 
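        # Assigning to an unknown key must raise KeyError rather than
        # silently creating a new message.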
self.assertRaises(KeyError, lambda: self._box.__setitem__('foo', 'bar')) self.assertRaises(KeyError, lambda: self._box['foo']) self.assertEqual(len(self._box), 2) def test_clear(self, iterations=10): # Remove all messages using clear() keys = [] for i in range(iterations): self._box.add(self._template % i) for i, key in enumerate(keys): self.assertEqual(self._box.get_string(key), self._template % i) self._box.clear() self.assertEqual(len(self._box), 0) for i, key in enumerate(keys): self.assertRaises(KeyError, lambda: self._box.get_string(key)) def test_pop(self): # Get and remove a message using pop() key0 = self._box.add(self._template % 0) self.assertIn(key0, self._box) key1 = self._box.add(self._template % 1) self.assertIn(key1, self._box) self.assertEqual(self._box.pop(key0).get_payload(), '0\n') self.assertNotIn(key0, self._box) self.assertIn(key1, self._box) key2 = self._box.add(self._template % 2) self.assertIn(key2, self._box) self.assertEqual(self._box.pop(key2).get_payload(), '2\n') self.assertNotIn(key2, self._box) self.assertIn(key1, self._box) self.assertEqual(self._box.pop(key1).get_payload(), '1\n') self.assertNotIn(key1, self._box) self.assertEqual(len(self._box), 0) def test_popitem(self, iterations=10): # Get and remove an arbitrary (key, message) using popitem() keys = [] for i in range(10): keys.append(self._box.add(self._template % i)) seen = [] for i in range(10): key, msg = self._box.popitem() self.assertIn(key, keys) self.assertNotIn(key, seen) seen.append(key) self.assertEqual(int(msg.get_payload()), keys.index(key)) self.assertEqual(len(self._box), 0) for key in keys: self.assertRaises(KeyError, lambda: self._box[key]) def test_update(self): # Modify multiple messages using update() key0 = self._box.add(self._template % 'original 0') key1 = self._box.add(self._template % 'original 1') key2 = self._box.add(self._template % 'original 2') self._box.update({key0: self._template % 'changed 0', key2: _sample_message}) self.assertEqual(len(self._box), 3) self.assertEqual(self._box.get_string(key0), self._template % 'changed 0') self.assertEqual(self._box.get_string(key1), self._template % 'original 1') self._check_sample(self._box[key2]) self._box.update([(key2, self._template % 'changed 2'), (key1, self._template % 'changed 1'), (key0, self._template % 'original 0')]) self.assertEqual(len(self._box), 3) self.assertEqual(self._box.get_string(key0), self._template % 'original 0') self.assertEqual(self._box.get_string(key1), self._template % 'changed 1') self.assertEqual(self._box.get_string(key2), self._template % 'changed 2') self.assertRaises(KeyError, lambda: self._box.update({'foo': 'bar', key0: self._template % "changed 0"})) self.assertEqual(len(self._box), 3) self.assertEqual(self._box.get_string(key0), self._template % "changed 0") self.assertEqual(self._box.get_string(key1), self._template % "changed 1") self.assertEqual(self._box.get_string(key2), self._template % "changed 2") def test_flush(self): # Write changes to disk self._test_flush_or_close(self._box.flush, True) def test_popitem_and_flush_twice(self): # See #15036. 
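        # Two messages are added and flushed; popitem() and flush() are then
        # alternated, and flushing after each removal must not corrupt the
        # on-disk mailbox.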
self._box.add(self._template % 0) self._box.add(self._template % 1) self._box.flush() self._box.popitem() self._box.flush() self._box.popitem() self._box.flush() def test_lock_unlock(self): # Lock and unlock the mailbox self.assertFalse(os.path.exists(self._get_lock_path())) self._box.lock() self.assertTrue(os.path.exists(self._get_lock_path())) self._box.unlock() self.assertFalse(os.path.exists(self._get_lock_path())) def test_close(self): # Close mailbox and flush changes to disk self._test_flush_or_close(self._box.close, False) def _test_flush_or_close(self, method, should_call_close): contents = [self._template % i for i in range(3)] self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) oldbox = self._box method() if should_call_close: self._box.close() self._box = self._factory(self._path) keys = self._box.keys() self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) oldbox.close() def test_dump_message(self): # Write message representations to disk for input in (email.message_from_string(_sample_message), _sample_message, io.BytesIO(_bytes_sample_message)): output = io.BytesIO() self._box._dump_message(input, output) self.assertEqual(output.getvalue(), _bytes_sample_message.replace(b'\n', os.linesep.encode())) output = io.BytesIO() self.assertRaises(TypeError, lambda: self._box._dump_message(None, output)) def _get_lock_path(self): # Return the path of the dot lock file. May be overridden. return self._path + '.lock' class TestMailboxSuperclass(TestBase, unittest.TestCase): def test_notimplemented(self): # Test that all Mailbox methods raise NotImplementedException. box = mailbox.Mailbox('path') self.assertRaises(NotImplementedError, lambda: box.add('')) self.assertRaises(NotImplementedError, lambda: box.remove('')) self.assertRaises(NotImplementedError, lambda: box.__delitem__('')) self.assertRaises(NotImplementedError, lambda: box.discard('')) self.assertRaises(NotImplementedError, lambda: box.__setitem__('', '')) self.assertRaises(NotImplementedError, lambda: box.iterkeys()) self.assertRaises(NotImplementedError, lambda: box.keys()) self.assertRaises(NotImplementedError, lambda: box.itervalues().__next__()) self.assertRaises(NotImplementedError, lambda: box.__iter__().__next__()) self.assertRaises(NotImplementedError, lambda: box.values()) self.assertRaises(NotImplementedError, lambda: box.iteritems().__next__()) self.assertRaises(NotImplementedError, lambda: box.items()) self.assertRaises(NotImplementedError, lambda: box.get('')) self.assertRaises(NotImplementedError, lambda: box.__getitem__('')) self.assertRaises(NotImplementedError, lambda: box.get_message('')) self.assertRaises(NotImplementedError, lambda: box.get_string('')) self.assertRaises(NotImplementedError, lambda: box.get_bytes('')) self.assertRaises(NotImplementedError, lambda: box.get_file('')) self.assertRaises(NotImplementedError, lambda: '' in box) self.assertRaises(NotImplementedError, lambda: box.__contains__('')) self.assertRaises(NotImplementedError, lambda: box.__len__()) self.assertRaises(NotImplementedError, lambda: box.clear()) self.assertRaises(NotImplementedError, lambda: box.pop('')) self.assertRaises(NotImplementedError, lambda: box.popitem()) self.assertRaises(NotImplementedError, lambda: box.update((('', ''),))) self.assertRaises(NotImplementedError, lambda: box.flush()) self.assertRaises(NotImplementedError, lambda: box.lock()) self.assertRaises(NotImplementedError, lambda: box.unlock()) self.assertRaises(NotImplementedError, 
lambda: box.close()) class TestMaildir(TestMailbox, unittest.TestCase): _factory = lambda self, path, factory=None: mailbox.Maildir(path, factory) def setUp(self): TestMailbox.setUp(self) if (os.name == 'nt') or (sys.platform == 'cygwin'): self._box.colon = '!' def assertMailboxEmpty(self): self.assertEqual(os.listdir(os.path.join(self._path, 'tmp')), []) def test_add_MM(self): # Add a MaildirMessage instance msg = mailbox.MaildirMessage(self._template % 0) msg.set_subdir('cur') msg.set_info('foo') key = self._box.add(msg) self.assertTrue(os.path.exists(os.path.join(self._path, 'cur', '%s%sfoo' % (key, self._box.colon)))) def test_get_MM(self): # Get a MaildirMessage instance msg = mailbox.MaildirMessage(self._template % 0) msg.set_subdir('cur') msg.set_flags('RF') key = self._box.add(msg) msg_returned = self._box.get_message(key) self.assertIsInstance(msg_returned, mailbox.MaildirMessage) self.assertEqual(msg_returned.get_subdir(), 'cur') self.assertEqual(msg_returned.get_flags(), 'FR') def test_set_MM(self): # Set with a MaildirMessage instance msg0 = mailbox.MaildirMessage(self._template % 0) msg0.set_flags('TP') key = self._box.add(msg0) msg_returned = self._box.get_message(key) self.assertEqual(msg_returned.get_subdir(), 'new') self.assertEqual(msg_returned.get_flags(), 'PT') msg1 = mailbox.MaildirMessage(self._template % 1) self._box[key] = msg1 msg_returned = self._box.get_message(key) self.assertEqual(msg_returned.get_subdir(), 'new') self.assertEqual(msg_returned.get_flags(), '') self.assertEqual(msg_returned.get_payload(), '1\n') msg2 = mailbox.MaildirMessage(self._template % 2) msg2.set_info('2,S') self._box[key] = msg2 self._box[key] = self._template % 3 msg_returned = self._box.get_message(key) self.assertEqual(msg_returned.get_subdir(), 'new') self.assertEqual(msg_returned.get_flags(), 'S') self.assertEqual(msg_returned.get_payload(), '3\n') def test_consistent_factory(self): # Add a message. msg = mailbox.MaildirMessage(self._template % 0) msg.set_subdir('cur') msg.set_flags('RF') key = self._box.add(msg) # Create new mailbox with class FakeMessage(mailbox.MaildirMessage): pass box = mailbox.Maildir(self._path, factory=FakeMessage) box.colon = self._box.colon msg2 = box.get_message(key) self.assertIsInstance(msg2, FakeMessage) def test_initialize_new(self): # Initialize a non-existent mailbox self.tearDown() self._box = mailbox.Maildir(self._path) self._check_basics() self._delete_recursively(self._path) self._box = self._factory(self._path, factory=None) self._check_basics() def test_initialize_existing(self): # Initialize an existing mailbox self.tearDown() for subdir in '', 'tmp', 'new', 'cur': os.mkdir(os.path.normpath(os.path.join(self._path, subdir))) self._box = mailbox.Maildir(self._path) self._check_basics() def _check_basics(self, factory=None): # (Used by test_open_new() and test_open_existing().) 
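        # Called from test_initialize_new() and test_initialize_existing():
        # the box path must be absolute, the factory must match, and the
        # tmp/new/cur subdirectories must all exist.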
self.assertEqual(self._box._path, os.path.abspath(self._path)) self.assertEqual(self._box._factory, factory) for subdir in '', 'tmp', 'new', 'cur': path = os.path.join(self._path, subdir) mode = os.stat(path)[stat.ST_MODE] self.assertTrue(stat.S_ISDIR(mode), "Not a directory: '%s'" % path) def test_list_folders(self): # List folders self._box.add_folder('one') self._box.add_folder('two') self._box.add_folder('three') self.assertEqual(len(self._box.list_folders()), 3) self.assertEqual(set(self._box.list_folders()), set(('one', 'two', 'three'))) def test_get_folder(self): # Open folders self._box.add_folder('foo.bar') folder0 = self._box.get_folder('foo.bar') folder0.add(self._template % 'bar') self.assertTrue(os.path.isdir(os.path.join(self._path, '.foo.bar'))) folder1 = self._box.get_folder('foo.bar') self.assertEqual(folder1.get_string(folder1.keys()[0]), self._template % 'bar') def test_add_and_remove_folders(self): # Delete folders self._box.add_folder('one') self._box.add_folder('two') self.assertEqual(len(self._box.list_folders()), 2) self.assertEqual(set(self._box.list_folders()), set(('one', 'two'))) self._box.remove_folder('one') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.add_folder('three') self.assertEqual(len(self._box.list_folders()), 2) self.assertEqual(set(self._box.list_folders()), set(('two', 'three'))) self._box.remove_folder('three') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.remove_folder('two') self.assertEqual(len(self._box.list_folders()), 0) self.assertEqual(self._box.list_folders(), []) def test_clean(self): # Remove old files from 'tmp' foo_path = os.path.join(self._path, 'tmp', 'foo') bar_path = os.path.join(self._path, 'tmp', 'bar') with open(foo_path, 'w') as f: f.write("@") with open(bar_path, 'w') as f: f.write("@") self._box.clean() self.assertTrue(os.path.exists(foo_path)) self.assertTrue(os.path.exists(bar_path)) foo_stat = os.stat(foo_path) os.utime(foo_path, (time.time() - 129600 - 2, foo_stat.st_mtime)) self._box.clean() self.assertFalse(os.path.exists(foo_path)) self.assertTrue(os.path.exists(bar_path)) def test_create_tmp(self, repetitions=10): # Create files in tmp directory hostname = socket.gethostname() if '/' in hostname: hostname = hostname.replace('/', r'\057') if ':' in hostname: hostname = hostname.replace(':', r'\072') pid = os.getpid() pattern = re.compile(r"(?P<time>\d+)\.M(?P<M>\d{1,6})P(?P<P>\d+)" r"Q(?P<Q>\d+)\.(?P<host>[^:/]+)") previous_groups = None for x in range(repetitions): tmp_file = self._box._create_tmp() head, tail = os.path.split(tmp_file.name) self.assertEqual(head, os.path.abspath(os.path.join(self._path, "tmp")), "File in wrong location: '%s'" % head) match = pattern.match(tail) self.assertIsNotNone(match, "Invalid file name: '%s'" % tail) groups = match.groups() if previous_groups is not None: self.assertGreaterEqual(int(groups[0]), int(previous_groups[0]), "Non-monotonic seconds: '%s' before '%s'" % (previous_groups[0], groups[0])) if int(groups[0]) == int(previous_groups[0]): self.assertGreaterEqual(int(groups[1]), int(previous_groups[1]), "Non-monotonic milliseconds: '%s' before '%s'" % (previous_groups[1], groups[1])) self.assertEqual(int(groups[2]), pid, "Process ID mismatch: '%s' should be '%s'" % (groups[2], pid)) self.assertEqual(int(groups[3]), int(previous_groups[3]) + 1, "Non-sequential counter: '%s' before '%s'" % (previous_groups[3], groups[3])) 
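            # The host component of the generated name must match the
            # sanitized local hostname.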
self.assertEqual(groups[4], hostname, "Host name mismatch: '%s' should be '%s'" % (groups[4], hostname)) previous_groups = groups tmp_file.write(_bytes_sample_message) tmp_file.seek(0) self.assertEqual(tmp_file.read(), _bytes_sample_message) tmp_file.close() file_count = len(os.listdir(os.path.join(self._path, "tmp"))) self.assertEqual(file_count, repetitions, "Wrong file count: '%s' should be '%s'" % (file_count, repetitions)) def test_refresh(self): # Update the table of contents self.assertEqual(self._box._toc, {}) key0 = self._box.add(self._template % 0) key1 = self._box.add(self._template % 1) self.assertEqual(self._box._toc, {}) self._box._refresh() self.assertEqual(self._box._toc, {key0: os.path.join('new', key0), key1: os.path.join('new', key1)}) key2 = self._box.add(self._template % 2) self.assertEqual(self._box._toc, {key0: os.path.join('new', key0), key1: os.path.join('new', key1)}) self._box._refresh() self.assertEqual(self._box._toc, {key0: os.path.join('new', key0), key1: os.path.join('new', key1), key2: os.path.join('new', key2)}) def test_refresh_after_safety_period(self): # Issue #13254: Call _refresh after the "file system safety # period" of 2 seconds has passed; _toc should still be # updated because this is the first call to _refresh. key0 = self._box.add(self._template % 0) key1 = self._box.add(self._template % 1) self._box = self._factory(self._path) self.assertEqual(self._box._toc, {}) # Emulate sleeping. Instead of sleeping for 2 seconds, use the # skew factor to make _refresh think that the filesystem # safety period has passed and re-reading the _toc is only # required if mtimes differ. self._box._skewfactor = -3 self._box._refresh() self.assertEqual(sorted(self._box._toc.keys()), sorted([key0, key1])) def test_lookup(self): # Look up message subpaths in the TOC self.assertRaises(KeyError, lambda: self._box._lookup('foo')) key0 = self._box.add(self._template % 0) self.assertEqual(self._box._lookup(key0), os.path.join('new', key0)) os.remove(os.path.join(self._path, 'new', key0)) self.assertEqual(self._box._toc, {key0: os.path.join('new', key0)}) # Be sure that the TOC is read back from disk (see issue #6896 # about bad mtime behaviour on some systems). self._box.flush() self.assertRaises(KeyError, lambda: self._box._lookup(key0)) self.assertEqual(self._box._toc, {}) def test_lock_unlock(self): # Lock and unlock the mailbox. For Maildir, this does nothing. self._box.lock() self._box.unlock() def test_folder (self): # Test for bug #1569790: verify that folders returned by .get_folder() # use the same factory function. def dummy_factory (s): return None box = self._factory(self._path, factory=dummy_factory) folder = box.add_folder('folder1') self.assertIs(folder._factory, dummy_factory) folder1_alias = box.get_folder('folder1') self.assertIs(folder1_alias._factory, dummy_factory) def test_directory_in_folder (self): # Test that mailboxes still work if there's a stray extra directory # in a folder. for i in range(10): self._box.add(mailbox.Message(_sample_message)) # Create a stray directory os.mkdir(os.path.join(self._path, 'cur', 'stray-dir')) # Check that looping still works with the directory present. 
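        # Iteration must simply skip the stray directory rather than raise.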
for msg in self._box: pass @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()') @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()') def test_file_permissions(self): # Verify that message files are created without execute permissions msg = mailbox.MaildirMessage(self._template % 0) orig_umask = os.umask(0) try: key = self._box.add(msg) finally: os.umask(orig_umask) path = os.path.join(self._path, self._box._lookup(key)) mode = os.stat(path).st_mode self.assertFalse(mode & 0o111) @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()') @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()') def test_folder_file_perms(self): # From bug #3228, we want to verify that the file created inside a Maildir # subfolder isn't marked as executable. orig_umask = os.umask(0) try: subfolder = self._box.add_folder('subfolder') finally: os.umask(orig_umask) path = os.path.join(subfolder._path, 'maildirfolder') st = os.stat(path) perms = st.st_mode self.assertFalse((perms & 0o111)) # Execute bits should all be off. def test_reread(self): # Do an initial unconditional refresh self._box._refresh() # Put the last modified times more than two seconds into the past # (because mtime may have a two second granularity) for subdir in ('cur', 'new'): os.utime(os.path.join(self._box._path, subdir), (time.time()-5,)*2) # Because mtime has a two second granularity in worst case (FAT), a # refresh is done unconditionally if called for within # two-second-plus-a-bit of the last one, just in case the mbox has # changed; so now we have to wait for that interval to expire. # # Because this is a test, emulate sleeping. Instead of # sleeping for 2 seconds, use the skew factor to make _refresh # think that 2 seconds have passed and re-reading the _toc is # only required if mtimes differ. self._box._skewfactor = -3 # Re-reading causes the ._toc attribute to be assigned a new dictionary # object, so we'll check that the ._toc attribute isn't a different # object. orig_toc = self._box._toc def refreshed(): return self._box._toc is not orig_toc self._box._refresh() self.assertFalse(refreshed()) # Now, write something into cur and remove it. This changes # the mtime and should cause a re-read. Note that "sleep # emulation" is still in effect, as skewfactor is -3. filename = os.path.join(self._path, 'cur', 'stray-file') support.create_empty_file(filename) os.unlink(filename) self._box._refresh() self.assertTrue(refreshed()) class _TestSingleFile(TestMailbox): '''Common tests for single-file mailboxes''' def test_add_doesnt_rewrite(self): # When only adding messages, flush() should not rewrite the # mailbox file. See issue #9559. # Inode number changes if the contents are written to another # file which is then renamed over the original file. So we # must check that the inode number doesn't change. inode_before = os.stat(self._path).st_ino self._box.add(self._template % 0) self._box.flush() inode_after = os.stat(self._path).st_ino self.assertEqual(inode_before, inode_after) # Make sure the message was really added self._box.close() self._box = self._factory(self._path) self.assertEqual(len(self._box), 1) def test_permissions_after_flush(self): # See issue #5346 # Make the mailbox world writable. It's unlikely that the new # mailbox file would have these permissions after flush(), # because umask usually prevents it. 
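        # The mailbox file is made world-writable, one message is removed so
        # that flush() rewrites the file, and the rewritten file must keep
        # the same permission bits.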
mode = os.stat(self._path).st_mode | 0o666 os.chmod(self._path, mode) self._box.add(self._template % 0) i = self._box.add(self._template % 1) # Need to remove one message to make flush() create a new file self._box.remove(i) self._box.flush() self.assertEqual(os.stat(self._path).st_mode, mode) class _TestMboxMMDF(_TestSingleFile): def tearDown(self): super().tearDown() self._box.close() self._delete_recursively(self._path) for lock_remnant in glob.glob(self._path + '.*'): support.unlink(lock_remnant) def assertMailboxEmpty(self): with open(self._path) as f: self.assertEqual(f.readlines(), []) def test_add_from_string(self): # Add a string starting with 'From ' to the mailbox key = self._box.add('From foo@bar blah\nFrom: foo\n\n0\n') self.assertEqual(self._box[key].get_from(), 'foo@bar blah') self.assertEqual(self._box[key].get_payload(), '0\n') def test_add_from_bytes(self): # Add a byte string starting with 'From ' to the mailbox key = self._box.add(b'From foo@bar blah\nFrom: foo\n\n0\n') self.assertEqual(self._box[key].get_from(), 'foo@bar blah') self.assertEqual(self._box[key].get_payload(), '0\n') def test_add_mbox_or_mmdf_message(self): # Add an mboxMessage or MMDFMessage for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg = class_('From foo@bar blah\nFrom: foo\n\n0\n') key = self._box.add(msg) def test_open_close_open(self): # Open and inspect previously-created mailbox values = [self._template % i for i in range(3)] for value in values: self._box.add(value) self._box.close() mtime = os.path.getmtime(self._path) self._box = self._factory(self._path) self.assertEqual(len(self._box), 3) for key in self._box.iterkeys(): self.assertIn(self._box.get_string(key), values) self._box.close() self.assertEqual(mtime, os.path.getmtime(self._path)) def test_add_and_close(self): # Verifying that closing a mailbox doesn't change added items self._box.add(_sample_message) for i in range(3): self._box.add(self._template % i) self._box.add(_sample_message) self._box._file.flush() self._box._file.seek(0) contents = self._box._file.read() self._box.close() with open(self._path, 'rb') as f: self.assertEqual(contents, f.read()) self._box = self._factory(self._path) @unittest.skipUnless(hasattr(os, 'fork'), "Test needs fork().") @unittest.skipUnless(hasattr(socket, 'socketpair'), "Test needs socketpair().") def test_lock_conflict(self): # Fork off a child process that will lock the mailbox temporarily, # unlock it and exit. c, p = socket.socketpair() self.addCleanup(c.close) self.addCleanup(p.close) pid = os.fork() if pid == 0: # child try: # lock the mailbox, and signal the parent it can proceed self._box.lock() c.send(b'c') # wait until the parent is done, and unlock the mailbox c.recv(1) self._box.unlock() finally: os._exit(0) # In the parent, wait until the child signals it locked the mailbox. p.recv(1) try: self.assertRaises(mailbox.ExternalClashError, self._box.lock) finally: # Signal the child it can now release the lock and exit. p.send(b'p') # Wait for child to exit. Locking should now succeed. exited_pid, status = os.waitpid(pid, 0) self._box.lock() self._box.unlock() def test_relock(self): # Test case for bug #1575506: the mailbox class was locking the # wrong file object in its flush() method. 
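        # The mailbox is reopened and locked, another message is added and
        # flushed, and the box must still report itself as locked.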
msg = "Subject: sub\n\nbody\n" key1 = self._box.add(msg) self._box.flush() self._box.close() self._box = self._factory(self._path) self._box.lock() key2 = self._box.add(msg) self._box.flush() self.assertTrue(self._box._locked) self._box.close() class TestMbox(_TestMboxMMDF, unittest.TestCase): _factory = lambda self, path, factory=None: mailbox.mbox(path, factory) @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()') @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()') def test_file_perms(self): # From bug #3228, we want to verify that the mailbox file isn't executable, # even if the umask is set to something that would leave executable bits set. # We only run this test on platforms that support umask. try: old_umask = os.umask(0o077) self._box.close() os.unlink(self._path) self._box = mailbox.mbox(self._path, create=True) self._box.add('') self._box.close() finally: os.umask(old_umask) st = os.stat(self._path) perms = st.st_mode self.assertFalse((perms & 0o111)) # Execute bits should all be off. def test_terminating_newline(self): message = email.message.Message() message['From'] = '[email protected]' message.set_payload('No newline at the end') i = self._box.add(message) # A newline should have been appended to the payload message = self._box.get(i) self.assertEqual(message.get_payload(), 'No newline at the end\n') def test_message_separator(self): # Check there's always a single blank line after each message self._box.add('From: foo\n\n0') # No newline at the end with open(self._path) as f: data = f.read() self.assertEqual(data[-3:], '0\n\n') self._box.add('From: foo\n\n0\n') # Newline at the end with open(self._path) as f: data = f.read() self.assertEqual(data[-3:], '0\n\n') class TestMMDF(_TestMboxMMDF, unittest.TestCase): _factory = lambda self, path, factory=None: mailbox.MMDF(path, factory) class TestMH(TestMailbox, unittest.TestCase): _factory = lambda self, path, factory=None: mailbox.MH(path, factory) def assertMailboxEmpty(self): self.assertEqual(os.listdir(self._path), ['.mh_sequences']) def test_list_folders(self): # List folders self._box.add_folder('one') self._box.add_folder('two') self._box.add_folder('three') self.assertEqual(len(self._box.list_folders()), 3) self.assertEqual(set(self._box.list_folders()), set(('one', 'two', 'three'))) def test_get_folder(self): # Open folders def dummy_factory (s): return None self._box = self._factory(self._path, dummy_factory) new_folder = self._box.add_folder('foo.bar') folder0 = self._box.get_folder('foo.bar') folder0.add(self._template % 'bar') self.assertTrue(os.path.isdir(os.path.join(self._path, 'foo.bar'))) folder1 = self._box.get_folder('foo.bar') self.assertEqual(folder1.get_string(folder1.keys()[0]), self._template % 'bar') # Test for bug #1569790: verify that folders returned by .get_folder() # use the same factory function. 
self.assertIs(new_folder._factory, self._box._factory) self.assertIs(folder0._factory, self._box._factory) def test_add_and_remove_folders(self): # Delete folders self._box.add_folder('one') self._box.add_folder('two') self.assertEqual(len(self._box.list_folders()), 2) self.assertEqual(set(self._box.list_folders()), set(('one', 'two'))) self._box.remove_folder('one') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.add_folder('three') self.assertEqual(len(self._box.list_folders()), 2) self.assertEqual(set(self._box.list_folders()), set(('two', 'three'))) self._box.remove_folder('three') self.assertEqual(len(self._box.list_folders()), 1) self.assertEqual(set(self._box.list_folders()), set(('two',))) self._box.remove_folder('two') self.assertEqual(len(self._box.list_folders()), 0) self.assertEqual(self._box.list_folders(), []) def test_sequences(self): # Get and set sequences self.assertEqual(self._box.get_sequences(), {}) msg0 = mailbox.MHMessage(self._template % 0) msg0.add_sequence('foo') key0 = self._box.add(msg0) self.assertEqual(self._box.get_sequences(), {'foo':[key0]}) msg1 = mailbox.MHMessage(self._template % 1) msg1.set_sequences(['bar', 'replied', 'foo']) key1 = self._box.add(msg1) self.assertEqual(self._box.get_sequences(), {'foo':[key0, key1], 'bar':[key1], 'replied':[key1]}) msg0.set_sequences(['flagged']) self._box[key0] = msg0 self.assertEqual(self._box.get_sequences(), {'foo':[key1], 'bar':[key1], 'replied':[key1], 'flagged':[key0]}) self._box.remove(key1) self.assertEqual(self._box.get_sequences(), {'flagged':[key0]}) def test_issue2625(self): msg0 = mailbox.MHMessage(self._template % 0) msg0.add_sequence('foo') key0 = self._box.add(msg0) refmsg0 = self._box.get_message(key0) def test_issue7627(self): msg0 = mailbox.MHMessage(self._template % 0) key0 = self._box.add(msg0) self._box.lock() self._box.remove(key0) self._box.unlock() def test_pack(self): # Pack the contents of the mailbox msg0 = mailbox.MHMessage(self._template % 0) msg1 = mailbox.MHMessage(self._template % 1) msg2 = mailbox.MHMessage(self._template % 2) msg3 = mailbox.MHMessage(self._template % 3) msg0.set_sequences(['foo', 'unseen']) msg1.set_sequences(['foo']) msg2.set_sequences(['foo', 'flagged']) msg3.set_sequences(['foo', 'bar', 'replied']) key0 = self._box.add(msg0) key1 = self._box.add(msg1) key2 = self._box.add(msg2) key3 = self._box.add(msg3) self.assertEqual(self._box.get_sequences(), {'foo':[key0,key1,key2,key3], 'unseen':[key0], 'flagged':[key2], 'bar':[key3], 'replied':[key3]}) self._box.remove(key2) self.assertEqual(self._box.get_sequences(), {'foo':[key0,key1,key3], 'unseen':[key0], 'bar':[key3], 'replied':[key3]}) self._box.pack() self.assertEqual(self._box.keys(), [1, 2, 3]) key0 = key0 key1 = key0 + 1 key2 = key1 + 1 self.assertEqual(self._box.get_sequences(), {'foo':[1, 2, 3], 'unseen':[1], 'bar':[3], 'replied':[3]}) # Test case for packing while holding the mailbox locked. 
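        # Four more copies are added, two are removed, and pack() is called
        # between lock() and unlock(); the remaining messages must be
        # renumbered contiguously starting from 1.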
key0 = self._box.add(msg1) key1 = self._box.add(msg1) key2 = self._box.add(msg1) key3 = self._box.add(msg1) self._box.remove(key0) self._box.remove(key2) self._box.lock() self._box.pack() self._box.unlock() self.assertEqual(self._box.get_sequences(), {'foo':[1, 2, 3, 4, 5], 'unseen':[1], 'bar':[3], 'replied':[3]}) def _get_lock_path(self): return os.path.join(self._path, '.mh_sequences.lock') class TestBabyl(_TestSingleFile, unittest.TestCase): _factory = lambda self, path, factory=None: mailbox.Babyl(path, factory) def assertMailboxEmpty(self): with open(self._path) as f: self.assertEqual(f.readlines(), []) def tearDown(self): super().tearDown() self._box.close() self._delete_recursively(self._path) for lock_remnant in glob.glob(self._path + '.*'): support.unlink(lock_remnant) def test_labels(self): # Get labels from the mailbox self.assertEqual(self._box.get_labels(), []) msg0 = mailbox.BabylMessage(self._template % 0) msg0.add_label('foo') key0 = self._box.add(msg0) self.assertEqual(self._box.get_labels(), ['foo']) msg1 = mailbox.BabylMessage(self._template % 1) msg1.set_labels(['bar', 'answered', 'foo']) key1 = self._box.add(msg1) self.assertEqual(set(self._box.get_labels()), set(['foo', 'bar'])) msg0.set_labels(['blah', 'filed']) self._box[key0] = msg0 self.assertEqual(set(self._box.get_labels()), set(['foo', 'bar', 'blah'])) self._box.remove(key1) self.assertEqual(set(self._box.get_labels()), set(['blah'])) class FakeFileLikeObject: def __init__(self): self.closed = False def close(self): self.closed = True class FakeMailBox(mailbox.Mailbox): def __init__(self): mailbox.Mailbox.__init__(self, '', lambda file: None) self.files = [FakeFileLikeObject() for i in range(10)] def get_file(self, key): return self.files[key] class TestFakeMailBox(unittest.TestCase): def test_closing_fd(self): box = FakeMailBox() for i in range(10): self.assertFalse(box.files[i].closed) for i in range(10): box[i] for i in range(10): self.assertTrue(box.files[i].closed) class TestMessage(TestBase, unittest.TestCase): _factory = mailbox.Message # Overridden by subclasses to reuse tests def setUp(self): self._path = support.TESTFN def tearDown(self): self._delete_recursively(self._path) def test_initialize_with_eMM(self): # Initialize based on email.message.Message instance eMM = email.message_from_string(_sample_message) msg = self._factory(eMM) self._post_initialize_hook(msg) self._check_sample(msg) def test_initialize_with_string(self): # Initialize based on string msg = self._factory(_sample_message) self._post_initialize_hook(msg) self._check_sample(msg) def test_initialize_with_file(self): # Initialize based on contents of file with open(self._path, 'w+') as f: f.write(_sample_message) f.seek(0) msg = self._factory(f) self._post_initialize_hook(msg) self._check_sample(msg) def test_initialize_with_binary_file(self): # Initialize based on contents of binary file with open(self._path, 'wb+') as f: f.write(_bytes_sample_message) f.seek(0) msg = self._factory(f) self._post_initialize_hook(msg) self._check_sample(msg) def test_initialize_with_nothing(self): # Initialize without arguments msg = self._factory() self._post_initialize_hook(msg) self.assertIsInstance(msg, email.message.Message) self.assertIsInstance(msg, mailbox.Message) self.assertIsInstance(msg, self._factory) self.assertEqual(msg.keys(), []) self.assertFalse(msg.is_multipart()) self.assertIsNone(msg.get_payload()) def test_initialize_incorrectly(self): # Initialize with invalid argument self.assertRaises(TypeError, lambda: self._factory(object())) 
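    # Illustration only (not executed); mailbox.Message accepts the same
    # kinds of input exercised by the initialization tests above, e.g.:
    #     msg = mailbox.Message('From: foo\n\nbody\n')               # from str
    #     msg = mailbox.Message(_bytes_sample_message)               # from bytes
    #     msg = mailbox.Message(email.message_from_string(_sample_message))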
def test_all_eMM_attribues_exist(self): # Issue 12537 eMM = email.message_from_string(_sample_message) msg = self._factory(_sample_message) for attr in eMM.__dict__: self.assertIn(attr, msg.__dict__, '{} attribute does not exist'.format(attr)) def test_become_message(self): # Take on the state of another message eMM = email.message_from_string(_sample_message) msg = self._factory() msg._become_message(eMM) self._check_sample(msg) def test_explain_to(self): # Copy self's format-specific data to other message formats. # This test is superficial; better ones are in TestMessageConversion. msg = self._factory() for class_ in self.all_mailbox_types: other_msg = class_() msg._explain_to(other_msg) other_msg = email.message.Message() self.assertRaises(TypeError, lambda: msg._explain_to(other_msg)) def _post_initialize_hook(self, msg): # Overridden by subclasses to check extra things after initialization pass class TestMaildirMessage(TestMessage, unittest.TestCase): _factory = mailbox.MaildirMessage def _post_initialize_hook(self, msg): self.assertEqual(msg._subdir, 'new') self.assertEqual(msg._info, '') def test_subdir(self): # Use get_subdir() and set_subdir() msg = mailbox.MaildirMessage(_sample_message) self.assertEqual(msg.get_subdir(), 'new') msg.set_subdir('cur') self.assertEqual(msg.get_subdir(), 'cur') msg.set_subdir('new') self.assertEqual(msg.get_subdir(), 'new') self.assertRaises(ValueError, lambda: msg.set_subdir('tmp')) self.assertEqual(msg.get_subdir(), 'new') msg.set_subdir('new') self.assertEqual(msg.get_subdir(), 'new') self._check_sample(msg) def test_flags(self): # Use get_flags(), set_flags(), add_flag(), remove_flag() msg = mailbox.MaildirMessage(_sample_message) self.assertEqual(msg.get_flags(), '') self.assertEqual(msg.get_subdir(), 'new') msg.set_flags('F') self.assertEqual(msg.get_subdir(), 'new') self.assertEqual(msg.get_flags(), 'F') msg.set_flags('SDTP') self.assertEqual(msg.get_flags(), 'DPST') msg.add_flag('FT') self.assertEqual(msg.get_flags(), 'DFPST') msg.remove_flag('TDRP') self.assertEqual(msg.get_flags(), 'FS') self.assertEqual(msg.get_subdir(), 'new') self._check_sample(msg) def test_date(self): # Use get_date() and set_date() msg = mailbox.MaildirMessage(_sample_message) self.assertLess(abs(msg.get_date() - time.time()), 60) msg.set_date(0.0) self.assertEqual(msg.get_date(), 0.0) def test_info(self): # Use get_info() and set_info() msg = mailbox.MaildirMessage(_sample_message) self.assertEqual(msg.get_info(), '') msg.set_info('1,foo=bar') self.assertEqual(msg.get_info(), '1,foo=bar') self.assertRaises(TypeError, lambda: msg.set_info(None)) self._check_sample(msg) def test_info_and_flags(self): # Test interaction of info and flag methods msg = mailbox.MaildirMessage(_sample_message) self.assertEqual(msg.get_info(), '') msg.set_flags('SF') self.assertEqual(msg.get_flags(), 'FS') self.assertEqual(msg.get_info(), '2,FS') msg.set_info('1,') self.assertEqual(msg.get_flags(), '') self.assertEqual(msg.get_info(), '1,') msg.remove_flag('RPT') self.assertEqual(msg.get_flags(), '') self.assertEqual(msg.get_info(), '1,') msg.add_flag('D') self.assertEqual(msg.get_flags(), 'D') self.assertEqual(msg.get_info(), '2,D') self._check_sample(msg) class _TestMboxMMDFMessage: _factory = mailbox._mboxMMDFMessage def _post_initialize_hook(self, msg): self._check_from(msg) def test_initialize_with_unixfrom(self): # Initialize with a message that already has a _unixfrom attribute msg = mailbox.Message(_sample_message) msg.set_unixfrom('From foo@bar blah') msg = 
mailbox.mboxMessage(msg) self.assertEqual(msg.get_from(), 'foo@bar blah', msg.get_from()) def test_from(self): # Get and set "From " line msg = mailbox.mboxMessage(_sample_message) self._check_from(msg) msg.set_from('foo bar') self.assertEqual(msg.get_from(), 'foo bar') msg.set_from('foo@bar', True) self._check_from(msg, 'foo@bar') msg.set_from('blah@temp', time.localtime()) self._check_from(msg, 'blah@temp') def test_flags(self): # Use get_flags(), set_flags(), add_flag(), remove_flag() msg = mailbox.mboxMessage(_sample_message) self.assertEqual(msg.get_flags(), '') msg.set_flags('F') self.assertEqual(msg.get_flags(), 'F') msg.set_flags('XODR') self.assertEqual(msg.get_flags(), 'RODX') msg.add_flag('FA') self.assertEqual(msg.get_flags(), 'RODFAX') msg.remove_flag('FDXA') self.assertEqual(msg.get_flags(), 'RO') self._check_sample(msg) def _check_from(self, msg, sender=None): # Check contents of "From " line if sender is None: sender = "MAILER-DAEMON" self.assertIsNotNone(re.match( sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:\d{2} \d{4}", msg.get_from())) class TestMboxMessage(_TestMboxMMDFMessage, TestMessage): _factory = mailbox.mboxMessage class TestMHMessage(TestMessage, unittest.TestCase): _factory = mailbox.MHMessage def _post_initialize_hook(self, msg): self.assertEqual(msg._sequences, []) def test_sequences(self): # Get, set, join, and leave sequences msg = mailbox.MHMessage(_sample_message) self.assertEqual(msg.get_sequences(), []) msg.set_sequences(['foobar']) self.assertEqual(msg.get_sequences(), ['foobar']) msg.set_sequences([]) self.assertEqual(msg.get_sequences(), []) msg.add_sequence('unseen') self.assertEqual(msg.get_sequences(), ['unseen']) msg.add_sequence('flagged') self.assertEqual(msg.get_sequences(), ['unseen', 'flagged']) msg.add_sequence('flagged') self.assertEqual(msg.get_sequences(), ['unseen', 'flagged']) msg.remove_sequence('unseen') self.assertEqual(msg.get_sequences(), ['flagged']) msg.add_sequence('foobar') self.assertEqual(msg.get_sequences(), ['flagged', 'foobar']) msg.remove_sequence('replied') self.assertEqual(msg.get_sequences(), ['flagged', 'foobar']) msg.set_sequences(['foobar', 'replied']) self.assertEqual(msg.get_sequences(), ['foobar', 'replied']) class TestBabylMessage(TestMessage, unittest.TestCase): _factory = mailbox.BabylMessage def _post_initialize_hook(self, msg): self.assertEqual(msg._labels, []) def test_labels(self): # Get, set, join, and leave labels msg = mailbox.BabylMessage(_sample_message) self.assertEqual(msg.get_labels(), []) msg.set_labels(['foobar']) self.assertEqual(msg.get_labels(), ['foobar']) msg.set_labels([]) self.assertEqual(msg.get_labels(), []) msg.add_label('filed') self.assertEqual(msg.get_labels(), ['filed']) msg.add_label('resent') self.assertEqual(msg.get_labels(), ['filed', 'resent']) msg.add_label('resent') self.assertEqual(msg.get_labels(), ['filed', 'resent']) msg.remove_label('filed') self.assertEqual(msg.get_labels(), ['resent']) msg.add_label('foobar') self.assertEqual(msg.get_labels(), ['resent', 'foobar']) msg.remove_label('unseen') self.assertEqual(msg.get_labels(), ['resent', 'foobar']) msg.set_labels(['foobar', 'answered']) self.assertEqual(msg.get_labels(), ['foobar', 'answered']) def test_visible(self): # Get, set, and update visible headers msg = mailbox.BabylMessage(_sample_message) visible = msg.get_visible() self.assertEqual(visible.keys(), []) self.assertIsNone(visible.get_payload()) visible['User-Agent'] = 'FooBar 1.0' visible['X-Whatever'] = 'Blah' self.assertEqual(msg.get_visible().keys(), []) 
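        # get_visible() returns a snapshot, so the headers added above are
        # not part of the message until set_visible() is called.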
msg.set_visible(visible) visible = msg.get_visible() self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever']) self.assertEqual(visible['User-Agent'], 'FooBar 1.0') self.assertEqual(visible['X-Whatever'], 'Blah') self.assertIsNone(visible.get_payload()) msg.update_visible() self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever']) self.assertIsNone(visible.get_payload()) visible = msg.get_visible() self.assertEqual(visible.keys(), ['User-Agent', 'Date', 'From', 'To', 'Subject']) for header in ('User-Agent', 'Date', 'From', 'To', 'Subject'): self.assertEqual(visible[header], msg[header]) class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage): _factory = mailbox.MMDFMessage class TestMessageConversion(TestBase, unittest.TestCase): def test_plain_to_x(self): # Convert Message to all formats for class_ in self.all_mailbox_types: msg_plain = mailbox.Message(_sample_message) msg = class_(msg_plain) self._check_sample(msg) def test_x_to_plain(self): # Convert all formats to Message for class_ in self.all_mailbox_types: msg = class_(_sample_message) msg_plain = mailbox.Message(msg) self._check_sample(msg_plain) def test_x_from_bytes(self): # Convert all formats to Message for class_ in self.all_mailbox_types: msg = class_(_bytes_sample_message) self._check_sample(msg) def test_x_to_invalid(self): # Convert all formats to an invalid format for class_ in self.all_mailbox_types: self.assertRaises(TypeError, lambda: class_(False)) def test_type_specific_attributes_removed_on_conversion(self): reference = {class_: class_(_sample_message).__dict__ for class_ in self.all_mailbox_types} for class1 in self.all_mailbox_types: for class2 in self.all_mailbox_types: if class1 is class2: continue source = class1(_sample_message) target = class2(source) type_specific = [a for a in reference[class1] if a not in reference[class2]] for attr in type_specific: self.assertNotIn(attr, target.__dict__, "while converting {} to {}".format(class1, class2)) def test_maildir_to_maildir(self): # Convert MaildirMessage to MaildirMessage msg_maildir = mailbox.MaildirMessage(_sample_message) msg_maildir.set_flags('DFPRST') msg_maildir.set_subdir('cur') date = msg_maildir.get_date() msg = mailbox.MaildirMessage(msg_maildir) self._check_sample(msg) self.assertEqual(msg.get_flags(), 'DFPRST') self.assertEqual(msg.get_subdir(), 'cur') self.assertEqual(msg.get_date(), date) def test_maildir_to_mboxmmdf(self): # Convert MaildirMessage to mboxmessage and MMDFMessage pairs = (('D', ''), ('F', 'F'), ('P', ''), ('R', 'A'), ('S', 'R'), ('T', 'D'), ('DFPRST', 'RDFA')) for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg_maildir = mailbox.MaildirMessage(_sample_message) msg_maildir.set_date(0.0) for setting, result in pairs: msg_maildir.set_flags(setting) msg = class_(msg_maildir) self.assertEqual(msg.get_flags(), result) self.assertEqual(msg.get_from(), 'MAILER-DAEMON %s' % time.asctime(time.gmtime(0.0))) msg_maildir.set_subdir('cur') self.assertEqual(class_(msg_maildir).get_flags(), 'RODFA') def test_maildir_to_mh(self): # Convert MaildirMessage to MHMessage msg_maildir = mailbox.MaildirMessage(_sample_message) pairs = (('D', ['unseen']), ('F', ['unseen', 'flagged']), ('P', ['unseen']), ('R', ['unseen', 'replied']), ('S', []), ('T', ['unseen']), ('DFPRST', ['replied', 'flagged'])) for setting, result in pairs: msg_maildir.set_flags(setting) self.assertEqual(mailbox.MHMessage(msg_maildir).get_sequences(), result) def test_maildir_to_babyl(self): # Convert MaildirMessage to Babyl msg_maildir = 
mailbox.MaildirMessage(_sample_message) pairs = (('D', ['unseen']), ('F', ['unseen']), ('P', ['unseen', 'forwarded']), ('R', ['unseen', 'answered']), ('S', []), ('T', ['unseen', 'deleted']), ('DFPRST', ['deleted', 'answered', 'forwarded'])) for setting, result in pairs: msg_maildir.set_flags(setting) self.assertEqual(mailbox.BabylMessage(msg_maildir).get_labels(), result) def test_mboxmmdf_to_maildir(self): # Convert mboxMessage and MMDFMessage to MaildirMessage for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg_mboxMMDF = class_(_sample_message) msg_mboxMMDF.set_from('foo@bar', time.gmtime(0.0)) pairs = (('R', 'S'), ('O', ''), ('D', 'T'), ('F', 'F'), ('A', 'R'), ('RODFA', 'FRST')) for setting, result in pairs: msg_mboxMMDF.set_flags(setting) msg = mailbox.MaildirMessage(msg_mboxMMDF) self.assertEqual(msg.get_flags(), result) self.assertEqual(msg.get_date(), 0.0) msg_mboxMMDF.set_flags('O') self.assertEqual(mailbox.MaildirMessage(msg_mboxMMDF).get_subdir(), 'cur') def test_mboxmmdf_to_mboxmmdf(self): # Convert mboxMessage and MMDFMessage to mboxMessage and MMDFMessage for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg_mboxMMDF = class_(_sample_message) msg_mboxMMDF.set_flags('RODFA') msg_mboxMMDF.set_from('foo@bar') for class2_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg2 = class2_(msg_mboxMMDF) self.assertEqual(msg2.get_flags(), 'RODFA') self.assertEqual(msg2.get_from(), 'foo@bar') def test_mboxmmdf_to_mh(self): # Convert mboxMessage and MMDFMessage to MHMessage for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg_mboxMMDF = class_(_sample_message) pairs = (('R', []), ('O', ['unseen']), ('D', ['unseen']), ('F', ['unseen', 'flagged']), ('A', ['unseen', 'replied']), ('RODFA', ['replied', 'flagged'])) for setting, result in pairs: msg_mboxMMDF.set_flags(setting) self.assertEqual(mailbox.MHMessage(msg_mboxMMDF).get_sequences(), result) def test_mboxmmdf_to_babyl(self): # Convert mboxMessage and MMDFMessage to BabylMessage for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg = class_(_sample_message) pairs = (('R', []), ('O', ['unseen']), ('D', ['unseen', 'deleted']), ('F', ['unseen']), ('A', ['unseen', 'answered']), ('RODFA', ['deleted', 'answered'])) for setting, result in pairs: msg.set_flags(setting) self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result) def test_mh_to_maildir(self): # Convert MHMessage to MaildirMessage pairs = (('unseen', ''), ('replied', 'RS'), ('flagged', 'FS')) for setting, result in pairs: msg = mailbox.MHMessage(_sample_message) msg.add_sequence(setting) self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result) self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur') msg = mailbox.MHMessage(_sample_message) msg.add_sequence('unseen') msg.add_sequence('replied') msg.add_sequence('flagged') self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'FR') self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur') def test_mh_to_mboxmmdf(self): # Convert MHMessage to mboxMessage and MMDFMessage pairs = (('unseen', 'O'), ('replied', 'ROA'), ('flagged', 'ROF')) for setting, result in pairs: msg = mailbox.MHMessage(_sample_message) msg.add_sequence(setting) for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): self.assertEqual(class_(msg).get_flags(), result) msg = mailbox.MHMessage(_sample_message) msg.add_sequence('unseen') msg.add_sequence('replied') msg.add_sequence('flagged') for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): self.assertEqual(class_(msg).get_flags(), 'OFA') def 
test_mh_to_mh(self): # Convert MHMessage to MHMessage msg = mailbox.MHMessage(_sample_message) msg.add_sequence('unseen') msg.add_sequence('replied') msg.add_sequence('flagged') self.assertEqual(mailbox.MHMessage(msg).get_sequences(), ['unseen', 'replied', 'flagged']) def test_mh_to_babyl(self): # Convert MHMessage to BabylMessage pairs = (('unseen', ['unseen']), ('replied', ['answered']), ('flagged', [])) for setting, result in pairs: msg = mailbox.MHMessage(_sample_message) msg.add_sequence(setting) self.assertEqual(mailbox.BabylMessage(msg).get_labels(), result) msg = mailbox.MHMessage(_sample_message) msg.add_sequence('unseen') msg.add_sequence('replied') msg.add_sequence('flagged') self.assertEqual(mailbox.BabylMessage(msg).get_labels(), ['unseen', 'answered']) def test_babyl_to_maildir(self): # Convert BabylMessage to MaildirMessage pairs = (('unseen', ''), ('deleted', 'ST'), ('filed', 'S'), ('answered', 'RS'), ('forwarded', 'PS'), ('edited', 'S'), ('resent', 'PS')) for setting, result in pairs: msg = mailbox.BabylMessage(_sample_message) msg.add_label(setting) self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), result) self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur') msg = mailbox.BabylMessage(_sample_message) for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded', 'edited', 'resent'): msg.add_label(label) self.assertEqual(mailbox.MaildirMessage(msg).get_flags(), 'PRT') self.assertEqual(mailbox.MaildirMessage(msg).get_subdir(), 'cur') def test_babyl_to_mboxmmdf(self): # Convert BabylMessage to mboxMessage and MMDFMessage pairs = (('unseen', 'O'), ('deleted', 'ROD'), ('filed', 'RO'), ('answered', 'ROA'), ('forwarded', 'RO'), ('edited', 'RO'), ('resent', 'RO')) for setting, result in pairs: for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg = mailbox.BabylMessage(_sample_message) msg.add_label(setting) self.assertEqual(class_(msg).get_flags(), result) msg = mailbox.BabylMessage(_sample_message) for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded', 'edited', 'resent'): msg.add_label(label) for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage): self.assertEqual(class_(msg).get_flags(), 'ODA') def test_babyl_to_mh(self): # Convert BabylMessage to MHMessage pairs = (('unseen', ['unseen']), ('deleted', []), ('filed', []), ('answered', ['replied']), ('forwarded', []), ('edited', []), ('resent', [])) for setting, result in pairs: msg = mailbox.BabylMessage(_sample_message) msg.add_label(setting) self.assertEqual(mailbox.MHMessage(msg).get_sequences(), result) msg = mailbox.BabylMessage(_sample_message) for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded', 'edited', 'resent'): msg.add_label(label) self.assertEqual(mailbox.MHMessage(msg).get_sequences(), ['unseen', 'replied']) def test_babyl_to_babyl(self): # Convert BabylMessage to BabylMessage msg = mailbox.BabylMessage(_sample_message) msg.update_visible() for label in ('unseen', 'deleted', 'filed', 'answered', 'forwarded', 'edited', 'resent'): msg.add_label(label) msg2 = mailbox.BabylMessage(msg) self.assertEqual(msg2.get_labels(), ['unseen', 'deleted', 'filed', 'answered', 'forwarded', 'edited', 'resent']) self.assertEqual(msg.get_visible().keys(), msg2.get_visible().keys()) for key in msg.get_visible().keys(): self.assertEqual(msg.get_visible()[key], msg2.get_visible()[key]) class TestProxyFileBase(TestBase): def _test_read(self, proxy): # Read by byte proxy.seek(0) self.assertEqual(proxy.read(), b'bar') proxy.seek(1) self.assertEqual(proxy.read(), b'ar') 
proxy.seek(0) self.assertEqual(proxy.read(2), b'ba') proxy.seek(1) self.assertEqual(proxy.read(-1), b'ar') proxy.seek(2) self.assertEqual(proxy.read(1000), b'r') def _test_readline(self, proxy): # Read by line linesep = os.linesep.encode() proxy.seek(0) self.assertEqual(proxy.readline(), b'foo' + linesep) self.assertEqual(proxy.readline(), b'bar' + linesep) self.assertEqual(proxy.readline(), b'fred' + linesep) self.assertEqual(proxy.readline(), b'bob') proxy.seek(2) self.assertEqual(proxy.readline(), b'o' + linesep) proxy.seek(6 + 2 * len(os.linesep)) self.assertEqual(proxy.readline(), b'fred' + linesep) proxy.seek(6 + 2 * len(os.linesep)) self.assertEqual(proxy.readline(2), b'fr') self.assertEqual(proxy.readline(-10), b'ed' + linesep) def _test_readlines(self, proxy): # Read multiple lines linesep = os.linesep.encode() proxy.seek(0) self.assertEqual(proxy.readlines(), [b'foo' + linesep, b'bar' + linesep, b'fred' + linesep, b'bob']) proxy.seek(0) self.assertEqual(proxy.readlines(2), [b'foo' + linesep]) proxy.seek(3 + len(linesep)) self.assertEqual(proxy.readlines(4 + len(linesep)), [b'bar' + linesep, b'fred' + linesep]) proxy.seek(3) self.assertEqual(proxy.readlines(1000), [linesep, b'bar' + linesep, b'fred' + linesep, b'bob']) def _test_iteration(self, proxy): # Iterate by line linesep = os.linesep.encode() proxy.seek(0) iterator = iter(proxy) self.assertEqual(next(iterator), b'foo' + linesep) self.assertEqual(next(iterator), b'bar' + linesep) self.assertEqual(next(iterator), b'fred' + linesep) self.assertEqual(next(iterator), b'bob') self.assertRaises(StopIteration, next, iterator) def _test_seek_and_tell(self, proxy): # Seek and use tell to check position linesep = os.linesep.encode() proxy.seek(3) self.assertEqual(proxy.tell(), 3) self.assertEqual(proxy.read(len(linesep)), linesep) proxy.seek(2, 1) self.assertEqual(proxy.read(1 + len(linesep)), b'r' + linesep) proxy.seek(-3 - len(linesep), 2) self.assertEqual(proxy.read(3), b'bar') proxy.seek(2, 0) self.assertEqual(proxy.read(), b'o' + linesep + b'bar' + linesep) proxy.seek(100) self.assertFalse(proxy.read()) def _test_close(self, proxy): # Close a file self.assertFalse(proxy.closed) proxy.close() self.assertTrue(proxy.closed) # Issue 11700 subsequent closes should be a no-op. 
proxy.close() self.assertTrue(proxy.closed) class TestProxyFile(TestProxyFileBase, unittest.TestCase): def setUp(self): self._path = support.TESTFN self._file = open(self._path, 'wb+') def tearDown(self): self._file.close() self._delete_recursively(self._path) def test_initialize(self): # Initialize and check position self._file.write(b'foo') pos = self._file.tell() proxy0 = mailbox._ProxyFile(self._file) self.assertEqual(proxy0.tell(), pos) self.assertEqual(self._file.tell(), pos) proxy1 = mailbox._ProxyFile(self._file, 0) self.assertEqual(proxy1.tell(), 0) self.assertEqual(self._file.tell(), pos) def test_read(self): self._file.write(b'bar') self._test_read(mailbox._ProxyFile(self._file)) def test_readline(self): self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_readline(mailbox._ProxyFile(self._file)) def test_readlines(self): self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_readlines(mailbox._ProxyFile(self._file)) def test_iteration(self): self._file.write(bytes('foo%sbar%sfred%sbob' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_iteration(mailbox._ProxyFile(self._file)) def test_seek_and_tell(self): self._file.write(bytes('foo%sbar%s' % (os.linesep, os.linesep), 'ascii')) self._test_seek_and_tell(mailbox._ProxyFile(self._file)) def test_close(self): self._file.write(bytes('foo%sbar%s' % (os.linesep, os.linesep), 'ascii')) self._test_close(mailbox._ProxyFile(self._file)) class TestPartialFile(TestProxyFileBase, unittest.TestCase): def setUp(self): self._path = support.TESTFN self._file = open(self._path, 'wb+') def tearDown(self): self._file.close() self._delete_recursively(self._path) def test_initialize(self): # Initialize and check position self._file.write(bytes('foo' + os.linesep + 'bar', 'ascii')) pos = self._file.tell() proxy = mailbox._PartialFile(self._file, 2, 5) self.assertEqual(proxy.tell(), 0) self.assertEqual(self._file.tell(), pos) def test_read(self): self._file.write(bytes('***bar***', 'ascii')) self._test_read(mailbox._PartialFile(self._file, 3, 6)) def test_readline(self): self._file.write(bytes('!!!!!foo%sbar%sfred%sbob!!!!!' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_readline(mailbox._PartialFile(self._file, 5, 18 + 3 * len(os.linesep))) def test_readlines(self): self._file.write(bytes('foo%sbar%sfred%sbob?????' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_readlines(mailbox._PartialFile(self._file, 0, 13 + 3 * len(os.linesep))) def test_iteration(self): self._file.write(bytes('____foo%sbar%sfred%sbob####' % (os.linesep, os.linesep, os.linesep), 'ascii')) self._test_iteration(mailbox._PartialFile(self._file, 4, 17 + 3 * len(os.linesep))) def test_seek_and_tell(self): self._file.write(bytes('(((foo%sbar%s$$$' % (os.linesep, os.linesep), 'ascii')) self._test_seek_and_tell(mailbox._PartialFile(self._file, 3, 9 + 2 * len(os.linesep))) def test_close(self): self._file.write(bytes('&foo%sbar%s^' % (os.linesep, os.linesep), 'ascii')) self._test_close(mailbox._PartialFile(self._file, 1, 6 + 3 * len(os.linesep))) ## Start: tests from the original module (for backward compatibility). FROM_ = "From [email protected] Sat Jul 24 13:43:35 2004\n" DUMMY_MESSAGE = """\ From: [email protected] To: [email protected] Subject: Simple Test This is a dummy message. 
""" class MaildirTestCase(unittest.TestCase): def setUp(self): # create a new maildir mailbox to work with: self._dir = support.TESTFN if os.path.isdir(self._dir): support.rmtree(self._dir) elif os.path.isfile(self._dir): support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) os.mkdir(os.path.join(self._dir, "new")) self._counter = 1 self._msgfiles = [] def tearDown(self): list(map(os.unlink, self._msgfiles)) support.rmdir(os.path.join(self._dir, "cur")) support.rmdir(os.path.join(self._dir, "tmp")) support.rmdir(os.path.join(self._dir, "new")) support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) pid = self._counter self._counter += 1 filename = ".".join((str(t), str(pid), "myhostname", "mydomain")) tmpname = os.path.join(self._dir, "tmp", filename) newname = os.path.join(self._dir, dir, filename) with open(tmpname, "w") as fp: self._msgfiles.append(tmpname) if mbox: fp.write(FROM_) fp.write(DUMMY_MESSAGE) if hasattr(os, "link"): os.link(tmpname, newname) else: with open(newname, "w") as fp: fp.write(DUMMY_MESSAGE) self._msgfiles.append(newname) return tmpname def test_empty_maildir(self): """Test an empty maildir mailbox""" # Test for regression on bug #117490: # Make sure the boxes attribute actually gets set. self.mbox = mailbox.Maildir(support.TESTFN) #self.assertTrue(hasattr(self.mbox, "boxes")) #self.assertEqual(len(self.mbox.boxes), 0) self.assertIsNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) def test_nonempty_maildir_cur(self): self.createMessage("cur") self.mbox = mailbox.Maildir(support.TESTFN) #self.assertEqual(len(self.mbox.boxes), 1) self.assertIsNotNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) def test_nonempty_maildir_new(self): self.createMessage("new") self.mbox = mailbox.Maildir(support.TESTFN) #self.assertEqual(len(self.mbox.boxes), 1) self.assertIsNotNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) def test_nonempty_maildir_both(self): self.createMessage("cur") self.createMessage("new") self.mbox = mailbox.Maildir(support.TESTFN) #self.assertEqual(len(self.mbox.boxes), 2) self.assertIsNotNone(self.mbox.next()) self.assertIsNotNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) self.assertIsNone(self.mbox.next()) ## End: tests from the original module (for backward compatibility). _sample_message = """\ Return-Path: <[email protected]> X-Original-To: gkj+person@localhost Delivered-To: gkj+person@localhost Received: from localhost (localhost [127.0.0.1]) by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17 for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT) Delivered-To: [email protected] Received: from localhost [127.0.0.1] by localhost with POP3 (fetchmail-6.2.5) for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT) Received: from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228]) by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746 for <[email protected]>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT) Received: by andy.gregorykjohnson.com (Postfix, from userid 1000) id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT) Date: Wed, 13 Jul 2005 17:23:11 -0400 From: "Gregory K. 
Johnson" <[email protected]> To: [email protected] Subject: Sample message Message-ID: <[email protected]> Mime-Version: 1.0 Content-Type: multipart/mixed; boundary="NMuMz9nt05w80d4+" Content-Disposition: inline User-Agent: Mutt/1.5.9i --NMuMz9nt05w80d4+ Content-Type: text/plain; charset=us-ascii Content-Disposition: inline This is a sample message. -- Gregory K. Johnson --NMuMz9nt05w80d4+ Content-Type: application/octet-stream Content-Disposition: attachment; filename="text.gz" Content-Transfer-Encoding: base64 H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs 3FYlAAAA --NMuMz9nt05w80d4+-- """ _bytes_sample_message = _sample_message.encode('ascii') _sample_headers = { "Return-Path":"<[email protected]>", "X-Original-To":"gkj+person@localhost", "Delivered-To":"gkj+person@localhost", "Received":"""from localhost (localhost [127.0.0.1]) by andy.gregorykjohnson.com (Postfix) with ESMTP id 356ED9DD17 for <gkj+person@localhost>; Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""", "Delivered-To":"[email protected]", "Received":"""from localhost [127.0.0.1] by localhost with POP3 (fetchmail-6.2.5) for gkj+person@localhost (single-drop); Wed, 13 Jul 2005 17:23:16 -0400 (EDT)""", "Received":"""from andy.gregorykjohnson.com (andy.gregorykjohnson.com [64.32.235.228]) by sundance.gregorykjohnson.com (Postfix) with ESMTP id 5B056316746 for <[email protected]>; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""", "Received":"""by andy.gregorykjohnson.com (Postfix, from userid 1000) id 490CD9DD17; Wed, 13 Jul 2005 17:23:11 -0400 (EDT)""", "Date":"Wed, 13 Jul 2005 17:23:11 -0400", "From":""""Gregory K. Johnson" <[email protected]>""", "To":"[email protected]", "Subject":"Sample message", "Mime-Version":"1.0", "Content-Type":"""multipart/mixed; boundary="NMuMz9nt05w80d4+\"""", "Content-Disposition":"inline", "User-Agent": "Mutt/1.5.9i" } _sample_payloads = ("""This is a sample message. -- Gregory K. Johnson """, """H4sICM2D1UIAA3RleHQAC8nILFYAokSFktSKEoW0zJxUPa7wzJIMhZLyfIWczLzUYj0uAHTs 3FYlAAAA """) class MiscTestCase(unittest.TestCase): def test__all__(self): blacklist = {"linesep", "fcntl"} support.check__all__(self, mailbox, blacklist=blacklist) def test_main(): tests = (TestMailboxSuperclass, TestMaildir, TestMbox, TestMMDF, TestMH, TestBabyl, TestMessage, TestMaildirMessage, TestMboxMessage, TestMHMessage, TestBabylMessage, TestMMDFMessage, TestMessageConversion, TestProxyFile, TestPartialFile, MaildirTestCase, TestFakeMailBox, MiscTestCase) support.run_unittest(*tests) support.reap_children() if __name__ == '__main__': test_main()
mit
4,246,049,689,262,483,500
39.546329
90
0.582829
false
3.672169
true
false
false
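The conversion tests in the record above check how status flags translate when a message moves between mailbox formats. A minimal standalone sketch of one such conversion, using only the standard library (the address is placeholder example data):

# Maildir flags 'R' (replied) and 'S' (seen) become the MH sequence 'replied'
# and the Babyl label 'answered'; 'unseen' is dropped because 'S' is present.
import mailbox

raw = "From: [email protected]\n\nSample body\n"

msg = mailbox.MaildirMessage(raw)
msg.set_flags('RS')

print(mailbox.MHMessage(msg).get_sequences())   # ['replied']
print(mailbox.BabylMessage(msg).get_labels())   # ['answered']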
AmatanHead/collective-blog
s_markdown/widgets.py
1
3381
"""Markdown widgets""" from django import forms from django.utils.safestring import mark_safe from django.utils.deconstruct import deconstructible from json import dumps @deconstructible class MarkdownTextarea(forms.Textarea): """Basic textarea widget for rendering Markdown objects""" pass @deconstructible class CodeMirror(MarkdownTextarea): def __init__(self, *args, **kwargs): """Widget that uses the `CodeMirror` editor :param mode: Syntax mode name. :param addons: List of addons (each element is a relative path to the addon, without `.js` extension. Example: `mode/overlay`) :param theme: Theme name. :param theme_path: Path to the theme file. Default is `s_markdown/codemirror/theme/<theme>.css` :param keymap: A keymap name. :param options: A dict of options that will be passed to the codemirror editor. :param additional_modes: Load additional modes for `overlay` extension. :param js_var_format: A name of the js variable in which the codemirror instance is saved. """ self.mode = kwargs.pop('mode', 'markdown') self.addons = kwargs.pop('addons', []) self.theme = kwargs.pop('theme', 'default') self.theme_path = kwargs.pop('theme_path', 's_markdown/codemirror/theme/%s.css' % self.theme) self.keymap = kwargs.pop('keymap', None) self.options = kwargs.pop('options', {}) self.additional_modes = kwargs.pop('additional_modes', []) self.js_var_format = kwargs.pop('js_var_format', None) self.options.update(dict(mode=self.mode, theme=self.theme)) self.option_json = dumps(self.options) super(CodeMirror, self).__init__(*args, **kwargs) @property def media(self): """Construct a list of mediafiles required for this widget :return: `forms.Media` instance. """ css = ['s_markdown/codemirror/lib/codemirror.css'] if self.theme: css.append(self.theme_path) js = ['s_markdown/codemirror/lib/codemirror.js'] js.extend('s_markdown/codemirror/addon/%s.js' % a for a in self.addons) if self.keymap: js.append('s_markdown/codemirror/keymap/%s.js' % self.keymap) if self.mode: js.append('s_markdown/codemirror/mode/%s/%s.js' % (self.mode, self.mode)) for mode in self.additional_modes: js.append('s_markdown/codemirror/mode/%s/%s.js' % (mode, mode)) return forms.Media( css=dict(all=css), js=js, ) def render(self, name, value, attrs=None): """Render this widget :param value: Current field vlue. :param attrs: Attributes of the widget. :param name: Name of the widget. :return: Rendered html. """ if self.js_var_format is not None: js_var_bit = 'var %s = ' % (self.js_var_format % name) else: js_var_bit = '' output = [super(CodeMirror, self).render(name, value, attrs), '<script type="text/javascript">' '%sCodeMirror.fromTextArea(' 'document.getElementById(%s), %s);' '</script>' % (js_var_bit, '"id_%s"' % name, self.option_json)] return mark_safe('\n'.join(output))
mit
1,420,852,129,473,943,600
36.153846
101
0.599823
false
3.828992
false
false
false
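The record above defines a Django form widget wrapping the CodeMirror editor. A minimal usage sketch, assuming the module is importable as s_markdown.widgets (inferred from the record's path field) and that the CodeMirror static assets the widget references are available to the project:

from django import forms
from s_markdown.widgets import CodeMirror   # import path is an assumption

class PostForm(forms.Form):
    # the keyword arguments below are the ones handled by CodeMirror.__init__
    body = forms.CharField(
        widget=CodeMirror(
            mode='markdown',
            theme='default',
            addons=['mode/overlay'],
            options={'lineNumbers': True},
            js_var_format='editor_%s',
        )
    )

Rendering the form then emits the textarea plus the inline script that calls CodeMirror.fromTextArea on it, and form.media lists the CSS/JS files assembled by the widget's media property.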
tmfoltz/worldengine
worldengine/generation.py
1
7471
from noise import snoise2 from worldengine.world import Step from worldengine.simulations.basic import find_threshold_f from worldengine.simulations.hydrology import WatermapSimulation from worldengine.simulations.irrigation import IrrigationSimulation from worldengine.simulations.humidity import HumiditySimulation from worldengine.simulations.temperature import TemperatureSimulation from worldengine.simulations.permeability import PermeabilitySimulation from worldengine.simulations.erosion import ErosionSimulation from worldengine.simulations.precipitation import PrecipitationSimulation from worldengine.simulations.biome import BiomeSimulation from worldengine.common import anti_alias, get_verbose, matrix_min_and_max, rescale_value # ------------------ # Initial generation # ------------------ def center_land(world): """Translate the map horizontally and vertically to put as much ocean as possible at the borders. It operates on elevation and plates map""" min_sum_on_y = None y_with_min_sum = None latshift = 0 for y in range(world.height): sum_on_y = 0 for x in range(world.width): sum_on_y += world.elevation['data'][y][x] if min_sum_on_y is None or sum_on_y < min_sum_on_y: min_sum_on_y = sum_on_y y_with_min_sum = y if get_verbose(): print("geo.center_land: height complete") min_sum_on_x = None x_with_min_sum = None for x in range(world.width): sum_on_x = 0 for y in range(world.height): sum_on_x += world.elevation['data'][y][x] if min_sum_on_x is None or sum_on_x < min_sum_on_x: min_sum_on_x = sum_on_x x_with_min_sum = x if get_verbose(): print("geo.center_land: width complete") new_elevation_data = [] new_plates = [] for y in range(world.height): new_elevation_data.append([]) new_plates.append([]) src_y = (y_with_min_sum + y - latshift) % world.height for x in range(world.width): src_x = (x_with_min_sum + x) % world.width new_elevation_data[y].append(world.elevation['data'][src_y][src_x]) new_plates[y].append(world.plates[src_y][src_x]) world.elevation['data'] = new_elevation_data world.plates = new_plates if get_verbose(): print("geo.center_land: width complete") def place_oceans_at_map_borders(world): """ Lower the elevation near the border of the map """ ocean_border = int(min(30, max(world.width / 5, world.height / 5))) def place_ocean(x, y, i): world.elevation['data'][y][x] = \ (world.elevation['data'][y][x] * i) / ocean_border for x in range(world.width): for i in range(ocean_border): place_ocean(x, i, i) place_ocean(x, world.height - i - 1, i) for y in range(world.height): for i in range(ocean_border): place_ocean(i, y, i) place_ocean(world.width - i - 1, y, i) def add_noise_to_elevation(world, seed): octaves = 8 freq = 16.0 * octaves for y in range(world.height): for x in range(world.width): n = snoise2(x / freq * 2, y / freq * 2, octaves, base=seed) world.elevation['data'][y][x] += n def fill_ocean(elevation, sea_level): width = len(elevation[0]) height = len(elevation) ocean = [[False for x in range(width)] for y in range(height)] # TODO: use numpy to_expand = [] for x in range(width): if elevation[0][x] <= sea_level: to_expand.append((x, 0)) if elevation[height - 1][x] <= sea_level: to_expand.append((x, height - 1)) for y in range(height): if elevation[y][0] <= sea_level: to_expand.append((0, y)) if elevation[y][width - 1] <= sea_level: to_expand.append((width - 1, y)) for t in to_expand: tx, ty = t if not ocean[ty][tx]: ocean[ty][tx] = True for px, py in _around(tx, ty, width, height): if not ocean[py][px] and elevation[py][px] <= sea_level: to_expand.append((px, py)) return 
ocean def initialize_ocean_and_thresholds(world, ocean_level=1.0): """ Calculate the ocean, the sea depth and the elevation thresholds :param world: a world having elevation but not thresholds :param ocean_level: the elevation representing the ocean level :return: nothing, the world will be changed """ e = world.elevation['data'] ocean = fill_ocean(e, ocean_level) hl = find_threshold_f(e, 0.10) ml = find_threshold_f(e, 0.03) e_th = [('sea', ocean_level), ('plain', hl), ('hill', ml), ('mountain', None)] world.set_ocean(ocean) world.set_elevation(e, e_th) world.sea_depth = sea_depth(world, ocean_level) # ---- # Misc # ---- def sea_depth(world, sea_level): sea_depth = [[sea_level - world.elevation['data'][y][x] for x in range(world.width)] for y in range(world.height)] for y in range(world.height): for x in range(world.width): if world.tiles_around((x, y), radius=1, predicate=world.is_land): sea_depth[y][x] = 0 elif world.tiles_around((x, y), radius=2, predicate=world.is_land): sea_depth[y][x] *= 0.3 elif world.tiles_around((x, y), radius=3, predicate=world.is_land): sea_depth[y][x] *= 0.5 elif world.tiles_around((x, y), radius=4, predicate=world.is_land): sea_depth[y][x] *= 0.7 elif world.tiles_around((x, y), radius=5, predicate=world.is_land): sea_depth[y][x] *= 0.9 sea_depth = anti_alias(sea_depth, 10) min_depth, max_depth = matrix_min_and_max(sea_depth) sea_depth = [[rescale_value(sea_depth[y][x], min_depth, max_depth, 0.0, 1.0) for x in range(world.width)] for y in range(world.height)] return sea_depth def _around(x, y, width, height): ps = [] for dx in range(-1, 2): nx = x + dx if 0 <= nx < width: for dy in range(-1, 2): ny = y + dy if 0 <= ny < height and (dx != 0 or dy != 0): ps.append((nx, ny)) return ps def generate_world(w, step): if isinstance(step, str): step = Step.get_by_name(step) seed = w.seed if not step.include_precipitations: return w # Precipitation with thresholds PrecipitationSimulation().execute(w, seed) if not step.include_erosion: return w ErosionSimulation().execute(w, seed) if get_verbose(): print("...erosion calculated") WatermapSimulation().execute(w, seed) # FIXME: create setters IrrigationSimulation().execute(w, seed) TemperatureSimulation().execute(w, seed) HumiditySimulation().execute(w, seed) PermeabilitySimulation().execute(w, seed) cm, biome_cm = BiomeSimulation().execute(w, seed) for cl in cm.keys(): count = cm[cl] if get_verbose(): print("%s = %i" % (str(cl), count)) if get_verbose(): print('') # empty line print('Biome obtained:') for cl in biome_cm.keys(): count = biome_cm[cl] if get_verbose(): print(" %30s = %7i" % (str(cl), count)) return w
mit
62,308,796,191,271,064
32.352679
89
0.584125
false
3.344226
false
false
false
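fill_ocean in the record above is a border-seeded flood fill: every border cell at or below sea level becomes ocean, and the fill spreads (including diagonally, via _around) to connected low cells. A toy illustration, assuming the module is importable as worldengine.generation (inferred from the record's path field):

# Hypothetical import path inferred from the record's path field.
from worldengine.generation import fill_ocean

elevation = [
    [0.0, 0.0, 0.0, 0.0],
    [0.0, 2.0, 2.0, 0.0],
    [0.0, 2.0, 0.5, 0.0],   # 0.5 is below sea level and adjacent to the border
    [0.0, 0.0, 0.0, 0.0],
]

for row in fill_ocean(elevation, sea_level=1.0):
    print(row)
# [True, True, True, True]
# [True, False, False, True]
# [True, False, True, True]
# [True, True, True, True]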
jfterpstra/bluebottle
bluebottle/wallposts/models.py
1
8042
from django.db import models from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import fields from django_extensions.db.fields import (ModificationDateTimeField, CreationDateTimeField) from django.utils.text import Truncator from django.utils.translation import ugettext_lazy as _ from polymorphic.models import PolymorphicModel from .managers import ReactionManager, WallpostManager WALLPOST_TEXT_MAX_LENGTH = getattr(settings, 'WALLPOST_TEXT_MAX_LENGTH', 300) WALLPOST_REACTION_MAX_LENGTH = getattr(settings, 'WALLPOST_REACTION_MAX_LENGTH', 300) GROUP_PERMS = { 'Staff': { 'perms': ( 'add_reaction', 'change_reaction', 'delete_reaction', 'add_wallpost', 'change_wallpost', 'delete_wallpost', 'add_mediawallpost', 'change_mediawallpost', 'delete_mediawallpost', 'add_textwallpost', 'change_textwallpost', 'delete_textwallpost', 'add_systemwallpost', 'change_systemwallpost', 'delete_systemwallpost', 'add_mediawallpostphoto', 'change_mediawallpostphoto', 'delete_mediawallpostphoto', ) } } class Wallpost(PolymorphicModel): """ The Wallpost base class. This class will never be used directly because the content of a Wallpost is always defined in the child classes. Implementation Note: Normally this would be an abstract class but it's not possible to make this an abstract class and have the polymorphic behaviour of sorting on the common fields. """ @property def wallpost_type(self): return 'unknown' # The user who wrote the wall post. This can be empty to support wallposts # without users (e.g. anonymous # TextWallposts, system Wallposts for donations etc.) author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost", blank=True, null=True) editor = models.ForeignKey( settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_("The last user to edit this wallpost.")) # The metadata for the wall post. created = CreationDateTimeField(_('created')) updated = ModificationDateTimeField(_('updated')) deleted = models.DateTimeField(_('deleted'), blank=True, null=True) ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True, default=None) # Generic foreign key so we can connect it to any object. content_type = models.ForeignKey( ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s") object_id = models.PositiveIntegerField(_('object ID')) content_object = fields.GenericForeignKey('content_type', 'object_id') share_with_facebook = models.BooleanField(default=False) share_with_twitter = models.BooleanField(default=False) share_with_linkedin = models.BooleanField(default=False) email_followers = models.BooleanField(default=True) donation = models.ForeignKey('donations.Donation', verbose_name=_("Donation"), related_name='donation', null=True, blank=True) # Manager objects = WallpostManager() class Meta: ordering = ('created',) def __unicode__(self): return str(self.id) class MediaWallpost(Wallpost): # The content of the wall post. 
@property def wallpost_type(self): return 'media' title = models.CharField(max_length=60) text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True, default='') video_url = models.URLField(max_length=100, blank=True, default='') def __unicode__(self): return Truncator(self.text).words(10) # FIXME: See how we can re-enable this # def save(self, *args, **kwargs): # super(MediaWallpost, self).save(*args, **kwargs) # # # Mark the photos as deleted when the MediaWallpost is deleted. # if self.deleted: # for photo in self.photos.all(): # if not photo.deleted: # photo.deleted = self.deleted # photo.save() class MediaWallpostPhoto(models.Model): mediawallpost = models.ForeignKey(MediaWallpost, related_name='photos', null=True, blank=True) photo = models.ImageField(upload_to='mediawallpostphotos') deleted = models.DateTimeField(_('deleted'), blank=True, null=True) ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True, default=None) author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost_photo", blank=True, null=True) editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_( "The last user to edit this wallpost photo.")) class TextWallpost(Wallpost): # The content of the wall post. @property def wallpost_type(self): return 'text' text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH) def __unicode__(self): return Truncator(self.text).words(10) class SystemWallpost(Wallpost): # The content of the wall post. @property def wallpost_type(self): return 'system' text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True) # Generic foreign key so we can connect any object to it. related_type = models.ForeignKey(ContentType, verbose_name=_('related type')) related_id = models.PositiveIntegerField(_('related ID')) related_object = fields.GenericForeignKey('related_type', 'related_id') def __unicode__(self): return Truncator(self.text).words(10) class Reaction(models.Model): """ A user reaction or comment to a Wallpost. This model is based on the Comments model from django.contrib.comments. """ # Who posted this reaction. User will need to be logged in to # make a reaction. author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name='wallpost_reactions') editor = models.ForeignKey( settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, related_name='+', help_text=_("The last user to edit this reaction.")) # The reaction text and the wallpost it's a reaction to. text = models.TextField(_('reaction text'), max_length=WALLPOST_REACTION_MAX_LENGTH) wallpost = models.ForeignKey(Wallpost, related_name='reactions') # Metadata for the reaction. created = CreationDateTimeField(_('created')) updated = ModificationDateTimeField(_('updated')) deleted = models.DateTimeField(_('deleted'), blank=True, null=True) ip_address = models.GenericIPAddressField(_('IP address'), blank=True, null=True, default=None) # Manager objects = ReactionManager() objects_with_deleted = models.Manager() class Meta: ordering = ('created',) verbose_name = _('Reaction') verbose_name_plural = _('Reactions') def __unicode__(self): s = "{0}: {1}".format(self.author.get_full_name(), self.text) return Truncator(s).words(10) import mails import bluebottle.wallposts.signals
bsd-3-clause
-1,037,258,378,431,632,900
37.295238
85
0.615145
false
4.32831
false
false
false
rootofevil/watercounter
watercounter/app/sqlmodel.py
1
4586
# -*- coding: utf-8 -*- from app import db, mail from sqlalchemy.orm import backref, relationship from datetime import datetime from sqlalchemy_utils import PasswordType from sqlalchemy.sql.schema import ForeignKey from flask_mail import Message from config import HOSTNAME class Waterhistory(db.Model): id = db.Column(db.Integer, primary_key = True) hw_counter = db.Column(db.Integer) cw_counter = db.Column(db.Integer) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User', backref=backref('history', lazy='dynamic')) month = db.Column(db.Integer) def __init__(self, hw_counter, cw_counter, user): self.hw_counter = hw_counter self.cw_counter = cw_counter self.user = user self.month = datetime.utcnow().month class User(db.Model): id = db.Column(db.Integer, primary_key = True) name = db.Column(db.String(80)) email = db.Column(db.String(120), unique=True) phone = db.Column(db.String(12), unique=True) is_active = db.Column(db.Boolean, default = True) role = db.Column(db.String(20), default = 'user') flat_number = db.Column(db.Integer) password = db.Column(PasswordType(schemes=['pbkdf2_sha512', 'md5_crypt'], deprecated=['md5_crypt'])) email_verified = db.Column(db.Boolean, default = False) phone_verified = db.Column(db.Boolean, default = False) def __init__(self, name, email, phone, flat_number, password): self.name = name self.email = email self.phone = phone self.flat_number = flat_number self.password = password def is_authenticated(self): return True def is_anonymous(self): return False def get_id(self): return unicode(self.id) def _gen_act_code(self): import binascii, os act_code = binascii.hexlify(os.urandom(48)) raw = Act_code(self, act_code) db.session.add(raw) db.session.commit() return act_code def verify_email(self, act_code = None): if act_code is None: act_code = self._gen_act_code() link = 'http://' + HOSTNAME + '/activate/' + act_code msg = Message('Hello!', recipients=[self.email], sender=('WC Project', '[email protected]')) msg.html = '<a href="' + link + '">Link</a>' mail.send(msg) return True saved_codes = Act_code.query.filter_by(user = self) for saved_code in saved_codes: if saved_code.code == act_code and (datetime.now() - saved_code.created).seconds <= 43200: self.email_verified = True db.session.commit() return True def verify_phone(self, act_code): pass return False def __repr__(self): return 'User %r' % self.name class Application(db.Model): id = db.Column(db.Integer, primary_key = True) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User', backref = backref('apps', lazy = 'dynamic')) link = db.Column(db.String(100)) created = db.Column(db.DateTime) is_active = db.Column(db.Boolean()) def __init__(self, user, link, created = None, is_active = True): self.user = user self.link = link if created is None: created = datetime.utcnow() self.created = created self.is_active = is_active class Act_code(db.Model): id = db.Column(db.Integer, primary_key = True) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User', backref = backref('codes', lazy = 'dynamic')) code = db.Column(db.String(50)) created = db.Column(db.DateTime) def __init__(self, user, code): self.user = user self.code = code self.created = datetime.now() class Providers_oAuth(db.Model): id = db.Column(db.Integer, primary_key = True) name = db.Column(db.String(20)) consumer_key = db.Column(db.String(120)) consumer_secret = db.Column(db.String(120)) request_token_params = db.Column(db.String(120)) base_url = 
db.Column(db.String(120)) request_token_url = db.Column(db.String(120)) access_token_method = db.Column(db.String(10)) access_token_url = db.Column(db.String(120)) authorize_url = db.Column(db.String(120))
gpl-3.0
-8,005,757,017,711,927,000
34.983871
104
0.589184
false
3.651274
false
false
false
jshaffstall/PyPhysicsSandbox
py2d/Math/Transform.py
1
2380
class Transform(object): """Class for representing affine transformations""" def __init__(self, data): self.data = data @staticmethod def unit(): """Get a new unit tranformation""" return Transform([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) @staticmethod def move(dx, dy): """Get a transformation that moves by dx, dy""" return Transform([[1, 0, dx], [0, 1, dy], [0, 0, 1]]) @staticmethod def rotate(phi): """Get a transformation that rotates by phi""" return Transform([[math.cos(phi), -math.sin(phi), 0], [math.sin(phi), math.cos(phi), 0], [0, 0, 1]]) @staticmethod def rotate_around(cx, cy, phi): """Get a transformation that rotates around (cx, cy) by phi""" return Transform.move(cx, cy) * Transform.rotate(phi) * Transform.move(-cx, -cy) @staticmethod def scale(sx, sy): """Get a transformation that scales by sx, sy""" return Transform([[sx, 0, 0], [0, sy, 0], [0, 0, 1]]) @staticmethod def mirror_x(): """Get a transformation that mirrors along the x axis""" return Transform([[-1, 0, 0], [ 0, 1, 0], [ 0, 0, 1]]) @staticmethod def mirror_y(): """Get a transformation that mirrors along the y axis""" return Transform([[ 1, 0, 0], [ 0,-1, 0], [ 0, 0, 1]]) def __add__(self, b): t = Transform() t.data = [[self.data[x][y] + b.data[x][y] for y in range(3)] for x in range(3)] return t def __sub__(self, b): t = Transform() t.data = [[self.data[x][y] - b.data[x][y] for y in range(3)] for x in range(3)] return t def __mul__(self, val): if isinstance(val, Vector): x = val.x * self.data[0][0] + val.y * self.data[0][1] + self.data[0][2] y = val.x * self.data[1][0] + val.y * self.data[1][1] + self.data[1][2] return Vector(x,y) elif isinstance(val, Transform): data = [[0 for y in range(3)] for x in range(3)] for i in range(3): for j in range(3): for k in range(3): data[i][j] += self.data[i][k] * val.data[k][j] return Transform(data) elif isinstance(val, Polygon): p_transform = [ self * v for v in val.points ] return Polygon.from_pointlist(p_transform) else: raise ValueError("Unknown multiplier: %s" % val)
mit
-787,264,642,545,114,100
26.045455
82
0.542017
false
2.986198
false
false
false
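The Transform class above composes affine transforms through 3x3 homogeneous-matrix multiplication (the __mul__ overload). A standalone illustration of that product, kept independent of the stored module, composing a translation by (2, 3) with a uniform scale by 2:

# The same row-by-column product that Transform.__mul__ computes for two transforms.
move = [[1, 0, 2], [0, 1, 3], [0, 0, 1]]     # Transform.move(2, 3).data
scale = [[2, 0, 0], [0, 2, 0], [0, 0, 1]]    # Transform.scale(2, 2).data

composed = [[sum(move[i][k] * scale[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]
print(composed)   # [[2, 0, 2], [0, 2, 3], [0, 0, 1]] -> scale first, then translate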
President3D/Quality-SPC
src/Ui/InMainWindow.py
1
108855
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'InMainWindow.ui' # # Created by: PyQt5 UI code generator 5.6 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_myMainWindow(object): def setupUi(self, myMainWindow): myMainWindow.setObjectName("myMainWindow") myMainWindow.resize(1024, 768) myMainWindow.setWindowTitle("Qualiy SPC") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/Icons/Images/Micrometer-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) myMainWindow.setWindowIcon(icon) myMainWindow.setAccessibleName("myMainWindow") myMainWindow.setStyleSheet("") self.myCentralwidget = QtWidgets.QWidget(myMainWindow) self.myCentralwidget.setAccessibleName("myCentralwidget") self.myCentralwidget.setStyleSheet("background-color: rgb(240, 240, 240);") self.myCentralwidget.setObjectName("myCentralwidget") self.gridLayoutCentralwidget = QtWidgets.QGridLayout(self.myCentralwidget) self.gridLayoutCentralwidget.setContentsMargins(6, 2, 6, 2) self.gridLayoutCentralwidget.setHorizontalSpacing(6) self.gridLayoutCentralwidget.setVerticalSpacing(0) self.gridLayoutCentralwidget.setObjectName("gridLayoutCentralwidget") self.myStackedWidget = QtWidgets.QStackedWidget(self.myCentralwidget) self.myStackedWidget.setAccessibleName("myStackedWidget") self.myStackedWidget.setObjectName("myStackedWidget") self.myPageTestInstruction = QtWidgets.QWidget() self.myPageTestInstruction.setAccessibleName("myPageTestInstruction") self.myPageTestInstruction.setObjectName("myPageTestInstruction") self.myGridLayoutPageTestInstruction = QtWidgets.QGridLayout(self.myPageTestInstruction) self.myGridLayoutPageTestInstruction.setContentsMargins(0, 0, 0, 0) self.myGridLayoutPageTestInstruction.setSpacing(1) self.myGridLayoutPageTestInstruction.setObjectName("myGridLayoutPageTestInstruction") self.myGroupBoxSpc = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxSpc.setAccessibleName("myGroupBoxSpc") self.myGroupBoxSpc.setStyleSheet("#myGroupBoxSpc {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxSpc.setObjectName("myGroupBoxSpc") self.gridLayoutGroupBoxSpc = QtWidgets.QGridLayout(self.myGroupBoxSpc) self.gridLayoutGroupBoxSpc.setContentsMargins(4, 15, 4, 4) self.gridLayoutGroupBoxSpc.setSpacing(4) self.gridLayoutGroupBoxSpc.setObjectName("gridLayoutGroupBoxSpc") self.myFrameSpc = QtWidgets.QFrame(self.myGroupBoxSpc) self.myFrameSpc.setFrameShape(QtWidgets.QFrame.NoFrame) self.myFrameSpc.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameSpc.setLineWidth(0) self.myFrameSpc.setObjectName("myFrameSpc") self.myGridLayoutSpc = QtWidgets.QGridLayout(self.myFrameSpc) self.myGridLayoutSpc.setContentsMargins(0, 0, 0, 0) self.myGridLayoutSpc.setSpacing(0) self.myGridLayoutSpc.setObjectName("myGridLayoutSpc") self.gridLayoutGroupBoxSpc.addWidget(self.myFrameSpc, 0, 0, 1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxSpc, 0, 0, 1, 1) self.myGroupBoxSetpoint = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxSetpoint.setAccessibleName("myGroupBoxSetpoint") self.myGroupBoxSetpoint.setStyleSheet("#myGroupBoxSetpoint {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; 
border-radius: 2px; margin-top: 0ex} #myGroupBoxSetpoint::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxSetpoint.setObjectName("myGroupBoxSetpoint") self.gridLayoutGroupBoxSetpoint = QtWidgets.QGridLayout(self.myGroupBoxSetpoint) self.gridLayoutGroupBoxSetpoint.setContentsMargins(4, 15, 4, 4) self.gridLayoutGroupBoxSetpoint.setSpacing(4) self.gridLayoutGroupBoxSetpoint.setObjectName("gridLayoutGroupBoxSetpoint") self.myScrollAreaSetpoint = QtWidgets.QScrollArea(self.myGroupBoxSetpoint) self.myScrollAreaSetpoint.setAccessibleName("myScrollAreaSetpoint") self.myScrollAreaSetpoint.setStyleSheet("background-color: transparent;") self.myScrollAreaSetpoint.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaSetpoint.setFrameShadow(QtWidgets.QFrame.Plain) self.myScrollAreaSetpoint.setWidgetResizable(True) self.myScrollAreaSetpoint.setObjectName("myScrollAreaSetpoint") self.myScrollAreaSetpointContents = QtWidgets.QWidget() self.myScrollAreaSetpointContents.setGeometry(QtCore.QRect(0, 0, 293, 195)) self.myScrollAreaSetpointContents.setAccessibleName("myScrollAreaSetpointContents") self.myScrollAreaSetpointContents.setStyleSheet("background-color: transparent;") self.myScrollAreaSetpointContents.setObjectName("myScrollAreaSetpointContents") self.gridLayoutScrollAreaSetpoint = QtWidgets.QGridLayout(self.myScrollAreaSetpointContents) self.gridLayoutScrollAreaSetpoint.setContentsMargins(1, 1, 1, 1) self.gridLayoutScrollAreaSetpoint.setSpacing(1) self.gridLayoutScrollAreaSetpoint.setObjectName("gridLayoutScrollAreaSetpoint") self.myFrameSetpointHline = QtWidgets.QFrame(self.myScrollAreaSetpointContents) self.myFrameSetpointHline.setAccessibleName("myFrameSetpointHline") self.myFrameSetpointHline.setStyleSheet("color: rgb(154, 154, 154);") self.myFrameSetpointHline.setFrameShape(QtWidgets.QFrame.HLine) self.myFrameSetpointHline.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameSetpointHline.setObjectName("myFrameSetpointHline") self.gridLayoutScrollAreaSetpoint.addWidget(self.myFrameSetpointHline, 3, 0, 1, 1) self.myLabelType = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelType.setFont(font) self.myLabelType.setAccessibleName("myLabelType") self.myLabelType.setStyleSheet("background-color: transparent;") self.myLabelType.setText("") self.myLabelType.setObjectName("myLabelType") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelType, 0, 0, 1, 1) self.myLabelValue = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelValue.setFont(font) self.myLabelValue.setAccessibleName("myLabelValue") self.myLabelValue.setStyleSheet("background-color: transparent;") self.myLabelValue.setObjectName("myLabelValue") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelValue, 4, 0, 1, 1) self.myLabelReference = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelReference.setFont(font) self.myLabelReference.setAccessibleName("myLabelReference") self.myLabelReference.setStyleSheet("background-color: transparent;") self.myLabelReference.setObjectName("myLabelReference") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelReference, 1, 0, 1, 1) self.myLabelEquipment = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() 
font.setPointSize(12) self.myLabelEquipment.setFont(font) self.myLabelEquipment.setAccessibleName("myLabelEquipment") self.myLabelEquipment.setStyleSheet("background-color: transparent;") self.myLabelEquipment.setObjectName("myLabelEquipment") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelEquipment, 2, 0, 1, 1) self.myLabelTolerance = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelTolerance.setFont(font) self.myLabelTolerance.setAccessibleName("myLabelTolerance") self.myLabelTolerance.setStyleSheet("background-color: transparent;") self.myLabelTolerance.setObjectName("myLabelTolerance") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelTolerance, 5, 0, 1, 1) self.myLabelInterference = QtWidgets.QLabel(self.myScrollAreaSetpointContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelInterference.setFont(font) self.myLabelInterference.setAccessibleName("myLabelInterference") self.myLabelInterference.setStyleSheet("background-color: transparent;") self.myLabelInterference.setObjectName("myLabelInterference") self.gridLayoutScrollAreaSetpoint.addWidget(self.myLabelInterference, 6, 0, 1, 1) self.myScrollAreaSetpoint.setWidget(self.myScrollAreaSetpointContents) self.gridLayoutGroupBoxSetpoint.addWidget(self.myScrollAreaSetpoint, 0, 0, 1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxSetpoint, 0, 1, 1, 1) self.myGroupBoxCharacteristics = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxCharacteristics.setAccessibleName("myGroupBoxCharacteristics") self.myGroupBoxCharacteristics.setStyleSheet("#myGroupBoxCharacteristics {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristics::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxCharacteristics.setObjectName("myGroupBoxCharacteristics") self.gridLayoutGroupBoxCharacteristics = QtWidgets.QGridLayout(self.myGroupBoxCharacteristics) self.gridLayoutGroupBoxCharacteristics.setContentsMargins(1, 15, 1, 1) self.gridLayoutGroupBoxCharacteristics.setSpacing(4) self.gridLayoutGroupBoxCharacteristics.setObjectName("gridLayoutGroupBoxCharacteristics") self.myScrollAreaCharacteristics = QtWidgets.QScrollArea(self.myGroupBoxCharacteristics) self.myScrollAreaCharacteristics.setAccessibleName("myScrollAreaCharacteristics") self.myScrollAreaCharacteristics.setStyleSheet("background-color: transparent;") self.myScrollAreaCharacteristics.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaCharacteristics.setWidgetResizable(True) self.myScrollAreaCharacteristics.setObjectName("myScrollAreaCharacteristics") self.myScrollAreaCharacteristicsContents = QtWidgets.QWidget() self.myScrollAreaCharacteristicsContents.setGeometry(QtCore.QRect(0, 0, 400, 416)) self.myScrollAreaCharacteristicsContents.setAccessibleName("myScrollAreaCharacteristicsContents") self.myScrollAreaCharacteristicsContents.setStyleSheet("#myScrollAreaCharacteristics {background-color: transparent;}") self.myScrollAreaCharacteristicsContents.setObjectName("myScrollAreaCharacteristicsContents") self.gridLayoutScrollAreaCharacteristics = QtWidgets.QGridLayout(self.myScrollAreaCharacteristicsContents) self.gridLayoutScrollAreaCharacteristics.setContentsMargins(1, 
1, 1, 1) self.gridLayoutScrollAreaCharacteristics.setHorizontalSpacing(6) self.gridLayoutScrollAreaCharacteristics.setVerticalSpacing(4) self.gridLayoutScrollAreaCharacteristics.setObjectName("gridLayoutScrollAreaCharacteristics") self.myLabelTestInstruction = QtWidgets.QLabel(self.myScrollAreaCharacteristicsContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelTestInstruction.setFont(font) self.myLabelTestInstruction.setAccessibleName("myLabelTestInstruction") self.myLabelTestInstruction.setText("") self.myLabelTestInstruction.setObjectName("myLabelTestInstruction") self.gridLayoutScrollAreaCharacteristics.addWidget(self.myLabelTestInstruction, 0, 0, 1, 1) self.myTableViewCharacteristics = QtWidgets.QTableView(self.myScrollAreaCharacteristicsContents) self.myTableViewCharacteristics.setAccessibleName("myTableViewCharacteristics") self.myTableViewCharacteristics.setStyleSheet("QHeaderView::section {\n" " background-color: lightgray;\n" " color: black;\n" " padding: 4px;\n" " border: 1px solid black;\n" "}\n" "\n" "QHeaderView::section:checked\n" "{\n" " background-color: lightgray;\n" "}") self.myTableViewCharacteristics.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTableViewCharacteristics.setFrameShadow(QtWidgets.QFrame.Plain) self.myTableViewCharacteristics.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.myTableViewCharacteristics.setAlternatingRowColors(True) self.myTableViewCharacteristics.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.myTableViewCharacteristics.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.myTableViewCharacteristics.setObjectName("myTableViewCharacteristics") self.gridLayoutScrollAreaCharacteristics.addWidget(self.myTableViewCharacteristics, 1, 0, 1, 1) self.myScrollAreaCharacteristics.setWidget(self.myScrollAreaCharacteristicsContents) self.gridLayoutGroupBoxCharacteristics.addWidget(self.myScrollAreaCharacteristics, 0, 0, 1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxCharacteristics, 0, 2, 2, 1) self.myGroupBoxDeviation = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxDeviation.setAccessibleName("myGroupBoxDeviation") self.myGroupBoxDeviation.setStyleSheet("#myGroupBoxDeviation {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxDeviation.setObjectName("myGroupBoxDeviation") self.gridLayoutGroupBoxDeviation = QtWidgets.QGridLayout(self.myGroupBoxDeviation) self.gridLayoutGroupBoxDeviation.setContentsMargins(4, 15, 4, 4) self.gridLayoutGroupBoxDeviation.setSpacing(4) self.gridLayoutGroupBoxDeviation.setObjectName("gridLayoutGroupBoxDeviation") self.myFrameDeviation = QtWidgets.QFrame(self.myGroupBoxDeviation) self.myFrameDeviation.setFrameShape(QtWidgets.QFrame.NoFrame) self.myFrameDeviation.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameDeviation.setLineWidth(0) self.myFrameDeviation.setObjectName("myFrameDeviation") self.myGridLayoutDeviation = QtWidgets.QGridLayout(self.myFrameDeviation) self.myGridLayoutDeviation.setContentsMargins(0, 0, 0, 0) self.myGridLayoutDeviation.setSpacing(0) self.myGridLayoutDeviation.setObjectName("myGridLayoutDeviation") self.gridLayoutGroupBoxDeviation.addWidget(self.myFrameDeviation, 0, 0, 1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxDeviation, 1, 0, 1, 1) 
self.myGroupBoxActualValue = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxActualValue.setAccessibleName("myGroupBoxActualValue") self.myGroupBoxActualValue.setStyleSheet("#myGroupBoxActualValue {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxActualValue::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxActualValue.setObjectName("myGroupBoxActualValue") self.gridLayout_2 = QtWidgets.QGridLayout(self.myGroupBoxActualValue) self.gridLayout_2.setContentsMargins(4, 15, 4, 4) self.gridLayout_2.setSpacing(4) self.gridLayout_2.setObjectName("gridLayout_2") self.myToolButtonOk = QtWidgets.QToolButton(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myToolButtonOk.sizePolicy().hasHeightForWidth()) self.myToolButtonOk.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(12) self.myToolButtonOk.setFont(font) self.myToolButtonOk.setAccessibleName("myToolButtonOk") self.myToolButtonOk.setStyleSheet("#myToolButtonOk {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myToolButtonOk:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myToolButtonOk:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(":/Icons/Images/Approval-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myToolButtonOk.setIcon(icon1) self.myToolButtonOk.setIconSize(QtCore.QSize(40, 40)) self.myToolButtonOk.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.myToolButtonOk.setObjectName("myToolButtonOk") self.gridLayout_2.addWidget(self.myToolButtonOk, 3, 1, 2, 1) self.myLineEditSerialNo = QtWidgets.QLineEdit(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(2) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myLineEditSerialNo.sizePolicy().hasHeightForWidth()) self.myLineEditSerialNo.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(18) self.myLineEditSerialNo.setFont(font) self.myLineEditSerialNo.setAccessibleName("myLineEditSerialNo") self.myLineEditSerialNo.setStyleSheet("#myLineEditSerialNo {\n" "border: 1px solid gray;\n" "border-radius: 2px;\n" "padding: 3 3px;\n" "background: white;\n" "selection-background-color: darkgray;\n" "}") self.myLineEditSerialNo.setObjectName("myLineEditSerialNo") self.gridLayout_2.addWidget(self.myLineEditSerialNo, 4, 0, 1, 1) 
self.myLabelSerialNo = QtWidgets.QLabel(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(2) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myLabelSerialNo.sizePolicy().hasHeightForWidth()) self.myLabelSerialNo.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(12) self.myLabelSerialNo.setFont(font) self.myLabelSerialNo.setAccessibleName("myLabelSerialNo") self.myLabelSerialNo.setStyleSheet("background-color: transparent;") self.myLabelSerialNo.setObjectName("myLabelSerialNo") self.gridLayout_2.addWidget(self.myLabelSerialNo, 3, 0, 1, 1) self.myToolButtonNok = QtWidgets.QToolButton(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myToolButtonNok.sizePolicy().hasHeightForWidth()) self.myToolButtonNok.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(12) self.myToolButtonNok.setFont(font) self.myToolButtonNok.setAccessibleName("myToolButtonNok") self.myToolButtonNok.setStyleSheet("#myToolButtonNok {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myToolButtonNok:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myToolButtonNok:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(":/Icons/Images/Cancel-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myToolButtonNok.setIcon(icon2) self.myToolButtonNok.setIconSize(QtCore.QSize(40, 40)) self.myToolButtonNok.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon) self.myToolButtonNok.setObjectName("myToolButtonNok") self.gridLayout_2.addWidget(self.myToolButtonNok, 3, 2, 2, 1) self.myLineEditActualValue = QtWidgets.QLineEdit(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myLineEditActualValue.sizePolicy().hasHeightForWidth()) self.myLineEditActualValue.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(18) self.myLineEditActualValue.setFont(font) self.myLineEditActualValue.setAccessibleName("myLineEditActualValue") self.myLineEditActualValue.setStyleSheet("#myLineEditActualValue {\n" "border: 1px solid gray;\n" "border-radius: 2px;\n" "padding: 3 3px;\n" "background: white;\n" "selection-background-color: darkgray;\n" "}") self.myLineEditActualValue.setObjectName("myLineEditActualValue") self.gridLayout_2.addWidget(self.myLineEditActualValue, 1, 0, 1, 3) self.myFrameActualValueHLine = QtWidgets.QFrame(self.myGroupBoxActualValue) sizePolicy = 
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.myFrameActualValueHLine.sizePolicy().hasHeightForWidth()) self.myFrameActualValueHLine.setSizePolicy(sizePolicy) self.myFrameActualValueHLine.setAccessibleName("myFrameActualValueHLine") self.myFrameActualValueHLine.setStyleSheet("color: rgb(154, 154, 154);") self.myFrameActualValueHLine.setFrameShape(QtWidgets.QFrame.HLine) self.myFrameActualValueHLine.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameActualValueHLine.setObjectName("myFrameActualValueHLine") self.gridLayout_2.addWidget(self.myFrameActualValueHLine, 2, 0, 1, 3) self.myLabelActualValue = QtWidgets.QLabel(self.myGroupBoxActualValue) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(1) sizePolicy.setVerticalStretch(1) sizePolicy.setHeightForWidth(self.myLabelActualValue.sizePolicy().hasHeightForWidth()) self.myLabelActualValue.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelActualValue.setFont(font) self.myLabelActualValue.setAccessibleName("myLabelActualValue") self.myLabelActualValue.setStyleSheet("background-color: transparent;") self.myLabelActualValue.setObjectName("myLabelActualValue") self.gridLayout_2.addWidget(self.myLabelActualValue, 0, 0, 1, 1) self.myLabelActualValuePreview = QtWidgets.QLabel(self.myGroupBoxActualValue) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelActualValuePreview.setFont(font) self.myLabelActualValuePreview.setStyleSheet("#myLabelActualValuePreview {\n" "border: 1px solid gray;\n" "border-radius: 2px;\n" "padding: 3 3px;\n" "background: transparent;\n" "}") self.myLabelActualValuePreview.setObjectName("myLabelActualValuePreview") self.gridLayout_2.addWidget(self.myLabelActualValuePreview, 0, 1, 1, 2) self.gridLayout_2.setColumnStretch(0, 2) self.gridLayout_2.setColumnStretch(1, 1) self.gridLayout_2.setColumnStretch(2, 1) self.gridLayout_2.setRowStretch(0, 1) self.gridLayout_2.setRowStretch(1, 1) self.gridLayout_2.setRowStretch(2, 1) self.gridLayout_2.setRowStretch(3, 1) self.gridLayout_2.setRowStretch(4, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxActualValue, 1, 1, 1, 1) self.myGroupBoxImage = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxImage.setAccessibleName("myGroupBoxImage") self.myGroupBoxImage.setStyleSheet("#myGroupBoxImage {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxImage::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxImage.setObjectName("myGroupBoxImage") self.gridLayoutGroupBoxImage = QtWidgets.QGridLayout(self.myGroupBoxImage) self.gridLayoutGroupBoxImage.setContentsMargins(4, 15, 4, 4) self.gridLayoutGroupBoxImage.setSpacing(4) self.gridLayoutGroupBoxImage.setObjectName("gridLayoutGroupBoxImage") self.myLabelImageAmount = QtWidgets.QLabel(self.myGroupBoxImage) font = QtGui.QFont() font.setPointSize(12) self.myLabelImageAmount.setFont(font) self.myLabelImageAmount.setAccessibleName("myLabelImageAmount") self.myLabelImageAmount.setStyleSheet("background-color: 
transparent;") self.myLabelImageAmount.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing) self.myLabelImageAmount.setObjectName("myLabelImageAmount") self.gridLayoutGroupBoxImage.addWidget(self.myLabelImageAmount, 4, 1, 1, 1) self.myPushButtonBackward = QtWidgets.QPushButton(self.myGroupBoxImage) font = QtGui.QFont() font.setPointSize(12) self.myPushButtonBackward.setFont(font) self.myPushButtonBackward.setAccessibleName("myPushButtonBackward") self.myPushButtonBackward.setStyleSheet("#myPushButtonBackward {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonBackward:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonBackward:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") self.myPushButtonBackward.setObjectName("myPushButtonBackward") self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonBackward, 1, 1, 1, 1) self.myPushButtonZoom = QtWidgets.QPushButton(self.myGroupBoxImage) font = QtGui.QFont() font.setPointSize(12) self.myPushButtonZoom.setFont(font) self.myPushButtonZoom.setAccessibleName("myPushButtonZoom") self.myPushButtonZoom.setStyleSheet("#myPushButtonZoom {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonZoom:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonZoom:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") self.myPushButtonZoom.setObjectName("myPushButtonZoom") self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonZoom, 2, 1, 1, 1) self.myPushButtonForward = QtWidgets.QPushButton(self.myGroupBoxImage) font = QtGui.QFont() font.setPointSize(12) self.myPushButtonForward.setFont(font) self.myPushButtonForward.setAccessibleName("myPushButtonForward") self.myPushButtonForward.setStyleSheet("#myPushButtonForward {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonForward:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 
255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonForward:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") self.myPushButtonForward.setObjectName("myPushButtonForward") self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonForward, 0, 1, 1, 1) self.myLabelImage = QtWidgets.QLabel(self.myGroupBoxImage) self.myLabelImage.setAccessibleName("myLabelImage") self.myLabelImage.setStyleSheet("background-color: transparent;") self.myLabelImage.setText("") self.myLabelImage.setScaledContents(False) self.myLabelImage.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.myLabelImage.setObjectName("myLabelImage") self.gridLayoutGroupBoxImage.addWidget(self.myLabelImage, 0, 0, 5, 1) self.myPushButtonVideo = QtWidgets.QPushButton(self.myGroupBoxImage) font = QtGui.QFont() font.setPointSize(12) self.myPushButtonVideo.setFont(font) self.myPushButtonVideo.setAccessibleName("myPushButtonVideo") self.myPushButtonVideo.setStyleSheet("#myPushButtonVideo {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonVideo:hover {color: black;\n" "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(187, 187, 187, 255));\n" "border-width: 2px;\n" "border-color: gray;\n" "border-style: outset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "#myPushButtonVideo:pressed {color: black;\n" "background-color: qlineargradient(spread:reflect, x1:0, y1:0.517, x2:0, y2:0, stop:0 rgba(255, 255, 255, 255), stop:1 rgba(218, 218, 218, 255));\n" "border-width: 1px;\n" "border-color: gray;\n" "border-style: inset;\n" "border-radius: 2px;\n" "padding: 3px}\n" "") self.myPushButtonVideo.setObjectName("myPushButtonVideo") self.gridLayoutGroupBoxImage.addWidget(self.myPushButtonVideo, 3, 1, 1, 1) self.gridLayoutGroupBoxImage.setColumnStretch(0, 6) self.gridLayoutGroupBoxImage.setColumnStretch(1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxImage, 2, 0, 1, 2) self.myGroupBoxDescription = QtWidgets.QGroupBox(self.myPageTestInstruction) self.myGroupBoxDescription.setAccessibleName("myGroupBoxDescription") self.myGroupBoxDescription.setStyleSheet("#myGroupBoxDescription {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDescription::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxDescription.setObjectName("myGroupBoxDescription") self.gridLayoutGroupBoxDescription = QtWidgets.QGridLayout(self.myGroupBoxDescription) self.gridLayoutGroupBoxDescription.setContentsMargins(1, 15, 1, 1) self.gridLayoutGroupBoxDescription.setHorizontalSpacing(1) self.gridLayoutGroupBoxDescription.setVerticalSpacing(6) 
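        # Description panel: transparent QTextBrowser for the test-step description; then the stretch
        # factors of the test-instruction page and the scaffolding of the SPC page (full-screen chart
        # frame plus statistics box).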
self.gridLayoutGroupBoxDescription.setObjectName("gridLayoutGroupBoxDescription") self.myTextBrowserDescription = QtWidgets.QTextBrowser(self.myGroupBoxDescription) font = QtGui.QFont() font.setPointSize(12) self.myTextBrowserDescription.setFont(font) self.myTextBrowserDescription.setFocusPolicy(QtCore.Qt.NoFocus) self.myTextBrowserDescription.setAccessibleName("myTextBrowserDescription") self.myTextBrowserDescription.setStyleSheet("background-color: transparent;") self.myTextBrowserDescription.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTextBrowserDescription.setFrameShadow(QtWidgets.QFrame.Plain) self.myTextBrowserDescription.setObjectName("myTextBrowserDescription") self.gridLayoutGroupBoxDescription.addWidget(self.myTextBrowserDescription, 0, 0, 1, 1) self.myGridLayoutPageTestInstruction.addWidget(self.myGroupBoxDescription, 2, 2, 1, 1) self.myGridLayoutPageTestInstruction.setColumnStretch(0, 3) self.myGridLayoutPageTestInstruction.setColumnStretch(1, 3) self.myGridLayoutPageTestInstruction.setColumnStretch(2, 4) self.myGridLayoutPageTestInstruction.setRowStretch(0, 3) self.myGridLayoutPageTestInstruction.setRowStretch(1, 3) self.myGridLayoutPageTestInstruction.setRowStretch(2, 4) self.myStackedWidget.addWidget(self.myPageTestInstruction) self.myPageSpc = QtWidgets.QWidget() self.myPageSpc.setAccessibleName("myPageSpc") self.myPageSpc.setObjectName("myPageSpc") self.myGridLayoutPageSpc = QtWidgets.QGridLayout(self.myPageSpc) self.myGridLayoutPageSpc.setContentsMargins(0, 0, 0, 0) self.myGridLayoutPageSpc.setSpacing(1) self.myGridLayoutPageSpc.setObjectName("myGridLayoutPageSpc") self.myGroupBoxSpcFull = QtWidgets.QGroupBox(self.myPageSpc) self.myGroupBoxSpcFull.setStyleSheet("#myGroupBoxSpcFull {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxSpcFull::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxSpcFull.setObjectName("myGroupBoxSpcFull") self.myGridLayoutGroupBoxSpcFull = QtWidgets.QGridLayout(self.myGroupBoxSpcFull) self.myGridLayoutGroupBoxSpcFull.setContentsMargins(4, 15, 4, 4) self.myGridLayoutGroupBoxSpcFull.setSpacing(0) self.myGridLayoutGroupBoxSpcFull.setObjectName("myGridLayoutGroupBoxSpcFull") self.myFrameSpcFull = QtWidgets.QFrame(self.myGroupBoxSpcFull) self.myFrameSpcFull.setFrameShape(QtWidgets.QFrame.NoFrame) self.myFrameSpcFull.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameSpcFull.setLineWidth(0) self.myFrameSpcFull.setObjectName("myFrameSpcFull") self.myVerticalLayoutFrameSpcFull = QtWidgets.QVBoxLayout(self.myFrameSpcFull) self.myVerticalLayoutFrameSpcFull.setContentsMargins(0, 0, 0, 0) self.myVerticalLayoutFrameSpcFull.setSpacing(0) self.myVerticalLayoutFrameSpcFull.setObjectName("myVerticalLayoutFrameSpcFull") self.myGridLayoutGroupBoxSpcFull.addWidget(self.myFrameSpcFull, 0, 0, 1, 1) self.myGridLayoutPageSpc.addWidget(self.myGroupBoxSpcFull, 0, 0, 1, 1) self.myGroupBoxStatisticSpc = QtWidgets.QGroupBox(self.myPageSpc) self.myGroupBoxStatisticSpc.setStyleSheet("#myGroupBoxStatisticSpc {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxStatisticSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") 
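        # SPC statistics box: scrollable grid of caption/value label pairs (Cpk, mean, standard deviation, ±6σ).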
self.myGroupBoxStatisticSpc.setObjectName("myGroupBoxStatisticSpc") self.myGridLayoutStatisticSpc = QtWidgets.QGridLayout(self.myGroupBoxStatisticSpc) self.myGridLayoutStatisticSpc.setContentsMargins(1, 15, 1, 1) self.myGridLayoutStatisticSpc.setSpacing(4) self.myGridLayoutStatisticSpc.setObjectName("myGridLayoutStatisticSpc") self.myScrollAreaGroupBoxStatisticSpc = QtWidgets.QScrollArea(self.myGroupBoxStatisticSpc) self.myScrollAreaGroupBoxStatisticSpc.setStyleSheet("background-color: transparent;") self.myScrollAreaGroupBoxStatisticSpc.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaGroupBoxStatisticSpc.setWidgetResizable(True) self.myScrollAreaGroupBoxStatisticSpc.setObjectName("myScrollAreaGroupBoxStatisticSpc") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 396, 113)) self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") self.myGridLayoutGroupBoxStatisticSpc = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.myGridLayoutGroupBoxStatisticSpc.setContentsMargins(1, 1, 1, 1) self.myGridLayoutGroupBoxStatisticSpc.setSpacing(4) self.myGridLayoutGroupBoxStatisticSpc.setObjectName("myGridLayoutGroupBoxStatisticSpc") self.myLabelStatisticSpcCpk = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelStatisticSpcCpk.setFont(font) self.myLabelStatisticSpcCpk.setObjectName("myLabelStatisticSpcCpk") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcCpk, 0, 0, 1, 1) self.myLabelSpcCpkValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelSpcCpkValue.setFont(font) self.myLabelSpcCpkValue.setObjectName("myLabelSpcCpkValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcCpkValue, 0, 1, 1, 1) self.myLabelStatisticSpcAverage = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcAverage.setFont(font) self.myLabelStatisticSpcAverage.setObjectName("myLabelStatisticSpcAverage") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcAverage, 1, 0, 1, 1) self.myLabelSpcDeivationValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcDeivationValue.setFont(font) self.myLabelSpcDeivationValue.setObjectName("myLabelSpcDeivationValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcDeivationValue, 2, 1, 1, 1) self.myLabelStatistcSpcDeviation = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatistcSpcDeviation.setFont(font) self.myLabelStatistcSpcDeviation.setObjectName("myLabelStatistcSpcDeviation") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatistcSpcDeviation, 2, 0, 1, 1) self.myLabelSpcAverageValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcAverageValue.setFont(font) self.myLabelSpcAverageValue.setObjectName("myLabelSpcAverageValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcAverageValue, 1, 1, 1, 1) self.myLabelStatisticSpcAndSicSigma = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcAndSicSigma.setFont(font) self.myLabelStatisticSpcAndSicSigma.setObjectName("myLabelStatisticSpcAndSicSigma") 
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcAndSicSigma, 3, 0, 1, 1) self.myLabelStatisticSpcMinusSixSigma = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcMinusSixSigma.setFont(font) self.myLabelStatisticSpcMinusSixSigma.setObjectName("myLabelStatisticSpcMinusSixSigma") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcMinusSixSigma, 4, 0, 1, 1) self.myLabelSpcMinusSixSigmaValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcMinusSixSigmaValue.setFont(font) self.myLabelSpcMinusSixSigmaValue.setObjectName("myLabelSpcMinusSixSigmaValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcMinusSixSigmaValue, 4, 1, 1, 1) self.myLabelSpcAndSixSigmaValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcAndSixSigmaValue.setFont(font) self.myLabelSpcAndSixSigmaValue.setObjectName("myLabelSpcAndSixSigmaValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcAndSixSigmaValue, 3, 1, 1, 1) self.myLabelStatisticSpcUtl = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcUtl.setFont(font) self.myLabelStatisticSpcUtl.setObjectName("myLabelStatisticSpcUtl") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcUtl, 1, 3, 1, 1) self.myLabelStatisticSpcLtl = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcLtl.setFont(font) self.myLabelStatisticSpcLtl.setObjectName("myLabelStatisticSpcLtl") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcLtl, 2, 3, 1, 1) self.myLabelSpcUtlValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcUtlValue.setFont(font) self.myLabelSpcUtlValue.setObjectName("myLabelSpcUtlValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcUtlValue, 1, 4, 1, 1) self.myLabelSpcLtlValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcLtlValue.setFont(font) self.myLabelSpcLtlValue.setObjectName("myLabelSpcLtlValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcLtlValue, 2, 4, 1, 1) self.myLabelStatisticSpcUil = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcUil.setFont(font) self.myLabelStatisticSpcUil.setObjectName("myLabelStatisticSpcUil") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcUil, 3, 3, 1, 1) self.myLabelStatisticSpcLil = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelStatisticSpcLil.setFont(font) self.myLabelStatisticSpcLil.setObjectName("myLabelStatisticSpcLil") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcLil, 4, 3, 1, 1) self.myLabelSpcUilValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcUilValue.setFont(font) self.myLabelSpcUilValue.setObjectName("myLabelSpcUilValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcUilValue, 3, 4, 1, 1) self.myLabelSpcLilValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelSpcLilValue.setFont(font) self.myLabelSpcLilValue.setObjectName("myLabelSpcLilValue") 
self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcLilValue, 4, 4, 1, 1) self.myFrameVlineStatisticSpc = QtWidgets.QFrame(self.scrollAreaWidgetContents) self.myFrameVlineStatisticSpc.setStyleSheet("color: rgb(154, 154, 154);") self.myFrameVlineStatisticSpc.setFrameShape(QtWidgets.QFrame.VLine) self.myFrameVlineStatisticSpc.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameVlineStatisticSpc.setObjectName("myFrameVlineStatisticSpc") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myFrameVlineStatisticSpc, 0, 2, 5, 1) self.myLabelStatisticSpcPpm = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelStatisticSpcPpm.setFont(font) self.myLabelStatisticSpcPpm.setObjectName("myLabelStatisticSpcPpm") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelStatisticSpcPpm, 0, 3, 1, 1) self.myLabelSpcPpmValue = QtWidgets.QLabel(self.scrollAreaWidgetContents) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelSpcPpmValue.setFont(font) self.myLabelSpcPpmValue.setObjectName("myLabelSpcPpmValue") self.myGridLayoutGroupBoxStatisticSpc.addWidget(self.myLabelSpcPpmValue, 0, 4, 1, 1) self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(0, 2) self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(1, 1) self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(2, 1) self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(3, 2) self.myGridLayoutGroupBoxStatisticSpc.setColumnStretch(4, 1) self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(0, 1) self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(1, 1) self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(2, 1) self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(3, 1) self.myGridLayoutGroupBoxStatisticSpc.setRowStretch(4, 1) self.myScrollAreaGroupBoxStatisticSpc.setWidget(self.scrollAreaWidgetContents) self.myGridLayoutStatisticSpc.addWidget(self.myScrollAreaGroupBoxStatisticSpc, 0, 0, 1, 1) self.myGridLayoutPageSpc.addWidget(self.myGroupBoxStatisticSpc, 1, 0, 1, 1) self.myGroupBoxCharacteristicsSpc = QtWidgets.QGroupBox(self.myPageSpc) self.myGroupBoxCharacteristicsSpc.setStyleSheet("#myGroupBoxCharacteristicsSpc {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsSpc::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxCharacteristicsSpc.setObjectName("myGroupBoxCharacteristicsSpc") self.myGridLayoutGroupBoxCharacteristicsSpc = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsSpc) self.myGridLayoutGroupBoxCharacteristicsSpc.setContentsMargins(1, 15, 1, 1) self.myGridLayoutGroupBoxCharacteristicsSpc.setSpacing(4) self.myGridLayoutGroupBoxCharacteristicsSpc.setObjectName("myGridLayoutGroupBoxCharacteristicsSpc") self.myScrollAreaGroupBoyCharacteristicsSpc = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsSpc) self.myScrollAreaGroupBoyCharacteristicsSpc.setStyleSheet("background-color: transparent;") self.myScrollAreaGroupBoyCharacteristicsSpc.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaGroupBoyCharacteristicsSpc.setFrameShadow(QtWidgets.QFrame.Sunken) self.myScrollAreaGroupBoyCharacteristicsSpc.setWidgetResizable(True) 
self.myScrollAreaGroupBoyCharacteristicsSpc.setObjectName("myScrollAreaGroupBoyCharacteristicsSpc") self.myScrollAreaWidgetContentsSpc = QtWidgets.QWidget() self.myScrollAreaWidgetContentsSpc.setGeometry(QtCore.QRect(0, 0, 83, 94)) self.myScrollAreaWidgetContentsSpc.setStyleSheet("#myScrollAreaWidgetContentsSpc {background-color: transparent;}") self.myScrollAreaWidgetContentsSpc.setObjectName("myScrollAreaWidgetContentsSpc") self.myGridLayoutScrollAreaWidgetContentsSpc = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsSpc) self.myGridLayoutScrollAreaWidgetContentsSpc.setContentsMargins(1, 1, 1, 1) self.myGridLayoutScrollAreaWidgetContentsSpc.setSpacing(4) self.myGridLayoutScrollAreaWidgetContentsSpc.setObjectName("myGridLayoutScrollAreaWidgetContentsSpc") self.myTableViewCharacteristicsPageSpc = QtWidgets.QTableView(self.myScrollAreaWidgetContentsSpc) self.myTableViewCharacteristicsPageSpc.setStyleSheet("QHeaderView::section {\n" " background-color: lightgray;\n" " color: black;\n" " padding: 4px;\n" " border: 1px solid black;\n" "}\n" "\n" "QHeaderView::section:checked\n" "{\n" " background-color: lightgray;\n" "}") self.myTableViewCharacteristicsPageSpc.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTableViewCharacteristicsPageSpc.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.myTableViewCharacteristicsPageSpc.setAlternatingRowColors(True) self.myTableViewCharacteristicsPageSpc.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.myTableViewCharacteristicsPageSpc.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.myTableViewCharacteristicsPageSpc.setObjectName("myTableViewCharacteristicsPageSpc") self.myGridLayoutScrollAreaWidgetContentsSpc.addWidget(self.myTableViewCharacteristicsPageSpc, 1, 0, 1, 1) self.myLabelTestInstructionNamePageSpc = QtWidgets.QLabel(self.myScrollAreaWidgetContentsSpc) font = QtGui.QFont() font.setPointSize(12) self.myLabelTestInstructionNamePageSpc.setFont(font) self.myLabelTestInstructionNamePageSpc.setObjectName("myLabelTestInstructionNamePageSpc") self.myGridLayoutScrollAreaWidgetContentsSpc.addWidget(self.myLabelTestInstructionNamePageSpc, 0, 0, 1, 1) self.myScrollAreaGroupBoyCharacteristicsSpc.setWidget(self.myScrollAreaWidgetContentsSpc) self.myGridLayoutGroupBoxCharacteristicsSpc.addWidget(self.myScrollAreaGroupBoyCharacteristicsSpc, 0, 0, 1, 1) self.myGridLayoutPageSpc.addWidget(self.myGroupBoxCharacteristicsSpc, 0, 1, 2, 1) self.myGridLayoutPageSpc.setColumnStretch(0, 6) self.myGridLayoutPageSpc.setColumnStretch(1, 4) self.myGridLayoutPageSpc.setRowStretch(0, 6) self.myGridLayoutPageSpc.setRowStretch(1, 4) self.myStackedWidget.addWidget(self.myPageSpc) self.myPageDeviation = QtWidgets.QWidget() self.myPageDeviation.setAccessibleName("myPageDeviation") self.myPageDeviation.setObjectName("myPageDeviation") self.myGridLayoutPageDeviation = QtWidgets.QGridLayout(self.myPageDeviation) self.myGridLayoutPageDeviation.setContentsMargins(0, 0, 0, 0) self.myGridLayoutPageDeviation.setSpacing(1) self.myGridLayoutPageDeviation.setObjectName("myGridLayoutPageDeviation") self.myGroupBoxDeviationFull = QtWidgets.QGroupBox(self.myPageDeviation) self.myGroupBoxDeviationFull.setStyleSheet("#myGroupBoxDeviationFull {background-color: rgb(255, 255, 255); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxDeviationFull::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") 
self.myGroupBoxDeviationFull.setObjectName("myGroupBoxDeviationFull") self.myGridLayoutGroupBoxDeviationFull = QtWidgets.QGridLayout(self.myGroupBoxDeviationFull) self.myGridLayoutGroupBoxDeviationFull.setContentsMargins(4, 15, 4, 4) self.myGridLayoutGroupBoxDeviationFull.setObjectName("myGridLayoutGroupBoxDeviationFull") self.myFrameDeviationFull = QtWidgets.QFrame(self.myGroupBoxDeviationFull) self.myFrameDeviationFull.setFrameShape(QtWidgets.QFrame.NoFrame) self.myFrameDeviationFull.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameDeviationFull.setLineWidth(0) self.myFrameDeviationFull.setObjectName("myFrameDeviationFull") self.myVerticalLayoutGroupBoxDeviationFull = QtWidgets.QVBoxLayout(self.myFrameDeviationFull) self.myVerticalLayoutGroupBoxDeviationFull.setContentsMargins(0, 0, 0, 0) self.myVerticalLayoutGroupBoxDeviationFull.setSpacing(0) self.myVerticalLayoutGroupBoxDeviationFull.setObjectName("myVerticalLayoutGroupBoxDeviationFull") self.myGridLayoutGroupBoxDeviationFull.addWidget(self.myFrameDeviationFull, 0, 0, 1, 1) self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxDeviationFull, 0, 0, 1, 1) self.myGroupBoxStatisticDeviation = QtWidgets.QGroupBox(self.myPageDeviation) self.myGroupBoxStatisticDeviation.setStyleSheet("#myGroupBoxStatisticDeviation {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxStatisticDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxStatisticDeviation.setObjectName("myGroupBoxStatisticDeviation") self.myGridLayoutGroupBoxStatisticDeviation = QtWidgets.QGridLayout(self.myGroupBoxStatisticDeviation) self.myGridLayoutGroupBoxStatisticDeviation.setContentsMargins(1, 15, 1, 1) self.myGridLayoutGroupBoxStatisticDeviation.setSpacing(4) self.myGridLayoutGroupBoxStatisticDeviation.setObjectName("myGridLayoutGroupBoxStatisticDeviation") self.myScrollAreaGroupBoxStatisticDeviation = QtWidgets.QScrollArea(self.myGroupBoxStatisticDeviation) self.myScrollAreaGroupBoxStatisticDeviation.setStyleSheet("background-color: transparent;") self.myScrollAreaGroupBoxStatisticDeviation.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaGroupBoxStatisticDeviation.setFrameShadow(QtWidgets.QFrame.Sunken) self.myScrollAreaGroupBoxStatisticDeviation.setWidgetResizable(True) self.myScrollAreaGroupBoxStatisticDeviation.setObjectName("myScrollAreaGroupBoxStatisticDeviation") self.myScrollAreaWidgetContentsDeviation_2 = QtWidgets.QWidget() self.myScrollAreaWidgetContentsDeviation_2.setGeometry(QtCore.QRect(0, 0, 396, 113)) self.myScrollAreaWidgetContentsDeviation_2.setObjectName("myScrollAreaWidgetContentsDeviation_2") self.myGridLayoutScrollAreaDeviation = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsDeviation_2) self.myGridLayoutScrollAreaDeviation.setContentsMargins(1, 1, 1, 1) self.myGridLayoutScrollAreaDeviation.setSpacing(4) self.myGridLayoutScrollAreaDeviation.setObjectName("myGridLayoutScrollAreaDeviation") self.myLabelDeviationLtlValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationLtlValue.setFont(font) self.myLabelDeviationLtlValue.setObjectName("myLabelDeviationLtlValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLtlValue, 2, 4, 1, 1) 
self.myLabelDeviationLilValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationLilValue.setFont(font) self.myLabelDeviationLilValue.setObjectName("myLabelDeviationLilValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLilValue, 4, 4, 1, 1) self.myFrameVlineStatisticDeviation = QtWidgets.QFrame(self.myScrollAreaWidgetContentsDeviation_2) self.myFrameVlineStatisticDeviation.setStyleSheet("color: rgb(154, 154, 154);") self.myFrameVlineStatisticDeviation.setFrameShape(QtWidgets.QFrame.VLine) self.myFrameVlineStatisticDeviation.setFrameShadow(QtWidgets.QFrame.Plain) self.myFrameVlineStatisticDeviation.setObjectName("myFrameVlineStatisticDeviation") self.myGridLayoutScrollAreaDeviation.addWidget(self.myFrameVlineStatisticDeviation, 0, 2, 5, 1) self.myLabelDeviationCpkValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelDeviationCpkValue.setFont(font) self.myLabelDeviationCpkValue.setObjectName("myLabelDeviationCpkValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationCpkValue, 0, 1, 1, 1) self.myLabelDeviationDeivationValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationDeivationValue.setFont(font) self.myLabelDeviationDeivationValue.setObjectName("myLabelDeviationDeivationValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationDeivationValue, 2, 1, 1, 1) self.myLabelDeviationUtlValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationUtlValue.setFont(font) self.myLabelDeviationUtlValue.setObjectName("myLabelDeviationUtlValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUtlValue, 1, 4, 1, 1) self.myLabelDeviationPpmValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelDeviationPpmValue.setFont(font) self.myLabelDeviationPpmValue.setObjectName("myLabelDeviationPpmValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationPpmValue, 0, 4, 1, 1) self.myLabelDeviationAverageValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationAverageValue.setFont(font) self.myLabelDeviationAverageValue.setObjectName("myLabelDeviationAverageValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAverageValue, 1, 1, 1, 1) self.myLabelDeviationAndSixSigmaValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationAndSixSigmaValue.setFont(font) self.myLabelDeviationAndSixSigmaValue.setObjectName("myLabelDeviationAndSixSigmaValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAndSixSigmaValue, 3, 1, 1, 1) self.myLabelDeviationMinusSixSigmaValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationMinusSixSigmaValue.setFont(font) self.myLabelDeviationMinusSixSigmaValue.setObjectName("myLabelDeviationMinusSixSigmaValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationMinusSixSigmaValue, 4, 1, 1, 1) self.myLabelDeviationUilValue = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) 
font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationUilValue.setFont(font) self.myLabelDeviationUilValue.setObjectName("myLabelDeviationUilValue") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUilValue, 3, 4, 1, 1) self.myLabelDeviationCpk = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelDeviationCpk.setFont(font) self.myLabelDeviationCpk.setObjectName("myLabelDeviationCpk") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationCpk, 0, 0, 1, 1) self.myLabelDeviationAverage = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationAverage.setFont(font) self.myLabelDeviationAverage.setObjectName("myLabelDeviationAverage") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAverage, 1, 0, 1, 1) self.myLabelDeviationDeivation = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationDeivation.setFont(font) self.myLabelDeviationDeivation.setObjectName("myLabelDeviationDeivation") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationDeivation, 2, 0, 1, 1) self.myLabelDeviationAndSixSigma = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationAndSixSigma.setFont(font) self.myLabelDeviationAndSixSigma.setObjectName("myLabelDeviationAndSixSigma") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationAndSixSigma, 3, 0, 1, 1) self.myLabelDeviationMinusSixSigma = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationMinusSixSigma.setFont(font) self.myLabelDeviationMinusSixSigma.setObjectName("myLabelDeviationMinusSixSigma") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationMinusSixSigma, 4, 0, 1, 1) self.myLabelDeviationPpm = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) font.setBold(True) font.setWeight(75) self.myLabelDeviationPpm.setFont(font) self.myLabelDeviationPpm.setObjectName("myLabelDeviationPpm") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationPpm, 0, 3, 1, 1) self.myLabelDeviationUtl = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationUtl.setFont(font) self.myLabelDeviationUtl.setObjectName("myLabelDeviationUtl") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUtl, 1, 3, 1, 1) self.myLabelDeviationLtl = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationLtl.setFont(font) self.myLabelDeviationLtl.setObjectName("myLabelDeviationLtl") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLtl, 2, 3, 1, 1) self.myLabelDeviationUil = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationUil.setFont(font) self.myLabelDeviationUil.setObjectName("myLabelDeviationUil") self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationUil, 3, 3, 1, 1) self.myLabelDeviationLil = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation_2) font = QtGui.QFont() font.setPointSize(12) self.myLabelDeviationLil.setFont(font) self.myLabelDeviationLil.setObjectName("myLabelDeviationLil") 
        self.myGridLayoutScrollAreaDeviation.addWidget(self.myLabelDeviationLil, 4, 3, 1, 1)
        self.myGridLayoutScrollAreaDeviation.setColumnStretch(0, 2)
        self.myGridLayoutScrollAreaDeviation.setColumnStretch(1, 1)
        self.myGridLayoutScrollAreaDeviation.setColumnStretch(2, 1)
        self.myGridLayoutScrollAreaDeviation.setColumnStretch(3, 2)
        self.myGridLayoutScrollAreaDeviation.setColumnStretch(4, 1)
        self.myGridLayoutScrollAreaDeviation.setRowStretch(0, 1)
        self.myGridLayoutScrollAreaDeviation.setRowStretch(1, 1)
        self.myGridLayoutScrollAreaDeviation.setRowStretch(2, 1)
        self.myGridLayoutScrollAreaDeviation.setRowStretch(3, 1)
        self.myGridLayoutScrollAreaDeviation.setRowStretch(4, 1)
        self.myScrollAreaGroupBoxStatisticDeviation.setWidget(self.myScrollAreaWidgetContentsDeviation_2)
        self.myGridLayoutGroupBoxStatisticDeviation.addWidget(self.myScrollAreaGroupBoxStatisticDeviation, 0, 0, 1, 1)
        self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxStatisticDeviation, 1, 0, 1, 1)
        self.myGroupBoxCharacteristicsDeviation = QtWidgets.QGroupBox(self.myPageDeviation)
        self.myGroupBoxCharacteristicsDeviation.setStyleSheet("#myGroupBoxCharacteristicsDeviation {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsDeviation::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
        self.myGroupBoxCharacteristicsDeviation.setObjectName("myGroupBoxCharacteristicsDeviation")
        self.myGridLayoutGroupBoxCharacteristicsDeviation = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsDeviation)
        self.myGridLayoutGroupBoxCharacteristicsDeviation.setContentsMargins(1, 15, 1, 1)
        self.myGridLayoutGroupBoxCharacteristicsDeviation.setSpacing(4)
        self.myGridLayoutGroupBoxCharacteristicsDeviation.setObjectName("myGridLayoutGroupBoxCharacteristicsDeviation")
        self.myScrollAreaGroupBoxCharacteristicsDeviation = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsDeviation)
        self.myScrollAreaGroupBoxCharacteristicsDeviation.setStyleSheet("background-color: transparent;")
        self.myScrollAreaGroupBoxCharacteristicsDeviation.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.myScrollAreaGroupBoxCharacteristicsDeviation.setWidgetResizable(True)
        self.myScrollAreaGroupBoxCharacteristicsDeviation.setObjectName("myScrollAreaGroupBoxCharacteristicsDeviation")
        self.myScrollAreaWidgetContentsDeviation = QtWidgets.QWidget()
        self.myScrollAreaWidgetContentsDeviation.setGeometry(QtCore.QRect(0, 0, 83, 94))
        self.myScrollAreaWidgetContentsDeviation.setStyleSheet("#myScrollAreaWidgetContentsDeviation {background-color: transparent;}")
        self.myScrollAreaWidgetContentsDeviation.setObjectName("myScrollAreaWidgetContentsDeviation")
        self.myGridLayoutScrollAreaWidgetsDeviation = QtWidgets.QGridLayout(self.myScrollAreaWidgetContentsDeviation)
        self.myGridLayoutScrollAreaWidgetsDeviation.setContentsMargins(1, 1, 1, 1)
        self.myGridLayoutScrollAreaWidgetsDeviation.setSpacing(4)
        self.myGridLayoutScrollAreaWidgetsDeviation.setObjectName("myGridLayoutScrollAreaWidgetsDeviation")
        self.myTableViewCharacteristicsDeviationFull = QtWidgets.QTableView(self.myScrollAreaWidgetContentsDeviation)
        self.myTableViewCharacteristicsDeviationFull.setStyleSheet("QHeaderView::section {\n"
"    background-color: lightgray;\n"
"    color: black;\n"
"    padding: 4px;\n"
"    border: 1px solid black;\n"
"}\n"
"\n"
"QHeaderView::section:checked\n"
"{\n"
"    background-color: lightgray;\n"
"}")
        self.myTableViewCharacteristicsDeviationFull.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.myTableViewCharacteristicsDeviationFull.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.myTableViewCharacteristicsDeviationFull.setAlternatingRowColors(True)
        self.myTableViewCharacteristicsDeviationFull.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
        self.myTableViewCharacteristicsDeviationFull.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.myTableViewCharacteristicsDeviationFull.setObjectName("myTableViewCharacteristicsDeviationFull")
        self.myGridLayoutScrollAreaWidgetsDeviation.addWidget(self.myTableViewCharacteristicsDeviationFull, 1, 0, 1, 1)
        self.myLabelTestInstructionNamePageDeviation = QtWidgets.QLabel(self.myScrollAreaWidgetContentsDeviation)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.myLabelTestInstructionNamePageDeviation.setFont(font)
        self.myLabelTestInstructionNamePageDeviation.setObjectName("myLabelTestInstructionNamePageDeviation")
        self.myGridLayoutScrollAreaWidgetsDeviation.addWidget(self.myLabelTestInstructionNamePageDeviation, 0, 0, 1, 1)
        self.myScrollAreaGroupBoxCharacteristicsDeviation.setWidget(self.myScrollAreaWidgetContentsDeviation)
        self.myGridLayoutGroupBoxCharacteristicsDeviation.addWidget(self.myScrollAreaGroupBoxCharacteristicsDeviation, 0, 0, 1, 1)
        self.myGridLayoutPageDeviation.addWidget(self.myGroupBoxCharacteristicsDeviation, 0, 1, 2, 1)
        self.myGridLayoutPageDeviation.setColumnStretch(0, 6)
        self.myGridLayoutPageDeviation.setColumnStretch(1, 4)
        self.myGridLayoutPageDeviation.setRowStretch(0, 6)
        self.myGridLayoutPageDeviation.setRowStretch(1, 4)
        self.myStackedWidget.addWidget(self.myPageDeviation)
        self.myPageResult = QtWidgets.QWidget()
        self.myPageResult.setObjectName("myPageResult")
        self.myGridLayoutPageResult = QtWidgets.QGridLayout(self.myPageResult)
        self.myGridLayoutPageResult.setContentsMargins(0, 0, 0, 0)
        self.myGridLayoutPageResult.setSpacing(1)
        self.myGridLayoutPageResult.setObjectName("myGridLayoutPageResult")
        self.myGroupBoxCharacteristicsResult = QtWidgets.QGroupBox(self.myPageResult)
        self.myGroupBoxCharacteristicsResult.setStyleSheet("#myGroupBoxCharacteristicsResult {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxCharacteristicsResult::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}")
        self.myGroupBoxCharacteristicsResult.setObjectName("myGroupBoxCharacteristicsResult")
        self.myGridLayoutGroupBoxCharacteristicsResult = QtWidgets.QGridLayout(self.myGroupBoxCharacteristicsResult)
        self.myGridLayoutGroupBoxCharacteristicsResult.setContentsMargins(1, 15, 1, 1)
        self.myGridLayoutGroupBoxCharacteristicsResult.setSpacing(4)
        self.myGridLayoutGroupBoxCharacteristicsResult.setObjectName("myGridLayoutGroupBoxCharacteristicsResult")
        self.myScrollAreaCharacteristicsPageResult = QtWidgets.QScrollArea(self.myGroupBoxCharacteristicsResult)
        self.myScrollAreaCharacteristicsPageResult.setStyleSheet("background-color: transparent;")
        self.myScrollAreaCharacteristicsPageResult.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.myScrollAreaCharacteristicsPageResult.setWidgetResizable(True)
        self.myScrollAreaCharacteristicsPageResult.setObjectName("myScrollAreaCharacteristicsPageResult")
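        # Result page: characteristics table with the test-instruction name label, followed by the result group box.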
self.myScrollAreaCharacteristicsPageResultWidgetContents = QtWidgets.QWidget() self.myScrollAreaCharacteristicsPageResultWidgetContents.setGeometry(QtCore.QRect(0, 0, 83, 94)) self.myScrollAreaCharacteristicsPageResultWidgetContents.setStyleSheet("#myScrollAreaCharacteristicsPageResultWidgetContents {background-color: transparent;}") self.myScrollAreaCharacteristicsPageResultWidgetContents.setObjectName("myScrollAreaCharacteristicsPageResultWidgetContents") self.myGridLayoutScrollAreaCharacteristicsWidgetContents = QtWidgets.QGridLayout(self.myScrollAreaCharacteristicsPageResultWidgetContents) self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setContentsMargins(1, 1, 1, 1) self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setSpacing(4) self.myGridLayoutScrollAreaCharacteristicsWidgetContents.setObjectName("myGridLayoutScrollAreaCharacteristicsWidgetContents") self.myLabelTestInstructionNamePageResult = QtWidgets.QLabel(self.myScrollAreaCharacteristicsPageResultWidgetContents) font = QtGui.QFont() font.setPointSize(12) self.myLabelTestInstructionNamePageResult.setFont(font) self.myLabelTestInstructionNamePageResult.setObjectName("myLabelTestInstructionNamePageResult") self.myGridLayoutScrollAreaCharacteristicsWidgetContents.addWidget(self.myLabelTestInstructionNamePageResult, 0, 0, 1, 1) self.myTableViewCharacteristicsPageResult = QtWidgets.QTableView(self.myScrollAreaCharacteristicsPageResultWidgetContents) self.myTableViewCharacteristicsPageResult.setStyleSheet("QHeaderView::section {\n" " background-color: lightgray;\n" " color: black;\n" " padding: 4px;\n" " border: 1px solid black;\n" "}\n" "\n" "QHeaderView::section:checked\n" "{\n" " background-color: lightgray;\n" "}") self.myTableViewCharacteristicsPageResult.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTableViewCharacteristicsPageResult.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.myTableViewCharacteristicsPageResult.setAlternatingRowColors(True) self.myTableViewCharacteristicsPageResult.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.myTableViewCharacteristicsPageResult.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.myTableViewCharacteristicsPageResult.setObjectName("myTableViewCharacteristicsPageResult") self.myGridLayoutScrollAreaCharacteristicsWidgetContents.addWidget(self.myTableViewCharacteristicsPageResult, 1, 0, 1, 1) self.myScrollAreaCharacteristicsPageResult.setWidget(self.myScrollAreaCharacteristicsPageResultWidgetContents) self.myGridLayoutGroupBoxCharacteristicsResult.addWidget(self.myScrollAreaCharacteristicsPageResult, 0, 0, 1, 1) self.myGridLayoutPageResult.addWidget(self.myGroupBoxCharacteristicsResult, 0, 1, 1, 1) self.myGroupBoxResult = QtWidgets.QGroupBox(self.myPageResult) self.myGroupBoxResult.setStyleSheet("#myGroupBoxResult {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex} #myGroupBoxResult::title{subcontrol-origin: margin; subcontrol-position: top left; padding: 2 2px; border: 0px solid gray; border-radius: 0px; background-color: transparent;}") self.myGroupBoxResult.setObjectName("myGroupBoxResult") self.myGridLayoutGroupBoxResult = QtWidgets.QGridLayout(self.myGroupBoxResult) self.myGridLayoutGroupBoxResult.setContentsMargins(1, 15, 1, 1) self.myGridLayoutGroupBoxResult.setSpacing(4) self.myGridLayoutGroupBoxResult.setObjectName("myGridLayoutGroupBoxResult") 
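        # Result table (extended row selection) inside its scroll area, then the license page with a read-only text browser.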
self.myScrollAreaGroupBoxResult = QtWidgets.QScrollArea(self.myGroupBoxResult) self.myScrollAreaGroupBoxResult.setStyleSheet("background-color: transparent;") self.myScrollAreaGroupBoxResult.setFrameShape(QtWidgets.QFrame.NoFrame) self.myScrollAreaGroupBoxResult.setWidgetResizable(True) self.myScrollAreaGroupBoxResult.setObjectName("myScrollAreaGroupBoxResult") self.myScrollAreaGroupBoxResultWidgetContents = QtWidgets.QWidget() self.myScrollAreaGroupBoxResultWidgetContents.setGeometry(QtCore.QRect(0, 0, 83, 71)) self.myScrollAreaGroupBoxResultWidgetContents.setStyleSheet("#myScrollAreaGroupBoxWidgetContents {background-color: transparent;}") self.myScrollAreaGroupBoxResultWidgetContents.setObjectName("myScrollAreaGroupBoxResultWidgetContents") self.myGridLayoutSrcollAreaResultWidgetContents = QtWidgets.QGridLayout(self.myScrollAreaGroupBoxResultWidgetContents) self.myGridLayoutSrcollAreaResultWidgetContents.setContentsMargins(1, 1, 1, 1) self.myGridLayoutSrcollAreaResultWidgetContents.setSpacing(4) self.myGridLayoutSrcollAreaResultWidgetContents.setObjectName("myGridLayoutSrcollAreaResultWidgetContents") self.myTableViewResult = QtWidgets.QTableView(self.myScrollAreaGroupBoxResultWidgetContents) self.myTableViewResult.setStyleSheet("QHeaderView::section {\n" " background-color: lightgray;\n" " color: black;\n" " padding: 4px;\n" " border: 1px solid black;\n" "}\n" "\n" "QHeaderView::section:checked\n" "{\n" " background-color: lightgray;\n" "}") self.myTableViewResult.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTableViewResult.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) self.myTableViewResult.setAlternatingRowColors(True) self.myTableViewResult.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) self.myTableViewResult.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.myTableViewResult.setObjectName("myTableViewResult") self.myGridLayoutSrcollAreaResultWidgetContents.addWidget(self.myTableViewResult, 0, 0, 1, 1) self.myScrollAreaGroupBoxResult.setWidget(self.myScrollAreaGroupBoxResultWidgetContents) self.myGridLayoutGroupBoxResult.addWidget(self.myScrollAreaGroupBoxResult, 0, 0, 1, 1) self.myGridLayoutPageResult.addWidget(self.myGroupBoxResult, 0, 0, 1, 1) self.myGridLayoutPageResult.setColumnStretch(0, 6) self.myGridLayoutPageResult.setColumnStretch(1, 4) self.myStackedWidget.addWidget(self.myPageResult) self.myPageLicense = QtWidgets.QWidget() self.myPageLicense.setAccessibleName("myPageLicense") self.myPageLicense.setObjectName("myPageLicense") self.myGridLayoutPageLicense = QtWidgets.QGridLayout(self.myPageLicense) self.myGridLayoutPageLicense.setContentsMargins(0, 0, 0, 0) self.myGridLayoutPageLicense.setSpacing(1) self.myGridLayoutPageLicense.setObjectName("myGridLayoutPageLicense") self.myTextBrowserLicense = QtWidgets.QTextBrowser(self.myPageLicense) font = QtGui.QFont() font.setPointSize(12) self.myTextBrowserLicense.setFont(font) self.myTextBrowserLicense.setFocusPolicy(QtCore.Qt.NoFocus) self.myTextBrowserLicense.setStyleSheet("#myTextBrowserLicense {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex}") self.myTextBrowserLicense.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTextBrowserLicense.setFrameShadow(QtWidgets.QFrame.Plain) self.myTextBrowserLicense.setObjectName("myTextBrowserLicense") self.myGridLayoutPageLicense.addWidget(self.myTextBrowserLicense, 0, 0, 1, 1) 
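        # License page is added to the stack; then the contact page, menu bar (File / View / Info),
        # status bar and the first actions with their icons.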
self.myStackedWidget.addWidget(self.myPageLicense) self.myPageContact = QtWidgets.QWidget() self.myPageContact.setAccessibleName("myPageContact") self.myPageContact.setObjectName("myPageContact") self.myGridLayoutPageContact = QtWidgets.QGridLayout(self.myPageContact) self.myGridLayoutPageContact.setContentsMargins(0, 0, 0, 0) self.myGridLayoutPageContact.setSpacing(1) self.myGridLayoutPageContact.setObjectName("myGridLayoutPageContact") self.myTextBrowserContact = QtWidgets.QTextBrowser(self.myPageContact) font = QtGui.QFont() font.setPointSize(12) self.myTextBrowserContact.setFont(font) self.myTextBrowserContact.setFocusPolicy(QtCore.Qt.NoFocus) self.myTextBrowserContact.setStyleSheet("#myTextBrowserContact {background-color: qlineargradient(spread:reflect, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(240, 240, 240, 255), stop:1 rgba(255, 255, 255, 255)); border: 1px ridge gray; border-radius: 2px; margin-top: 0ex}") self.myTextBrowserContact.setFrameShape(QtWidgets.QFrame.NoFrame) self.myTextBrowserContact.setFrameShadow(QtWidgets.QFrame.Plain) self.myTextBrowserContact.setObjectName("myTextBrowserContact") self.myGridLayoutPageContact.addWidget(self.myTextBrowserContact, 0, 0, 1, 1) self.myStackedWidget.addWidget(self.myPageContact) self.gridLayoutCentralwidget.addWidget(self.myStackedWidget, 0, 0, 1, 1) myMainWindow.setCentralWidget(self.myCentralwidget) self.myMenubar = QtWidgets.QMenuBar(myMainWindow) self.myMenubar.setGeometry(QtCore.QRect(0, 0, 1024, 21)) self.myMenubar.setAccessibleName("myMenubar") self.myMenubar.setObjectName("myMenubar") self.myMenuFile = QtWidgets.QMenu(self.myMenubar) self.myMenuFile.setAccessibleName("myMenuFile") self.myMenuFile.setObjectName("myMenuFile") self.myMenuView = QtWidgets.QMenu(self.myMenubar) self.myMenuView.setAccessibleName("myMenuView") self.myMenuView.setObjectName("myMenuView") self.myMenuInfo = QtWidgets.QMenu(self.myMenubar) self.myMenuInfo.setAccessibleName("myMenuInfo") self.myMenuInfo.setObjectName("myMenuInfo") myMainWindow.setMenuBar(self.myMenubar) self.myStatusbar = QtWidgets.QStatusBar(myMainWindow) self.myStatusbar.setAccessibleName("myStatusbar") self.myStatusbar.setObjectName("myStatusbar") myMainWindow.setStatusBar(self.myStatusbar) self.myActionStartTesting = QtWidgets.QAction(myMainWindow) icon3 = QtGui.QIcon() icon3.addPixmap(QtGui.QPixmap(":/Icons/Images/Open Folder-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionStartTesting.setIcon(icon3) self.myActionStartTesting.setObjectName("myActionStartTesting") self.myActionCreateDocumentation = QtWidgets.QAction(myMainWindow) icon4 = QtGui.QIcon() icon4.addPixmap(QtGui.QPixmap(":/Icons/Images/Document-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionCreateDocumentation.setIcon(icon4) self.myActionCreateDocumentation.setObjectName("myActionCreateDocumentation") self.myActionNewTestInstruction = QtWidgets.QAction(myMainWindow) icon5 = QtGui.QIcon() icon5.addPixmap(QtGui.QPixmap(":/Icons/Images/Create New-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionNewTestInstruction.setIcon(icon5) self.myActionNewTestInstruction.setObjectName("myActionNewTestInstruction") self.myActionEditTestInstruction = QtWidgets.QAction(myMainWindow) icon6 = QtGui.QIcon() icon6.addPixmap(QtGui.QPixmap(":/Icons/Images/Edit Image-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionEditTestInstruction.setIcon(icon6) self.myActionEditTestInstruction.setObjectName("myActionEditTestInstruction") self.myActionQuit = QtWidgets.QAction(myMainWindow) icon7 = QtGui.QIcon() 
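        # Icons and remaining actions (quit, fullscreen views, license, contact, result list, scanner start),
        # menu population and the retranslateUi()/connectSlotsByName() wiring.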
icon7.addPixmap(QtGui.QPixmap(":/Icons/Images/Close Window-80.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionQuit.setIcon(icon7) self.myActionQuit.setObjectName("myActionQuit") self.myActionFullscreenTi = QtWidgets.QAction(myMainWindow) icon8 = QtGui.QIcon() icon8.addPixmap(QtGui.QPixmap(":/Icons/Images/To Do-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionFullscreenTi.setIcon(icon8) self.myActionFullscreenTi.setObjectName("myActionFullscreenTi") self.myActionFullscreenSpc = QtWidgets.QAction(myMainWindow) icon9 = QtGui.QIcon() icon9.addPixmap(QtGui.QPixmap(":/Icons/Images/Line Chart-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionFullscreenSpc.setIcon(icon9) self.myActionFullscreenSpc.setObjectName("myActionFullscreenSpc") self.myActionFullscreenDeviation = QtWidgets.QAction(myMainWindow) icon10 = QtGui.QIcon() icon10.addPixmap(QtGui.QPixmap(":/Icons/Images/Normal Distribution Histogram-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionFullscreenDeviation.setIcon(icon10) self.myActionFullscreenDeviation.setObjectName("myActionFullscreenDeviation") self.myActionLicense = QtWidgets.QAction(myMainWindow) icon11 = QtGui.QIcon() icon11.addPixmap(QtGui.QPixmap(":/Icons/Images/Diploma 1-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionLicense.setIcon(icon11) self.myActionLicense.setObjectName("myActionLicense") self.myActionContact = QtWidgets.QAction(myMainWindow) icon12 = QtGui.QIcon() icon12.addPixmap(QtGui.QPixmap(":/Icons/Images/Address Book-100.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionContact.setIcon(icon12) self.myActionContact.setObjectName("myActionContact") self.myActionResultlist = QtWidgets.QAction(myMainWindow) self.myActionResultlist.setIcon(icon4) self.myActionResultlist.setObjectName("myActionResultlist") self.myActionStartTestingScanner = QtWidgets.QAction(myMainWindow) icon13 = QtGui.QIcon() icon13.addPixmap(QtGui.QPixmap(":/Icons/Images/Barcode-96.png"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.myActionStartTestingScanner.setIcon(icon13) self.myActionStartTestingScanner.setObjectName("myActionStartTestingScanner") self.myMenuFile.addAction(self.myActionStartTesting) self.myMenuFile.addAction(self.myActionStartTestingScanner) self.myMenuFile.addSeparator() self.myMenuFile.addAction(self.myActionNewTestInstruction) self.myMenuFile.addAction(self.myActionEditTestInstruction) self.myMenuFile.addSeparator() self.myMenuFile.addAction(self.myActionQuit) self.myMenuView.addAction(self.myActionFullscreenTi) self.myMenuView.addAction(self.myActionFullscreenSpc) self.myMenuView.addAction(self.myActionFullscreenDeviation) self.myMenuView.addAction(self.myActionResultlist) self.myMenuInfo.addAction(self.myActionLicense) self.myMenuInfo.addAction(self.myActionContact) self.myMenubar.addAction(self.myMenuFile.menuAction()) self.myMenubar.addAction(self.myMenuView.menuAction()) self.myMenubar.addAction(self.myMenuInfo.menuAction()) self.retranslateUi(myMainWindow) self.myStackedWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(myMainWindow) myMainWindow.setTabOrder(self.myLineEditActualValue, self.myLineEditSerialNo) myMainWindow.setTabOrder(self.myLineEditSerialNo, self.myToolButtonOk) myMainWindow.setTabOrder(self.myToolButtonOk, self.myToolButtonNok) myMainWindow.setTabOrder(self.myToolButtonNok, self.myPushButtonForward) myMainWindow.setTabOrder(self.myPushButtonForward, self.myPushButtonBackward) myMainWindow.setTabOrder(self.myPushButtonBackward, self.myPushButtonZoom) 
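        # Tab order continues below; retranslateUi() then assigns all user-visible (German) strings.
        # Typical use of such a generated Ui class (sketch only; the surrounding class name is not
        # visible here and "Ui_myMainWindow" is an assumed placeholder):
        #     app = QtWidgets.QApplication(sys.argv)
        #     window = QtWidgets.QMainWindow()
        #     ui = Ui_myMainWindow()
        #     ui.setupUi(window)
        #     window.show()
        #     sys.exit(app.exec_())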
        myMainWindow.setTabOrder(self.myPushButtonZoom, self.myPushButtonVideo)
        myMainWindow.setTabOrder(self.myPushButtonVideo, self.myTableViewCharacteristics)
        myMainWindow.setTabOrder(self.myTableViewCharacteristics, self.myScrollAreaCharacteristics)
        myMainWindow.setTabOrder(self.myScrollAreaCharacteristics, self.myScrollAreaSetpoint)

    def retranslateUi(self, myMainWindow):
        _translate = QtCore.QCoreApplication.translate
        self.myGroupBoxSpc.setTitle(_translate("myMainWindow", "SPC"))
        self.myFrameSpc.setAccessibleName(_translate("myMainWindow", "myFrameSpc"))
        self.myGroupBoxSetpoint.setTitle(_translate("myMainWindow", "Soll"))
        self.myGroupBoxCharacteristics.setTitle(_translate("myMainWindow", "Merkmale"))
        self.myGroupBoxDeviation.setTitle(_translate("myMainWindow", "Verteilung"))
        self.myFrameDeviation.setAccessibleName(_translate("myMainWindow", "myFrameDeviation"))
        self.myGroupBoxActualValue.setTitle(_translate("myMainWindow", "Ist"))
        self.myToolButtonOk.setText(_translate("myMainWindow", "i.O."))
        self.myLineEditSerialNo.setPlaceholderText(_translate("myMainWindow", "..."))
        self.myLabelSerialNo.setText(_translate("myMainWindow", "Serien Nr.:"))
        self.myToolButtonNok.setText(_translate("myMainWindow", "n.i.O."))
        self.myLineEditActualValue.setPlaceholderText(_translate("myMainWindow", "..."))
        self.myLabelActualValue.setText(_translate("myMainWindow", "Messwert:"))
        self.myLabelActualValuePreview.setAccessibleName(_translate("myMainWindow", "myLabelActualValuePreview"))
        self.myGroupBoxImage.setTitle(_translate("myMainWindow", "Bilder"))
        self.myPushButtonBackward.setText(_translate("myMainWindow", "Zurück"))
        self.myPushButtonZoom.setText(_translate("myMainWindow", "Zoom"))
        self.myPushButtonForward.setText(_translate("myMainWindow", "Vor"))
        self.myPushButtonVideo.setText(_translate("myMainWindow", "Video"))
        self.myGroupBoxDescription.setTitle(_translate("myMainWindow", "Beschreibung"))
        self.myGroupBoxSpcFull.setAccessibleName(_translate("myMainWindow", "myGroupBoxSpcFull"))
        self.myGroupBoxSpcFull.setTitle(_translate("myMainWindow", "SPC Fullscreen"))
        self.myFrameSpcFull.setAccessibleName(_translate("myMainWindow", "myFrameSpcFull"))
        self.myGroupBoxStatisticSpc.setAccessibleName(_translate("myMainWindow", "myGroupBoxStatisticSpc"))
        self.myGroupBoxStatisticSpc.setTitle(_translate("myMainWindow", "Statistik"))
        self.myScrollAreaGroupBoxStatisticSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxStatisticSpc"))
        self.myLabelStatisticSpcCpk.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcCpk"))
        self.myLabelStatisticSpcCpk.setText(_translate("myMainWindow", "Cpk:"))
        self.myLabelSpcCpkValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcCpkValue"))
        self.myLabelSpcCpkValue.setText(_translate("myMainWindow", "..."))
        self.myLabelStatisticSpcAverage.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcAverage"))
        self.myLabelStatisticSpcAverage.setText(_translate("myMainWindow", "Mittelwert (µ):"))
        self.myLabelSpcDeivationValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcDeivationValue"))
        self.myLabelSpcDeivationValue.setText(_translate("myMainWindow", "..."))
        self.myLabelStatistcSpcDeviation.setAccessibleName(_translate("myMainWindow", "myLabelStatistcSpcDeviation"))
        self.myLabelStatistcSpcDeviation.setText(_translate("myMainWindow", "Standardabweichung (σ):"))
        self.myLabelSpcAverageValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcAverageValue"))
        self.myLabelSpcAverageValue.setText(_translate("myMainWindow", "..."))
        self.myLabelStatisticSpcAndSicSigma.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcAndSicSigma"))
        self.myLabelStatisticSpcAndSicSigma.setText(_translate("myMainWindow", "µ + 6σ:"))
        self.myLabelStatisticSpcMinusSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcMinusSixSigma"))
        self.myLabelStatisticSpcMinusSixSigma.setText(_translate("myMainWindow", "µ - 6σ:"))
        self.myLabelSpcMinusSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcMinusSixSigmaValue"))
        self.myLabelSpcMinusSixSigmaValue.setText(_translate("myMainWindow", "..."))
        self.myLabelSpcAndSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcAndSixSigmaValue"))
        self.myLabelSpcAndSixSigmaValue.setText(_translate("myMainWindow", "..."))
        self.myLabelStatisticSpcUtl.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcUtl"))
        self.myLabelStatisticSpcUtl.setText(_translate("myMainWindow", "Obere Toleranzgrenze:"))
        self.myLabelStatisticSpcLtl.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcLtl"))
        self.myLabelStatisticSpcLtl.setText(_translate("myMainWindow", "Untere Toleranzgrenze:"))
        self.myLabelSpcUtlValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcUtlValue"))
        self.myLabelSpcUtlValue.setText(_translate("myMainWindow", "..."))
        self.myLabelSpcLtlValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcLtlValue"))
        self.myLabelSpcLtlValue.setText(_translate("myMainWindow", "..."))
        self.myLabelStatisticSpcUil.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcUil"))
        self.myLabelStatisticSpcUil.setText(_translate("myMainWindow", "Obere Eingriffsgrenze:"))
        self.myLabelStatisticSpcLil.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcLil"))
        self.myLabelStatisticSpcLil.setText(_translate("myMainWindow", "Untere Eingriffsgrenze:"))
        self.myLabelSpcUilValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcUilValue"))
        self.myLabelSpcUilValue.setText(_translate("myMainWindow", "..."))
        self.myLabelSpcLilValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcLilValue"))
        self.myLabelSpcLilValue.setText(_translate("myMainWindow", "..."))
        self.myFrameVlineStatisticSpc.setAccessibleName(_translate("myMainWindow", "myFrameVlineStatisticSpc"))
        self.myLabelStatisticSpcPpm.setAccessibleName(_translate("myMainWindow", "myLabelStatisticSpcPpm"))
        self.myLabelStatisticSpcPpm.setText(_translate("myMainWindow", "PPM:"))
        self.myLabelSpcPpmValue.setAccessibleName(_translate("myMainWindow", "myLabelSpcPpmValue"))
        self.myLabelSpcPpmValue.setText(_translate("myMainWindow", "..."))
        self.myGroupBoxCharacteristicsSpc.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsSpc"))
        self.myGroupBoxCharacteristicsSpc.setTitle(_translate("myMainWindow", "Merkmale"))
        self.myScrollAreaGroupBoyCharacteristicsSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoyCharacteristicsSpc"))
        self.myScrollAreaWidgetContentsSpc.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsSpc"))
        self.myTableViewCharacteristicsPageSpc.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsPageSpc"))
        self.myLabelTestInstructionNamePageSpc.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageSpc"))
        self.myLabelTestInstructionNamePageSpc.setText(_translate("myMainWindow", "..."))
        self.myGroupBoxDeviationFull.setAccessibleName(_translate("myMainWindow", "myGroupBoxDeviationFull"))
        self.myGroupBoxDeviationFull.setTitle(_translate("myMainWindow", "Verteilung Fullscreen"))
        self.myFrameDeviationFull.setAccessibleName(_translate("myMainWindow", "myFrameDeviationFull"))
        self.myGroupBoxStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myGroupBoxStatisticDeviation"))
        self.myGroupBoxStatisticDeviation.setTitle(_translate("myMainWindow", "Statistik"))
        self.myScrollAreaGroupBoxStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxStatisticDeviation"))
        self.myScrollAreaWidgetContentsDeviation_2.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsDeviation"))
        self.myLabelDeviationLtlValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLtlValue"))
        self.myLabelDeviationLtlValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationLilValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLilValue"))
        self.myLabelDeviationLilValue.setText(_translate("myMainWindow", "..."))
        self.myFrameVlineStatisticDeviation.setAccessibleName(_translate("myMainWindow", "myFrameVlineStatisticDeviation"))
        self.myLabelDeviationCpkValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationCpkValue"))
        self.myLabelDeviationCpkValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationDeivationValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationDeivationValue"))
        self.myLabelDeviationDeivationValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationUtlValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUtlValue"))
        self.myLabelDeviationUtlValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationPpmValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationPpmValue"))
        self.myLabelDeviationPpmValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationAverageValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAverageValue"))
        self.myLabelDeviationAverageValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationAndSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAndSixSigmaValue"))
        self.myLabelDeviationAndSixSigmaValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationMinusSixSigmaValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationMinusSixSigmaValue"))
        self.myLabelDeviationMinusSixSigmaValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationUilValue.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUilValue"))
        self.myLabelDeviationUilValue.setText(_translate("myMainWindow", "..."))
        self.myLabelDeviationCpk.setAccessibleName(_translate("myMainWindow", "myLabelDeviationCpk"))
        self.myLabelDeviationCpk.setText(_translate("myMainWindow", "Cpk:"))
        self.myLabelDeviationAverage.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAverage"))
        self.myLabelDeviationAverage.setText(_translate("myMainWindow", "Mittelwert (µ):"))
        self.myLabelDeviationDeivation.setAccessibleName(_translate("myMainWindow", "myLabelDeviationDeivation"))
        self.myLabelDeviationDeivation.setText(_translate("myMainWindow", "Standardabweichung (σ):"))
        self.myLabelDeviationAndSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelDeviationAndSixSigma"))
        self.myLabelDeviationAndSixSigma.setText(_translate("myMainWindow", "µ + 6σ:"))
        self.myLabelDeviationMinusSixSigma.setAccessibleName(_translate("myMainWindow", "myLabelDeviationMinusSixSigma"))
        self.myLabelDeviationMinusSixSigma.setText(_translate("myMainWindow", "µ - 6σ:"))
        self.myLabelDeviationPpm.setAccessibleName(_translate("myMainWindow",
"myLabelDeviationPpm")) self.myLabelDeviationPpm.setText(_translate("myMainWindow", "PPM:")) self.myLabelDeviationUtl.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUtl")) self.myLabelDeviationUtl.setText(_translate("myMainWindow", "Obere Toleranzgrenze:")) self.myLabelDeviationLtl.setText(_translate("myMainWindow", "Untere Toleranzgrenze:")) self.myLabelDeviationUil.setAccessibleName(_translate("myMainWindow", "myLabelDeviationUil")) self.myLabelDeviationUil.setText(_translate("myMainWindow", "Obere Eingriffsgrenze:")) self.myLabelDeviationLil.setAccessibleName(_translate("myMainWindow", "myLabelDeviationLil")) self.myLabelDeviationLil.setText(_translate("myMainWindow", "Untere Eingriffsgrenze:")) self.myGroupBoxCharacteristicsDeviation.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsDeviation")) self.myGroupBoxCharacteristicsDeviation.setTitle(_translate("myMainWindow", "Merkmale")) self.myScrollAreaGroupBoxCharacteristicsDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxCharacteristicsDeviation")) self.myScrollAreaWidgetContentsDeviation.setAccessibleName(_translate("myMainWindow", "myScrollAreaWidgetContentsDeviation")) self.myTableViewCharacteristicsDeviationFull.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsDeviationFull")) self.myLabelTestInstructionNamePageDeviation.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageDeviation")) self.myLabelTestInstructionNamePageDeviation.setText(_translate("myMainWindow", "...")) self.myPageResult.setAccessibleName(_translate("myMainWindow", "myPageResult")) self.myGroupBoxCharacteristicsResult.setAccessibleName(_translate("myMainWindow", "myGroupBoxCharacteristicsResult")) self.myGroupBoxCharacteristicsResult.setTitle(_translate("myMainWindow", "Merkmale")) self.myScrollAreaCharacteristicsPageResult.setAccessibleName(_translate("myMainWindow", "myScrollAreaCharacteristicsPageResult")) self.myScrollAreaCharacteristicsPageResultWidgetContents.setAccessibleName(_translate("myMainWindow", "myScrollAreaCharacteristicsPageResultWidgetContents")) self.myLabelTestInstructionNamePageResult.setAccessibleName(_translate("myMainWindow", "myLabelTestInstructionNamePageResult")) self.myLabelTestInstructionNamePageResult.setText(_translate("myMainWindow", "...")) self.myTableViewCharacteristicsPageResult.setAccessibleName(_translate("myMainWindow", "myTableViewCharacteristicsPageResult")) self.myGroupBoxResult.setAccessibleName(_translate("myMainWindow", "myGroupBoxResult")) self.myGroupBoxResult.setTitle(_translate("myMainWindow", "Ergebnisliste")) self.myScrollAreaGroupBoxResult.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxResult")) self.myScrollAreaGroupBoxResultWidgetContents.setAccessibleName(_translate("myMainWindow", "myScrollAreaGroupBoxResultWidgetContents")) self.myTableViewResult.setAccessibleName(_translate("myMainWindow", "myTableViewResult")) self.myTextBrowserLicense.setAccessibleName(_translate("myMainWindow", "myTextBrowserLicense")) self.myTextBrowserContact.setAccessibleName(_translate("myMainWindow", "myTextBrowserContact")) self.myMenuFile.setTitle(_translate("myMainWindow", "Datei")) self.myMenuView.setTitle(_translate("myMainWindow", "Ansicht")) self.myMenuInfo.setTitle(_translate("myMainWindow", "Info")) self.myActionStartTesting.setText(_translate("myMainWindow", "Prüfplan öffnen - Manuell")) self.myActionStartTesting.setStatusTip(_translate("myMainWindow", "Die Prüfungen mit einem vorhandenen Prüfplan 
beginnen")) self.myActionStartTesting.setShortcut(_translate("myMainWindow", "Ctrl+O")) self.myActionCreateDocumentation.setText(_translate("myMainWindow", "Ergebnis dokumentieren")) self.myActionCreateDocumentation.setStatusTip(_translate("myMainWindow", "Die Ergebnisse in einem Prüfprotokoll dokumentieren")) self.myActionCreateDocumentation.setShortcut(_translate("myMainWindow", "Ctrl+I")) self.myActionNewTestInstruction.setText(_translate("myMainWindow", "Prüfplan erstellen")) self.myActionNewTestInstruction.setStatusTip(_translate("myMainWindow", "Einen neuen Prüfplan erstellen")) self.myActionNewTestInstruction.setShortcut(_translate("myMainWindow", "Ctrl+N")) self.myActionEditTestInstruction.setText(_translate("myMainWindow", "Prüfplan bearbeiten")) self.myActionEditTestInstruction.setStatusTip(_translate("myMainWindow", "Einen vorhandenen Prüfplan bearbeiten")) self.myActionEditTestInstruction.setShortcut(_translate("myMainWindow", "Ctrl+E")) self.myActionQuit.setText(_translate("myMainWindow", "Beenden")) self.myActionQuit.setStatusTip(_translate("myMainWindow", "Das Programm beenden")) self.myActionQuit.setShortcut(_translate("myMainWindow", "Ctrl+Q")) self.myActionFullscreenTi.setText(_translate("myMainWindow", "Prüfplan")) self.myActionFullscreenTi.setStatusTip(_translate("myMainWindow", "Den Prüfplan im Vollbildmodus anzeigen")) self.myActionFullscreenTi.setShortcut(_translate("myMainWindow", "Ctrl+T")) self.myActionFullscreenSpc.setText(_translate("myMainWindow", "SPC Diagramm")) self.myActionFullscreenSpc.setStatusTip(_translate("myMainWindow", "Das SPC Diagramm im Vollbildmodus anzeigen")) self.myActionFullscreenSpc.setShortcut(_translate("myMainWindow", "Ctrl+S")) self.myActionFullscreenDeviation.setText(_translate("myMainWindow", "Verteilungsdiagramm")) self.myActionFullscreenDeviation.setStatusTip(_translate("myMainWindow", "Das Verteilungsdiagramm im Vollbildmodus anzeigen")) self.myActionFullscreenDeviation.setShortcut(_translate("myMainWindow", "Ctrl+D")) self.myActionLicense.setText(_translate("myMainWindow", "Lizenz")) self.myActionLicense.setStatusTip(_translate("myMainWindow", "Die Lizenzvereinbarung anzeigen")) self.myActionContact.setText(_translate("myMainWindow", "Kontakt")) self.myActionContact.setStatusTip(_translate("myMainWindow", "Die Kontaktdaten anzeigen")) self.myActionResultlist.setText(_translate("myMainWindow", "Ergebnisliste")) self.myActionResultlist.setStatusTip(_translate("myMainWindow", "Die Ergebnisliste anzeigen")) self.myActionResultlist.setShortcut(_translate("myMainWindow", "Ctrl+R")) self.myActionStartTestingScanner.setText(_translate("myMainWindow", "Prüfplan öffnen - Scanner")) self.myActionStartTestingScanner.setStatusTip(_translate("myMainWindow", "Die Prüfungen mit einem vorhandenen Prüfplan beginnen, mit Scannerunterstützung")) self.myActionStartTestingScanner.setShortcut(_translate("myMainWindow", "Ctrl+B")) import InResources_rc
gpl-3.0
1,980,948,443,473,866,500
71.64753
497
0.762483
false
3.558731
true
false
false
itdxer/neupy
examples/competitive/sofm_compare_weight_init.py
1
1754
from itertools import product import matplotlib.pyplot as plt from neupy import algorithms, utils, init from utils import plot_2d_grid, make_circle, make_elipse, make_square plt.style.use('ggplot') utils.reproducible() if __name__ == '__main__': GRID_WIDTH = 4 GRID_HEIGHT = 4 datasets = [ make_square(), make_circle(), make_elipse(corr=0.7), ] configurations = [{ 'weight_init': init.Uniform(0, 1), 'title': 'Random uniform initialization', }, { 'weight_init': 'sample_from_data', 'title': 'Sampled from the data', }, { 'weight_init': 'init_pca', 'title': 'Initialize with PCA', }] plt.figure(figsize=(15, 15)) plt.title("Compare weight initialization methods for SOFM") red, blue = ('#E24A33', '#348ABD') n_columns = len(configurations) n_rows = len(datasets) index = 1 for data, conf in product(datasets, configurations): sofm = algorithms.SOFM( n_inputs=2, features_grid=(GRID_HEIGHT, GRID_WIDTH), verbose=True, shuffle_data=True, weight=conf['weight_init'], learning_radius=8, reduce_radius_after=5, std=2, reduce_std_after=5, step=0.3, reduce_step_after=5, ) if not sofm.initialized: sofm.init_weights(data) plt.subplot(n_rows, n_columns, index) plt.title(conf['title']) plt.scatter(*data.T, color=blue, alpha=0.05) plt.scatter(*sofm.weight, color=red) weights = sofm.weight.reshape((2, GRID_HEIGHT, GRID_WIDTH)) plot_2d_grid(weights, color=red) index += 1 plt.show()
mit
8,403,345,659,887,349,000
22.702703
69
0.562144
false
3.543434
false
false
false
alphagov/stagecraft
stagecraft/libs/mass_update/copy_dataset_with_new_mapping.py
1
5218
import reversion from performanceplatform.client import DataSet as client from stagecraft.apps.datasets.models import DataGroup, DataSet, DataType from django.conf import settings INTERNAL_KEY = [ "_day_start_at", "_hour_start_at", "_week_start_at", "_month_start_at", "_quarter_start_at", "_updated_at"] # should pass in whole mapping? @reversion.create_revision() def migrate_data_set(old_attributes, changed_attributes, data_mapping): print("getting existing dataset") existing_data_set = get_existing_data_set(old_attributes['data_group'], old_attributes['data_type']) if not existing_data_set: print("no existing dataset found, skipping") return False new_data_set_attributes = get_new_attributes( serialize_for_update(existing_data_set), changed_attributes) print("got new attributes {}".format(new_data_set_attributes)) print("creating new dataset with attributes") new_data_set = get_or_create_new_data_set(new_data_set_attributes) print("getting old data") old_data = get_old_data(old_attributes['data_group'], old_attributes['data_type']) print("converting old data") new_data = convert_old_data(old_data, data_mapping) serialized_new_data_set = new_data_set.serialize() print("posting data {} to dataset {}".format(new_data, serialized_new_data_set)) post_new_data(serialized_new_data_set['data_group'], serialized_new_data_set['data_type'], serialized_new_data_set['bearer_token'], new_data) def serialize_for_update(data_set): serialized_data_set = data_set.serialize() serialized_data_set['auto_ids'] = data_set.auto_ids serialized_data_set['upload_filters'] = data_set.upload_filters return serialized_data_set def get_existing_data_set(data_group_name, data_type_name): data_type = DataType.objects.filter( name=data_type_name).first() data_group = DataGroup.objects.filter( name=data_group_name).first() if not data_group or not data_type: return None return DataSet.objects.filter(data_type=data_type, data_group=data_group).first() def get_new_attributes(existing_attributes, changed_attributes): """ >>> existing_attributes = {'a': 1, 'b': 2, 'c': 3} >>> changed_attributes = {'a': 6, 'c': 'x,y'} >>> get_new_attributes(existing_attributes,changed_attributes) \ == {'b': 2, 'c': 'x,y', 'a': 6} True """ new_attributes = existing_attributes.copy() new_attributes.update(changed_attributes) return new_attributes def get_or_create_new_data_set(new_attributes): (data_type, new) = DataType.objects.get_or_create( name=new_attributes.pop('data_type')) (data_group, new) = DataGroup.objects.get_or_create( name=new_attributes.pop('data_group')) (obj, new) = DataSet.objects.get_or_create( data_type=data_type, data_group=data_group) new_attributes['data_type'] = data_type new_attributes['data_group'] = data_group del new_attributes['schema'] del new_attributes['name'] data_set_to_update_queryset = DataSet.objects.filter(name=obj.name) data_set_to_update_queryset.update(**new_attributes) return data_set_to_update_queryset.first() def get_qualified_backdrop_url(): return settings.BACKDROP_WRITE_URL + '/data' def get_old_data(data_group_name, data_type_name): data_set_client = client.from_group_and_type(get_qualified_backdrop_url(), data_group_name, data_type_name) return data_set_client.get().json()['data'] def apply_new_key_mappings(document, key_mapping): for key, val in document.items(): if key in key_mapping: document.pop(key) document[key_mapping[key]] = val elif key in INTERNAL_KEY: del document[key] else: document[key] = val return document def apply_new_values(document, value_mapping): # we need to convert counts to 
i - they are floats currently for key, val in document.items(): if val in value_mapping: document[key] = value_mapping[val] if key == 'count': document[key] = int(val) return document def convert_old_data(old_data, data_mapping): new_data = [] key_mapping = data_mapping['key_mapping'] value_mapping = data_mapping['value_mapping'] for document in old_data: doc = apply_new_values( apply_new_key_mappings(document, key_mapping), value_mapping) new_data.append(doc) return new_data def post_new_data(data_group_name, data_type_name, bearer_token, data): data_set_client = client.from_group_and_type(get_qualified_backdrop_url(), data_group_name, data_type_name, token=bearer_token) return data_set_client.post(data)
mit
718,580,277,900,791,000
36.271429
78
0.61422
false
3.703336
false
false
false
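The record above defines migrate_data_set, which expects a data_mapping dict with 'key_mapping' and 'value_mapping' entries (see convert_old_data), but no caller appears in the excerpt. A minimal, hypothetical call sketch follows; the group/type names and mappings are illustrative assumptions, not values from the record, and the import path is inferred from the record's file path.

# Hypothetical call sketch; all names and mappings below are illustrative only.
from stagecraft.libs.mass_update.copy_dataset_with_new_mapping import migrate_data_set

old_attributes = {'data_group': 'transactions', 'data_type': 'by-department'}
changed_attributes = {'data_type': 'by-organisation'}  # merged over the existing attrs
data_mapping = {
    'key_mapping': {'department': 'org'},      # rename old document keys to new ones
    'value_mapping': {'hmrc': 'HM Revenue'},   # rewrite individual field values
}
migrate_data_set(old_attributes, changed_attributes, data_mapping)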
64studio/pdk
pdk/xml_legacy/sax/writer.py
1
18896
"""SAX document handlers that support output generation of XML, SGML, and XHTML. This module provides three different groups of objects: the actual SAX document handlers that drive the output, DTD information containers, and syntax descriptors (of limited public use in most cases). Output Drivers -------------- The output drivers conform to the SAX C<DocumentHandler> protocol. They can be used anywhere a C<DocumentHandler> is used. Two drivers are provided: a `basic' driver which creates a fairly minimal output without much intelligence, and a `pretty-printing' driver that performs pretty-printing with nice indentation and the like. Both can optionally make use of DTD information and syntax objects. DTD Information Containers -------------------------- Each DTD information object provides an attribute C<syntax> which describes the expected output syntax; an alternate can be provided to the output drivers if desired. Syntax Descriptors ------------------ Syntax descriptor objects provide several attributes which describe the various lexical components of XML & SGML markup. The attributes have names that reflect the shorthand notation from the SGML world, but the values are strings which give the appropriate characters for the markup language being described. The one addition is the C<empty_stagc> attribute which should be used to end the start tag of elements which have no content. This is needed to properly support XML and XHTML. """ __version__ = '$Revision: 1.9 $' import string import pdk.xml_legacy.parsers.xmlproc.dtdparser import pdk.xml_legacy.parsers.xmlproc.xmlapp from saxutils import escape DEFAULT_LINELENGTH = 74 class Syntax: com = "--" # comment start or end cro = "&#" # character reference open refc = ";" # reference close dso = "[" # declaration subset open dsc = "]" # declaration subset close ero = "&" # entity reference open lit = '"' # literal start or end lit_quoted = '&quot;' # quoted literal lita = "'" # literal start or end (alternative) mdo = "<!" # markup declaration open mdc = ">" # markup declaration close msc = "]]" # marked section close pio = "<?" # processing instruciton open stago = "<" # start tag open etago = "</" # end tag open tagc = ">" # tag close vi = "=" # value indicator def __init__(self): if self.__class__ is Syntax: raise RuntimeError, "Syntax must be subclassed to be used!" 
class SGMLSyntax(Syntax): empty_stagc = ">" pic = ">" # processing instruction close net = "/" # null end tag class XMLSyntax(Syntax): empty_stagc = "/>" pic = "?>" # processing instruction close net = None # null end tag not supported class XHTMLSyntax(XMLSyntax): empty_stagc = " />" class DoctypeInfo: syntax = XMLSyntax() fpi = None sysid = None def __init__(self): self.__empties = {} self.__elements_only = {} self.__attribs = {} def is_empty(self, gi): return self.__empties.has_key(gi) def get_empties_list(self): return self.__empties.keys() def has_element_content(self, gi): return self.__elements_only.has_key(gi) def get_element_containers_list(self): return self.__elements_only.keys() def get_attributes_list(self, gi): return self.__attribs.get(gi, {}).keys() def get_attribute_info(self, gi, attr): return self.__attribs[gi][attr] def add_empty(self, gi): self.__empties[gi] = 1 def add_element_container(self, gi): self.__elements_only[gi] = gi def add_attribute_defn(self, gi, attr, type, decl, default): try: d = self.__attribs[gi] except KeyError: d = self.__attribs[gi] = {} if not d.has_key(attr): d[attr] = (type, decl, default) else: print "<%s> attribute %s already defined" % (gi, attr) def load_pubtext(self, pubtext): raise NotImplementedError, "sublasses must implement load_pubtext()" class _XMLDTDLoader(pdk.xml_legacy.parsers.xmlproc.xmlapp.DTDConsumer): def __init__(self, info, parser): self.info = info xml.parsers.xmlproc.xmlapp.DTDConsumer.__init__(self, parser) self.new_attribute = info.add_attribute_defn def new_element_type(self, gi, model): if model[0] == "|" and model[1][0] == ("#PCDATA", ""): # no action required pass elif model == ("", [], ""): self.info.add_empty(gi) else: self.info.add_element_container(gi) class XMLDoctypeInfo(DoctypeInfo): def load_pubtext(self, sysid): parser = pdk.xml_legacy.parsers.xmlproc.dtdparser.DTDParser() loader = _XMLDTDLoader(self, parser) parser.set_dtd_consumer(loader) parser.parse_resource(sysid) class XHTMLDoctypeInfo(XMLDoctypeInfo): # Bogus W3C cruft requires the extra space when terminating empty elements. syntax = XHTMLSyntax() class SGMLDoctypeInfo(DoctypeInfo): syntax = SGMLSyntax() import re __element_prefix_search = re.compile("<!ELEMENT", re.IGNORECASE).search __element_prefix_len = len("<!ELEMENT") del re def load_pubtext(self, sysid): # # Really should build a proper SGML DTD parser! # pubtext = open(sysid).read() m = self.__element_prefix_search(pubtext) while m: pubtext = pubtext[m.end():] if pubtext and pubtext[0] in string.whitespace: pubtext = string.lstrip(pubtext) else: continue gi, pubtext = string.split(pubtext, None, 1) pubtext = string.lstrip(pubtext) # maybe need to remove/collect tag occurance specifiers # ... 
raise NotImplementedError, "implementation incomplete" # m = self.__element_prefix_search(pubtext) class XmlWriter: """Basic XML output handler.""" def __init__(self, fp, standalone=None, dtdinfo=None, syntax=None, linelength=None, encoding='iso-8859-1'): self._offset = 0 self._packing = 1 self._flowing = 1 self._write = fp.write self._dtdflowing = None self._prefix = '' self._encoding = encoding self.__stack = [] self.__lang = None self.__pending_content = 0 self.__pending_doctype = 1 self.__standalone = standalone self.__dtdinfo = dtdinfo if syntax is None: if dtdinfo: syntax = dtdinfo.syntax else: syntax = XMLSyntax() self.__syntax = syntax self.indentation = 0 self.indentEndTags = 0 if linelength is None: self.lineLength = DEFAULT_LINELENGTH else: self.lineLength = linelength def setDocumentLocator(self, locator): self.locator = locator def startDocument(self): if self.__syntax.pic == "?>": lit = self.__syntax.lit s = '%sxml version=%s1.0%s encoding%s%s%s%s' % ( self.__syntax.pio, lit, lit, self.__syntax.vi, lit, self._encoding, lit) if self.__standalone: s = '%s standalone%s%s%s%s' % ( s, self.__syntax.vi, lit, self.__standalone, lit) self._write("%s%s\n" % (s, self.__syntax.pic)) def endDocument(self): if self.__stack: raise RuntimeError, "open element stack cannot be empty on close" def startElement(self, tag, attrs={}): if self.__pending_doctype: self.handle_doctype(tag) self._check_pending_content() self.__pushtag(tag) self.__check_flowing(tag, attrs) if attrs.has_key("xml:lang"): self.__lang = attrs["xml:lang"] del attrs["xml:lang"] if self._packing: prefix = "" elif self._flowing: prefix = self._prefix[:-self.indentation] else: prefix = "" stag = "%s%s%s" % (prefix, self.__syntax.stago, tag) prefix = "%s %s" % (prefix, (len(tag) * " ")) lit = self.__syntax.lit lita = self.__syntax.lita vi = self.__syntax.vi a = '' if self._flowing != self.__stack[-1][0]: if self._dtdflowing is not None \ and self._flowing == self._dtdflowing: pass else: a = ' xml:space%s%s%s%s' \ % (vi, lit, ["default", "preserve"][self._flowing], lit) if self.__lang != self.__stack[-1][1]: a = '%s xml:lang%s%s%s%s' % (a, vi, lit, self.lang, lit) line = stag + a self._offset = self._offset + len(line) a = '' for k, v in attrs.items(): if v is None: continue v = str(v) if string.find(v, lit) == -1: a = ' %s%s%s%s%s' % (k, vi, lit, escape(str(v)), lit) elif string.find(v, lita) == -1: a = ' %s%s%s%s%s' % (k, vi, lita, escape(str(v)), lita) else: a = ' %s%s%s%s%s' % (k, vi, lit, escape(str(v), {lit:self.__syntax.lit_quoted}), lita) if (self._offset + len(a)) > self.lineLength: self._write(line + "\n") line = prefix + a self._offset = len(line) else: line = line + a self._offset = self._offset + len(a) self._write(line) self.__pending_content = 1 if ( self.__dtdinfo and not (self.__dtdinfo.has_element_content(tag) or self.__dtdinfo.is_empty(tag))): self._packing = 1 def endElement(self, tag): if self.__pending_content: if self._flowing: self._write(self.__syntax.empty_stagc) if self._packing: self._offset = self._offset \ + len(self.__syntax.empty_stagc) else: self._write("\n") self._offset = 0 else: self._write(self.__syntax.empty_stagc) self._offset = self._offset + len(self.__syntax.empty_stagc) self.__pending_content = 0 self.__poptag(tag) return depth = len(self.__stack) if depth == 1 or self._packing or not self._flowing: prefix = '' else: prefix = self._prefix[:-self.indentation] \ + (" " * self.indentEndTags) self.__poptag(tag) self._write("%s%s%s%s" % ( prefix, self.__syntax.etago, tag, self.__syntax.tagc)) 
if self._packing: self._offset = self._offset + len(tag) + 3 else: self._write("\n") self._offset = 0 def characters(self, data, start, length): data = data[start: start+length] if data: self._check_pending_content() data = escape(data) if "\n" in data: p = string.find(data, "\n") self._offset = len(data) - (p + 1) else: self._offset = self._offset + len(data) self._check_pending_content() self._write(data) def comment(self, data, start, length): data = data[start: start+length] self._check_pending_content() s = "%s%s%s%s%s" % (self.__syntax.mdo, self.__syntax.com, data, self.__syntax.com, self.__syntax.mdc) p = string.rfind(s, "\n") if self._packing: if p >= 0: self._offset = len(s) - (p + 1) else: self._offset = self._offset + len(s) else: self._write("%s%s\n" % (self._prefix, s)) self._offset = 0 def ignorableWhitespace(self, data, start, length): pass def processingInstruction(self, target, data): self._check_pending_content() s = "%s%s %s%s" % (self.__syntax.pio, target, data, self.__syntax.pic) prefix = self._prefix[:-self.indentation] \ + (" " * self.indentEndTags) if "\n" in s: p = string.rfind(s, "\n") if self._flowing and not self._packing: self._write(prefix + s + "\n") self._offset = 0 else: self._write(s) self._offset = len(s) - (p + 1) elif self._flowing and not self._packing: self._write(prefix + s + "\n") self._offset = 0 else: self._write(s) self._offset = self._offset + len(s) # This doesn't actually have a SAX equivalent, so we'll use it as # an internal helper. def handle_doctype(self, root): self.__pending_doctype = 0 if self.__dtdinfo: fpi = self.__dtdinfo.fpi sysid = self.__dtdinfo.sysid else: fpi = sysid = None lit = self.__syntax.lit isxml = self.__syntax.pic == "?>" if isxml and sysid: s = '%sDOCTYPE %s\n' % (self.__syntax.mdo, root) if fpi: s = s + ' PUBLIC %s%s%s\n' % (lit, fpi, lit) s = s + ' %s%s%s>\n' % (lit, sysid, lit) else: s = s + ' SYSTEM %s%s%s>\n' % (lit, sysid, lit) self._write(s) self._offset = 0 elif not isxml: s = "%sDOCTYPE %s" % (self.__syntax.mdo, root) if fpi: s = '%s\n PUBLIC %s%s%s' % (s, lit, fpi, lit) if sysid: s = '%s\n SYSTEM %s%s%s' % (s, lit, sysid, lit) self._write("%s%s\n" % (s, self.__syntax.mdc)) self._offset = 0 def handle_cdata(self, data): self._check_pending_content() # There should be a better way to generate '[CDATA[' start = self.__syntax.mdo + "[CDATA[" end = self.__syntax.msc + self.__syntax.mdc s = "%s%s%s" % (start, escape(data), end) if self._packing: if "\n" in s: rpos = string.rfind(s, "\n") self._offset = len(s) - (rpos + 1) + len(end) else: self._offset = self._offset + len(s) + len(start + end) self._write(s) else: self._offset = 0 self._write(s + "\n") # Internal helper methods. def __poptag(self, tag): state = self.__stack.pop() self._flowing, self.__lang, expected_tag, \ self._packing, self._dtdflowing = state if tag != expected_tag: raise RuntimeError, \ "expected </%s>, got </%s>" % (expected_tag, tag) self._prefix = self._prefix[:-self.indentation] def __pushtag(self, tag): self.__stack.append((self._flowing, self.__lang, tag, self._packing, self._dtdflowing)) self._prefix = self._prefix + " " * self.indentation def __check_flowing(self, tag, attrs): """Check the contents of attrs and the DTD information to determine whether the following content should be flowed. tag -- general identifier of the element being opened attrs -- attributes dictionary as reported by the parser or application This sets up both the _flowing and _dtdflowing (object) attributes. 
""" docspec = dtdspec = None if self.__dtdinfo: try: info = self.__dtdinfo.get_attribute_info(tag, "xml:space") except KeyError: info = None if info is not None: self._flowing = info[2] != "preserve" self._dtdflowing = self._flowing if attrs.has_key("xml:space"): self._flowing = attrs["xml:space"] != "preserve" del attrs["xml:space"] def _check_pending_content(self): if self.__pending_content: s = self.__syntax.tagc if self._flowing and not self._packing: s = s + "\n" self._offset = 0 else: self._offset = self._offset + len(s) self._write(s) self.__pending_content = 0 class PrettyPrinter(XmlWriter): """Pretty-printing XML output handler.""" def __init__(self, fp, standalone=None, dtdinfo=None, syntax=None, linelength=None, indentation=2, endtagindentation=None): XmlWriter.__init__(self, fp, standalone=standalone, dtdinfo=dtdinfo, syntax=syntax, linelength=linelength) self.indentation = indentation if endtagindentation is not None: self.indentEndTags = endtagindentation else: self.indentEndTags = indentation def characters(self, data, start, length): data = data[start: start + length] if not data: return self._check_pending_content() data = escape(data) if not self._flowing: self._write(data) return words = string.split(data) begspace = data[0] in string.whitespace endspace = words and (data[-1] in string.whitespace) prefix = self._prefix if len(prefix) > 40: prefix = " " offset = self._offset L = [] append = L.append if begspace: append(" ") offset = offset + 1 ws = "" ws_len = 0 while words: w = words[0] del words[0] if (offset + ws_len + len(w)) > self.lineLength: append("\n") append(prefix) append(w) offset = len(prefix) + len(w) else: append(ws) ws, ws_len = " ", 1 append(w) offset = offset + 1 + len(w) if endspace: append(" ") offset = offset + 1 self._offset = offset self._write(string.join(L, ""))
gpl-2.0
-2,206,020,784,340,318,500
33.418944
84
0.512701
false
3.975594
false
false
false
vkroz/kafka
tests/kafkatest/services/performance/end_to_end_latency.py
1
3028
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from kafkatest.services.performance import PerformanceService from kafkatest.utils.security_config import SecurityConfig class EndToEndLatencyService(PerformanceService): logs = { "end_to_end_latency_log": { "path": "/mnt/end-to-end-latency.log", "collect_default": True}, } def __init__(self, context, num_nodes, kafka, security_protocol, topic, num_records, consumer_fetch_max_wait=100, acks=1): super(EndToEndLatencyService, self).__init__(context, num_nodes) self.kafka = kafka self.security_config = SecurityConfig(security_protocol) self.security_protocol = security_protocol self.args = { 'topic': topic, 'num_records': num_records, 'consumer_fetch_max_wait': consumer_fetch_max_wait, 'acks': acks } def _worker(self, idx, node): args = self.args.copy() self.security_config.setup_node(node) if self.security_protocol == SecurityConfig.SSL: ssl_config_file = SecurityConfig.SSL_DIR + "/security.properties" node.account.create_file(ssl_config_file, str(self.security_config)) else: ssl_config_file = "" args.update({ 'zk_connect': self.kafka.zk.connect_setting(), 'bootstrap_servers': self.kafka.bootstrap_servers(), 'ssl_config_file': ssl_config_file }) cmd = "/opt/kafka/bin/kafka-run-class.sh kafka.tools.EndToEndLatency "\ "%(bootstrap_servers)s %(topic)s %(num_records)d "\ "%(acks)d 20 %(ssl_config_file)s" % args cmd += " | tee /mnt/end-to-end-latency.log" self.logger.debug("End-to-end latency %d command: %s", idx, cmd) results = {} for line in node.account.ssh_capture(cmd): if line.startswith("Avg latency:"): results['latency_avg_ms'] = float(line.split()[2]) if line.startswith("Percentiles"): results['latency_50th_ms'] = float(line.split()[3][:-1]) results['latency_99th_ms'] = float(line.split()[6][:-1]) results['latency_999th_ms'] = float(line.split()[9]) self.results[idx-1] = results
apache-2.0
6,619,955,687,392,177,000
42.884058
126
0.637715
false
3.852417
true
false
false
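The EndToEndLatencyService record above defines the worker command and result parsing but shows no invocation. A hypothetical driver sketch is given below; the kafkatest test context and Kafka cluster fixtures, the PLAINTEXT protocol choice, and the topic name are assumptions, and the run() call is assumed to come from the PerformanceService base class, which is not shown in the record.

# Hypothetical usage sketch -- surrounding kafkatest fixtures are assumed, not shown above.
from kafkatest.services.performance.end_to_end_latency import EndToEndLatencyService

def run_latency_check(test_context, kafka_cluster):
    # One worker node, plaintext security, 10000 probe records on an assumed test topic.
    latency = EndToEndLatencyService(
        context=test_context,
        num_nodes=1,
        kafka=kafka_cluster,
        security_protocol='PLAINTEXT',
        topic='latency-test',
        num_records=10000,
    )
    latency.run()              # run() assumed from the PerformanceService base class
    return latency.results[0]  # dict with latency_avg_ms and percentile keys per _worker()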
benhoff/chrome-stream-chat
CHATIMUSMAXIMUS/youtube_scrapper.py
1
3564
import sys import os #import httplib2 from time import sleep from threading import Thread from selenium import webdriver from apiclient.discovery import build from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow from PyQt5 import QtCore """ _YOUTUBE_API_SERVICE_NAME = 'youtube' _YOUTUBE_API_VERSION = 'v3' def _youtube_authentication(): client_secrets_file = 'client_secrets.json' youtube_scope = "https://www.googleapis.com/auth/youtube.readonly" missing_client_message = "You need to populate the client_secrets.json!" flow = flow_from_clientsecrets(client_secrets_file, scope=youtube_scope, message=missing_client_message) storage = Storage("{}-oauth2.json".format(sys.argv[0])) credentials = storage.get() if credentials is None or credentials.invalid: credentials = run_flow(flow, storage, args) return build(_YOUTUBE_API_SERVICE_NAME, _YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http())) def get_current_youtube_link(): youtube_api = _youtube_authentication() broadcasts_requests = youtube.liveBroadcasts().list( broadcastStatus=('active',), part='id', maxResults=5) while broadcasts_requests: response = broadcasts_requests.execute() youtube_id = response.get('items', [])[0]['id'] return 'http://youtube.com/watch?v={}'.format(youtube_id) """ class YoutubeScrapper(QtCore.QObject): chat_signal = QtCore.pyqtSignal(str, str, str) def __init__(self, video_url=None, parent=None): super(YoutubeScrapper, self).__init__(parent) """ if video_url is None: video_url = get_current_youtube_link() """ self.video_url = video_url self._number_of_messages = 0 self._thread = Thread(target=self.run) self._thread.setDaemon(True) self._thread.start() def run(self): driver = webdriver.PhantomJS() # TODO: see if this is needed or not driver.set_window_size(1000, 1000) driver.get(self.video_url) # NOTE: need some time for comments to load sleep(5) all_comments = driver.find_element_by_id("all-comments") comments = all_comments.find_elements_by_tag_name('li') self._number_of_messages = len(comments) for comment in comments: author = comment.find_element_by_class_name('author').text message = comment.find_element_by_class_name('comment-text').text self.chat_signal.emit(author, message, 'YT') while True: comments = all_comments.find_elements_by_tag_name('li') comments_length = len(comments) if comments_length > self._number_of_messages: # NOTE: this number is intentionally NEGATIVE messages_not_parsed = self._number_of_messages - comments_length self._number_of_messages = len(comments) comments = comments[messages_not_parsed:] for comment in comments: author = comment.find_element_by_class_name('author').text message = comment.find_element_by_class_name('comment-text').text self.chat_signal.emit(author, message, 'YT') if __name__ == '__main__': scrapper = YoutubeScrapper('https://www.youtube.com/watch?v=W2DS6wT6_48') while True: sleep(1)
mit
-8,556,869,593,251,939,000
32
85
0.632155
false
3.899344
false
false
false
monodokimes/pythonmon
core/scene.py
1
3014
import controller.component from util import jsonmanager, debug, configuration from view.entity import Entity class Scene: def __init__(self, name, entities_data): self.name = name self.entities = [] self.started_entities = [] self.event_input = None self.cont_input = None for entity_data in entities_data: position = (entity_data["X"], entity_data["Y"]) entity = Entity(entity_data["Name"], position) for component_data in entity_data["Components"]: try: component_constructor = getattr(controller.component, component_data["Type"]) component = component_constructor() component.scene = self data = component_data["ComponentData"] if not len(data) == 0: component.load_data(data) entity.add_component(component) except AttributeError: debug.log(component_data["Type"] + " not recognised :/") self.entities.append(entity) def start(self): self.event_input = 'none' self.cont_input = 'none' while not self.ready_to_start(): debug.log('preparing to start entities...') entities_to_start = [] for ent in self.entities: if not ent.is_started(): entities_to_start.append(ent) debug.log(str(len(entities_to_start)) + ' entities ready to start.') debug.log('starting...') for entity in entities_to_start: try: entity.start() except Exception as e: debug.log('could not start entity. Logging error:') debug.log(e) debug.log('started {0} entities :)'.format(len(self.entities))) def update(self, event_input, cont_input): self.event_input = event_input self.cont_input = cont_input for entity in self.entities: entity.update() def find_entity(self, entity_name): for entity in self.entities: if entity.name == entity_name: return entity return None def add_entity(self, entity): self.entities.append(entity) def ready_to_start(self): for entity in self.entities: if not entity.is_started(): return False return True class SceneManager: @staticmethod def get_path(scene_name): return configuration.scene_data_folder_path + scene_name + '.json' @staticmethod def load_scene(scene_name): path = SceneManager.get_path(scene_name) scene_data = jsonmanager.get_data(path) return Scene(scene_name, scene_data['Entities']) @staticmethod def check_if_scene_exists(scene_name): path = SceneManager.get_path(scene_name) return jsonmanager.check_for_file(path)
gpl-3.0
2,675,223,090,908,184,000
31.06383
97
0.562044
false
4.406433
false
false
false
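The Scene/SceneManager record above exposes load_scene, start, update and find_entity, but the excerpt contains no driver code. A minimal, hypothetical game-loop sketch using that API follows; the scene name 'town', the entity name 'Player' and the input strings are illustrative assumptions, and the import path is inferred from the record's file path (core/scene.py).

# Hypothetical driver sketch for the Scene / SceneManager API shown above.
# 'town' is an assumed scene name; configuration.scene_data_folder_path must
# contain town.json for load_scene() to succeed.
from core.scene import SceneManager

if SceneManager.check_if_scene_exists('town'):
    scene = SceneManager.load_scene('town')
    scene.start()                           # starts entities until all report started
    scene.update('key_down', 'move_left')   # event input and continuous input strings
    player = scene.find_entity('Player')    # returns None if no such entity exists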
by46/coffee
code1.py
1
2408
# -*- coding: utf-8 -*- ENCODINGS = ['utf8', 'gbk'] def decode_statement(statement, encodings): # if isinstance(statement, unicode): # return statement for encoding in encodings: try: return statement.decode(encoding) except UnicodeDecodeError: pass def get_initial_letters(statement): statement = decode_statement(statement, ENCODINGS) if statement is None: return '' return ''.join(get_initial_letter(word) for word in statement) def get_initial_letter(character): character = character.encode('gbk') try: ord(character) return character.lower() except Exception: # ignore exception asc = ord(character[0]) * 256 + ord(character[1]) - 65536 if -20319 <= asc <= -20284: return 'a' if -20283 <= asc <= -19776: return 'b' if -19775 <= asc <= -19219: return 'c' if -19218 <= asc <= -18711: return 'd' if -18710 <= asc <= -18527: return 'e' if -18526 <= asc <= -18240: return 'f' if -18239 <= asc <= -17923: return 'g' if -17922 <= asc <= -17418: return 'h' if -17417 <= asc <= -16475: return 'j' if -16474 <= asc <= -16213: return 'k' if -16212 <= asc <= -15641: return 'l' if -15640 <= asc <= -15166: return 'm' if -15165 <= asc <= -14923: return 'n' if -14922 <= asc <= -14915: return 'o' if -14914 <= asc <= -14631: return 'p' if -14630 <= asc <= -14150: return 'q' if -14149 <= asc <= -14091: return 'r' if -14090 <= asc <= -13119: return 's' if -13118 <= asc <= -12839: return 't' if -12838 <= asc <= -12557: return 'w' if -12556 <= asc <= -11848: return 'x' if -11847 <= asc <= -11056: return 'y' if -11055 <= asc <= -10247: return 'z' return '' def main(str_input): a = get_initial_letters(str_input) return ''.join(a) if __name__ == "__main__": str_input = u'K珠穆朗玛峰' print(main(str_input))
mit
5,043,973,506,449,608,000
25.563218
66
0.455379
false
3.700617
false
false
false
sunlightlabs/read_FEC
fecreader/api/serializers.py
1
6024
from fec_alerts.models import new_filing from summary_data.models import Committee_Overlay, Candidate_Overlay, DistrictWeekly, District from formdata.models import SkedE from rest_framework import serializers class NFSerializer(serializers.HyperlinkedModelSerializer): form_name = serializers.Field(source='get_form_name') process_time_formatted = serializers.Field(source='process_time_formatted') skeda_url = serializers.Field(source='get_skeda_url') spending_url = serializers.Field(source='get_spending_url') absolute_url = serializers.Field(source='get_absolute_url') committee_url = serializers.Field(source='get_committee_url') class Meta: model = new_filing fields = ('fec_id', 'committee_name', 'filing_number', 'form_type', 'filed_date', 'coverage_from_date', 'coverage_to_date', 'is_superpac', 'committee_designation', 'committee_type', 'coh_end', 'new_loans', 'tot_raised', 'tot_spent', 'lines_present', 'form_name', 'skeda_url', 'spending_url', 'absolute_url', 'committee_url', 'process_time_formatted', 'is_superceded', 'cycle') class COSerializer(serializers.HyperlinkedModelSerializer): display_type = serializers.Field(source='display_type') candidate_url = serializers.Field(source='candidate_url') candidate_office = serializers.Field(source='curated_candidate_office') candidate_name = serializers.Field(source='curated_candidate_name') committee_url = serializers.Field(source='get_absolute_url') class Meta: model = Committee_Overlay fields=('fec_id', 'name', 'total_receipts', 'total_disbursements', 'outstanding_loans', 'cash_on_hand', 'cash_on_hand_date', 'ctype', 'candidate_office','candidate_name', 'candidate_url', 'display_type', 'committee_url', 'political_orientation') #depth = 1 class OSSerializer(serializers.HyperlinkedModelSerializer): display_type = serializers.Field(source='display_type') committee_url = serializers.Field(source='get_absolute_url') get_filtered_ie_url = serializers.Field(source='get_filtered_ie_url') display_coh_date = serializers.Field(source='display_coh_date') display_coh = serializers.Field(source='display_coh') major_activity = serializers.Field(source='major_activity') class Meta: model = Committee_Overlay fields=('fec_id', 'name', 'total_receipts', 'total_disbursements', 'outstanding_loans', 'ctype', 'total_indy_expenditures','ie_support_dems', 'ie_oppose_dems', 'ie_support_reps', 'ie_oppose_reps', 'political_orientation', 'political_orientation_verified', 'display_type', 'committee_url', 'get_filtered_ie_url', 'display_coh', 'display_coh_date', 'major_activity', 'cycle') #depth = 1 class DistrictSerializer(serializers.ModelSerializer): district_url = serializers.Field(source='get_absolute_url') next_election = serializers.Field(source='next_election') class Meta: model = District fields=('id', 'district_url', 'cycle', 'state', 'office', 'office_district', 'term_class', 'incumbent_name', 'incumbent_party', 'next_election_date', 'next_election_code', 'next_election', 'open_seat', 'candidate_raised', 'candidate_spending', 'outside_spending', 'total_spending', 'rothenberg_rating_id', 'rothenberg_rating_text') class MinimalDistrictSerializer(serializers.ModelSerializer): race_name = serializers.Field(source='__unicode__') class Meta: model = District fields=('race_name', 'state', 'office', 'office_district', 'term_class', 'id') class CandidateSerializer(serializers.ModelSerializer): candidate_url = serializers.Field(source='get_absolute_url') race_url = serializers.Field(source='get_race_url') ie_url = serializers.Field(source='get_filtered_ie_url') status = 
serializers.Field(source='show_candidate_status') district = MinimalDistrictSerializer(source='district') class Meta: model = Candidate_Overlay fields=('name', 'fec_id', 'pcc', 'party', 'candidate_url', 'race_url', 'ie_url', 'is_incumbent', 'cycle', 'not_seeking_reelection', 'other_office_sought', 'other_fec_id', 'election_year', 'state', 'office', 'office_district', 'term_class', 'candidate_status', 'total_expenditures', 'expenditures_supporting', 'expenditures_opposing', 'total_receipts', 'total_contributions', 'total_disbursements', 'cash_on_hand', 'cash_on_hand_date', 'district', 'outstanding_loans', 'cand_is_gen_winner', 'status') class DWSerializer(serializers.HyperlinkedModelSerializer): district = MinimalDistrictSerializer(source='district') class Meta: model = DistrictWeekly depth = 1 fields=('start_date', 'end_date', 'cycle_week_number', 'outside_spending', 'district') class SkedESerializer(serializers.ModelSerializer): payee_name_simplified = serializers.Field(source='payee_name_simplified') candidate_url = serializers.Field(source='get_candidate_url') committee_url = serializers.Field(source='get_committee_url') short_office = serializers.Field(source='short_office') candidate_name = serializers.Field(source='candidate_name_raw') race_url = serializers.Field(source='get_race_url') class Meta: model = SkedE fields=('form_type', 'superceded_by_amendment', 'candidate_id_checked', 'candidate_name', 'candidate_party_checked', 'candidate_office_checked', 'candidate_state_checked', 'candidate_district_checked', 'support_oppose_checked', 'committee_name', 'transaction_id', 'payee_organization_name', 'payee_street_1', 'payee_street_2', 'payee_city', 'payee_state', 'payee_zip', 'payee_name_simplified', 'election_code', 'election_other_description', 'expenditure_date_formatted', 'expenditure_amount', 'expenditure_purpose_code', 'expenditure_purpose_descrip', 'date_signed_formatted', 'memo_code', 'memo_text_description', 'filer_committee_id_number', 'district_checked', 'race_url', 'committee_url', 'candidate_url', 'short_office')
bsd-3-clause
2,079,631,095,254,151,700
59.24
733
0.708001
false
3.411099
false
false
false
VandroiyLabs/FaroresWind
faroreswind/server/handler_Metadata.py
1
3875
## database import psycopg2 ## system libraries import io, os import datetime, time import logging ## web libraries import tornado import tornado.auth import tornado.escape import tornado.gen import tornado.httpserver import urlparse import threading import functools from tornado.ioloop import IOLoop from tornado.web import asynchronous, RequestHandler, Application from tornado.httpclient import AsyncHTTPClient ## custom libraries import faroreDB rootdir = os.path.dirname(__file__) class listEnoseConfHandler(tornado.web.RequestHandler): def initialize(self, database, IPs): self.db = database self.IPs = IPs return def get(self): if self.request.remote_ip[:11] in self.IPs : miolo = '<div class="page-header">' + \ '<table class="table table-striped">' + \ '<thead><tr><th width=500px colspan=2>enose ID</th><th width=150px colspan=3>Location</th><th width=150px colspan=3>Date</th><th width=50px></th><th width=50px></th></tr></thead>'+ \ '<tbody>\n' # Retrieving data from inductions db = self.db listConfs = self.db.getEnoseConfs( ) for conf in listConfs: miolo += "<tr><td colspan=2>hal" + str(conf[-1]) + "k</td>\n" miolo += "<td colspan=3>" + str(conf[-2]) + "</td>\n" miolo += "<td colspan=5>" + str(conf[1]) + "</td>" miolo += "</tr><tr>" for j in range(10): miolo += "<td>" + str(conf[2+j]) + "</td>" miolo += "</tr>" miolo += '</tbody></table></div>' self.render(rootdir+'/pagess/index.html', title="Current list of ENoses", miolo = miolo, top=file(rootdir+"/pagess/top.html").read(), bottom=file(rootdir+"/pagess/bottom.html").read()) ## If in this else, someone tried to access this else: logging.warning('Access to list_inductions from outside IP list: ' + str(self.request.remote_ip) ) return class inputEnoseConfigHandler(tornado.web.RequestHandler): def initialize(self, database, IPs): self.IPs = IPs return def get(self): if self.request.remote_ip[:-2] == self.IPs[0] or self.request.remote_ip[:7] == self.IPs[1]: miolo = file(rootdir+'/pagess/input_enose_config.html').read() self.render(rootdir+'/pagess/index.html', title="Farore's wind", miolo = miolo, top=file(rootdir+"/pagess/top.html").read(), bottom=file(rootdir+"/pagess/bottom.html").read()) ## If in this else, someone tried to access this else: logging.warning('Access to input_metadata from outside IP list: ' + str(self.request.remote_ip) ) return class actionEnoseConfigHandler(tornado.web.RequestHandler): def initialize(self, database, IPs): self.db = database self.IPs = IPs return def post(self): if self.request.remote_ip[:11] in self.IPs : self.render(rootdir+'/pagess/metadata_action.html') date = self.get_argument('date', '') S = [] for j in range(1,11): S.append( self.get_argument('S'+str(j), '') ) T = [] for j in range(1,9): T.append( self.get_argument('T'+str(j), '') ) location = self.get_argument('location', '') enose = self.get_argument('enose', '') if len(enose) > 1: enose = enose[3] self.db.insertEnoseConf(enose, date, S, T, location) ## If in this else, someone tried to access this else: logging.warning('Access to metadata_action from outside IP list: ' + str(self.request.remote_ip) ) return
gpl-3.0
-8,024,903,895,091,325,000
28.135338
202
0.571871
false
3.686965
false
false
false
aurex-linux/virt-manager
tests/capabilities.py
1
9927
# Copyright (C) 2013, 2014 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301 USA. import os import unittest from tests import utils from virtinst import CapabilitiesParser as capabilities def build_host_feature_dict(feature_list): fdict = {} for f in feature_list: fdict[f] = capabilities.FEATURE_ON return fdict class TestCapabilities(unittest.TestCase): def _compareGuest(self, (arch, os_type, domains, features), guest): self.assertEqual(arch, guest.arch) self.assertEqual(os_type, guest.os_type) self.assertEqual(len(domains), len(guest.domains)) for n in range(len(domains)): self.assertEqual(domains[n][0], guest.domains[n].hypervisor_type) self.assertEqual(domains[n][1], guest.domains[n].emulator) self.assertEqual(domains[n][2], guest.domains[n].machines) for n in features: self.assertEqual(features[n], guest.features[n]) def _buildCaps(self, filename): path = os.path.join("tests/capabilities-xml", filename) xml = file(path).read() return capabilities.Capabilities(xml) def _testCapabilities(self, path, (host_arch, host_features), guests, secmodel=None): caps = self._buildCaps(path) if host_arch: self.assertEqual(host_arch, caps.host.cpu.arch) for n in host_features: self.assertEqual(host_features[n], caps.host.cpu.features[n]) if secmodel: self.assertEqual(secmodel[0], caps.host.secmodel.model) self.assertEqual(secmodel[1], caps.host.secmodel.doi) if secmodel[2]: for k, v in secmodel[2].items(): self.assertEqual(v, caps.host.secmodel.baselabels[k]) for idx in range(len(guests)): self._compareGuest(guests[idx], caps.guests[idx]) def testCapabilities1(self): host = ('x86_64', {'vmx': capabilities.FEATURE_ON}) guests = [ ('x86_64', 'xen', [['xen', None, []]], {}), ('i686', 'xen', [['xen', None, []]], {'pae': capabilities.FEATURE_ON}), ('i686', 'hvm', [['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}), ('x86_64', 'hvm', [['xen', "/usr/lib64/xen/bin/qemu-dm", ['pc', 'isapc']]], {}) ] self._testCapabilities("capabilities-xen.xml", host, guests) def testCapabilities2(self): host = ('x86_64', {}) secmodel = ('selinux', '0', None) guests = [ ('x86_64', 'hvm', [['qemu', '/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}), ('i686', 'hvm', [['qemu', '/usr/bin/qemu', ['pc', 'isapc']]], {}), ('mips', 'hvm', [['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}), ('mipsel', 'hvm', [['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}), ('sparc', 'hvm', [['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}), ('ppc', 'hvm', [['qemu', '/usr/bin/qemu-system-ppc', ['g3bw', 'mac99', 'prep']]], {}), ] self._testCapabilities("capabilities-qemu.xml", host, guests, secmodel) def testCapabilities3(self): host = ('i686', {}) guests = [ ('i686', 'hvm', [['qemu', '/usr/bin/qemu', ['pc', 'isapc']], ['kvm', '/usr/bin/qemu-kvm', ['pc', 'isapc']]], {}), ('x86_64', 'hvm', [['qemu', 
'/usr/bin/qemu-system-x86_64', ['pc', 'isapc']]], {}), ('mips', 'hvm', [['qemu', '/usr/bin/qemu-system-mips', ['mips']]], {}), ('mipsel', 'hvm', [['qemu', '/usr/bin/qemu-system-mipsel', ['mips']]], {}), ('sparc', 'hvm', [['qemu', '/usr/bin/qemu-system-sparc', ['sun4m']]], {}), ('ppc', 'hvm', [['qemu', '/usr/bin/qemu-system-ppc', ['g3bw', 'mac99', 'prep']]], {}), ] secmodel = ('dac', '0', {"kvm" : "+0:+0", "qemu" : "+0:+0"}) self._testCapabilities("capabilities-kvm.xml", host, guests, secmodel) def testCapabilities4(self): host = ('i686', {'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}) guests = [ ('i686', 'linux', [['test', None, []]], {'pae': capabilities.FEATURE_ON | capabilities.FEATURE_OFF}), ] self._testCapabilities("capabilities-test.xml", host, guests) def testCapsLXC(self): guests = [ ("x86_64", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}), ("i686", "exe", [["lxc", "/usr/libexec/libvirt_lxc", []]], {}), ] self._testCapabilities("capabilities-lxc.xml", (None, None), guests) def testCapsTopology(self): filename = "capabilities-test.xml" caps = self._buildCaps(filename) self.assertTrue(bool(caps.host.topology)) self.assertTrue(len(caps.host.topology.cells) == 2) self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8) self.assertTrue(len(caps.host.topology.cells[0].cpus) == 8) def testCapsCPUFeaturesOldSyntax(self): filename = "rhel5.4-xen-caps-virt-enabled.xml" host_feature_list = ["vmx"] feature_dict = build_host_feature_dict(host_feature_list) caps = self._buildCaps(filename) for f in feature_dict.keys(): self.assertEquals(caps.host.cpu.features[f], feature_dict[f]) def testCapsCPUFeaturesOldSyntaxSVM(self): filename = "rhel5.4-xen-caps.xml" host_feature_list = ["svm"] feature_dict = build_host_feature_dict(host_feature_list) caps = self._buildCaps(filename) for f in feature_dict.keys(): self.assertEquals(caps.host.cpu.features[f], feature_dict[f]) def testCapsCPUFeaturesNewSyntax(self): filename = "libvirt-0.7.6-qemu-caps.xml" host_feature_list = ['lahf_lm', 'xtpr', 'cx16', 'tm2', 'est', 'vmx', 'ds_cpl', 'pbe', 'tm', 'ht', 'ss', 'acpi', 'ds'] feature_dict = build_host_feature_dict(host_feature_list) caps = self._buildCaps(filename) for f in feature_dict.keys(): self.assertEquals(caps.host.cpu.features[f], feature_dict[f]) self.assertEquals(caps.host.cpu.model, "core2duo") self.assertEquals(caps.host.cpu.vendor, "Intel") self.assertEquals(caps.host.cpu.threads, "3") self.assertEquals(caps.host.cpu.cores, "5") self.assertEquals(caps.host.cpu.sockets, "7") def testCapsUtilFuncs(self): new_caps = self._buildCaps("libvirt-0.7.6-qemu-caps.xml") new_caps_no_kvm = self._buildCaps( "libvirt-0.7.6-qemu-no-kvmcaps.xml") empty_caps = self._buildCaps("empty-caps.xml") rhel_xen_enable_hvm_caps = self._buildCaps( "rhel5.4-xen-caps-virt-enabled.xml") rhel_xen_caps = self._buildCaps("rhel5.4-xen-caps.xml") rhel_kvm_caps = self._buildCaps("rhel5.4-kvm-caps.xml") def test_utils(caps, no_guests, is_hvm, is_kvm, is_bios_disable, is_xenner): self.assertEquals(caps.no_install_options(), no_guests) self.assertEquals(caps.hw_virt_supported(), is_hvm) self.assertEquals(caps.is_kvm_available(), is_kvm) self.assertEquals(caps.is_bios_virt_disabled(), is_bios_disable) self.assertEquals(caps.is_xenner_available(), is_xenner) test_utils(new_caps, False, True, True, False, True) test_utils(empty_caps, True, False, False, False, False) test_utils(rhel_xen_enable_hvm_caps, False, True, False, False, False) test_utils(rhel_xen_caps, False, True, False, True, False) test_utils(rhel_kvm_caps, 
False, True, True, False, False) test_utils(new_caps_no_kvm, False, True, False, False, False) def testCPUMap(self): caps = self._buildCaps("libvirt-0.7.6-qemu-caps.xml") cpu_64 = caps.get_cpu_values(None, "x86_64") cpu_32 = caps.get_cpu_values(None, "i486") cpu_random = caps.get_cpu_values(None, "mips") def test_cpu_map(cpumap, cpus): cpunames = sorted([c.model for c in cpumap], key=str.lower) for c in cpus: self.assertTrue(c in cpunames) self.assertEquals(cpu_64, cpu_32) x86_cpunames = [ '486', 'athlon', 'Conroe', 'core2duo', 'coreduo', 'n270', 'Nehalem', 'Opteron_G1', 'Opteron_G2', 'Opteron_G3', 'Penryn', 'pentium', 'pentium2', 'pentium3', 'pentiumpro', 'phenom', 'qemu32', 'qemu64'] test_cpu_map(cpu_64, x86_cpunames) test_cpu_map(cpu_random, []) conn = utils.open_testdriver() cpu_64 = caps.get_cpu_values(conn, "x86_64") self.assertTrue(len(cpu_64) > 0) if __name__ == "__main__": unittest.main()
gpl-2.0
8,204,153,660,796,448,000
38.392857
133
0.556966
false
3.417212
true
false
false
pamapa/callblocker
usr/share/callblocker/onlinecheck_phonespamfilter_com.py
1
2486
#!/usr/bin/env python3

# callblocker - blocking unwanted calls from your home phone
# Copyright (C) 2015-2020 Patrick Ammann <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#

import sys

from online_base import OnlineBase


class OnlineCheckTellowsDE(OnlineBase):
    def supported_country_codes(self):
        return ["+1", "+33", "+44", "+61", "+64"]

    def handle_number(self, args, number):
        # map number to correct URL
        if args.number.startswith("+1"):  # USA, Canada
            site = "www.phonespamfilter.com"
            number = number[2:]
        elif args.number.startswith("+33"):  # France
            site = "www.phonespamfilter.fr"
            number = number[3:]
        elif args.number.startswith("+44"):  # United Kingdom
            site = "www.phonespamfilter.co.uk"
            number = number[3:]
        elif args.number.startswith("+61"):  # Australia
            site = "au.phonespamfilter.com"
            number = number[3:]
        elif args.number.startswith("+64"):  # New Zealand
            site = "www.phonespamfilter.co.nz"
            number = number[3:]
        else:
            self.log.error("number '%s' is not supported '%s'" % (args.number, self.supported_country_codes()))
            sys.exit(-1)

        url = "http://%s/check.php?phone=%s" % (site, number)
        content = self.http_get(url)
        self.log.debug(content)

        score = int(content)
        spam = False if score < args.spamscore else True
        return self.onlinecheck_2_result(spam, score)


#
# main
#
if __name__ == "__main__":
    m = OnlineCheckTellowsDE()
    parser = m.get_parser("Online check via phonespamfilter.com")
    parser.add_argument("--spamscore", help="score limit to mark as spam [0..100]", default=50)
    args = parser.parse_args()
    m.run(args)
gpl-2.0
-8,970,024,645,685,095,000
35.558824
111
0.641593
false
3.677515
false
false
false
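As a quick illustration of the lookup performed by the checker above, the sketch below reimplements only the country-prefix mapping and the spam-score threshold from handle_number. It is standalone and hypothetical: the real script additionally relies on OnlineBase helpers (get_parser, http_get, onlinecheck_2_result) that are not reproduced here, and the sample number and scores are made up.

    SITES = {
        "+1":  "www.phonespamfilter.com",    # USA, Canada
        "+33": "www.phonespamfilter.fr",     # France
        "+44": "www.phonespamfilter.co.uk",  # United Kingdom
        "+61": "au.phonespamfilter.com",     # Australia
        "+64": "www.phonespamfilter.co.nz",  # New Zealand
    }

    def lookup_url(number):
        # match the country prefix and strip it, as handle_number() does above
        for prefix, site in SITES.items():
            if number.startswith(prefix):
                return "http://%s/check.php?phone=%s" % (site, number[len(prefix):])
        raise ValueError("number '%s' is not supported" % number)

    # hypothetical values: the service answered with a score of 87, limit is 50
    score, spamscore = 87, 50
    spam = score >= spamscore  # same decision as `False if score < spamscore else True`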
miguelgrinberg/python-socketio
src/socketio/kombu_manager.py
1
5298
import pickle import uuid try: import kombu except ImportError: kombu = None from .pubsub_manager import PubSubManager class KombuManager(PubSubManager): # pragma: no cover """Client manager that uses kombu for inter-process messaging. This class implements a client manager backend for event sharing across multiple processes, using RabbitMQ, Redis or any other messaging mechanism supported by `kombu <http://kombu.readthedocs.org/en/latest/>`_. To use a kombu backend, initialize the :class:`Server` instance as follows:: url = 'amqp://user:password@hostname:port//' server = socketio.Server(client_manager=socketio.KombuManager(url)) :param url: The connection URL for the backend messaging queue. Example connection URLs are ``'amqp://guest:guest@localhost:5672//'`` and ``'redis://localhost:6379/'`` for RabbitMQ and Redis respectively. Consult the `kombu documentation <http://kombu.readthedocs.org/en/latest/userguide\ /connections.html#urls>`_ for more on how to construct connection URLs. :param channel: The channel name on which the server sends and receives notifications. Must be the same in all the servers. :param write_only: If set to ``True``, only initialize to emit events. The default of ``False`` initializes the class for emitting and receiving. :param connection_options: additional keyword arguments to be passed to ``kombu.Connection()``. :param exchange_options: additional keyword arguments to be passed to ``kombu.Exchange()``. :param queue_options: additional keyword arguments to be passed to ``kombu.Queue()``. :param producer_options: additional keyword arguments to be passed to ``kombu.Producer()``. """ name = 'kombu' def __init__(self, url='amqp://guest:guest@localhost:5672//', channel='socketio', write_only=False, logger=None, connection_options=None, exchange_options=None, queue_options=None, producer_options=None): if kombu is None: raise RuntimeError('Kombu package is not installed ' '(Run "pip install kombu" in your ' 'virtualenv).') super(KombuManager, self).__init__(channel=channel, write_only=write_only, logger=logger) self.url = url self.connection_options = connection_options or {} self.exchange_options = exchange_options or {} self.queue_options = queue_options or {} self.producer_options = producer_options or {} self.producer = self._producer() def initialize(self): super(KombuManager, self).initialize() monkey_patched = True if self.server.async_mode == 'eventlet': from eventlet.patcher import is_monkey_patched monkey_patched = is_monkey_patched('socket') elif 'gevent' in self.server.async_mode: from gevent.monkey import is_module_patched monkey_patched = is_module_patched('socket') if not monkey_patched: raise RuntimeError( 'Kombu requires a monkey patched socket library to work ' 'with ' + self.server.async_mode) def _connection(self): return kombu.Connection(self.url, **self.connection_options) def _exchange(self): options = {'type': 'fanout', 'durable': False} options.update(self.exchange_options) return kombu.Exchange(self.channel, **options) def _queue(self): queue_name = 'flask-socketio.' 
+ str(uuid.uuid4()) options = {'durable': False, 'queue_arguments': {'x-expires': 300000}} options.update(self.queue_options) return kombu.Queue(queue_name, self._exchange(), **options) def _producer(self): return self._connection().Producer(exchange=self._exchange(), **self.producer_options) def __error_callback(self, exception, interval): self._get_logger().exception('Sleeping {}s'.format(interval)) def _publish(self, data): connection = self._connection() publish = connection.ensure(self.producer, self.producer.publish, errback=self.__error_callback) publish(pickle.dumps(data)) def _listen(self): reader_queue = self._queue() while True: connection = self._connection().ensure_connection( errback=self.__error_callback) try: with connection.SimpleQueue(reader_queue) as queue: while True: message = queue.get(block=True) message.ack() yield message.payload except connection.connection_errors: self._get_logger().exception("Connection error " "while reading from queue")
mit
5,484,561,835,408,599,000
42.42623
78
0.58607
false
4.59497
false
false
false
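A short usage sketch for the KombuManager listed above, following the pattern given in its own docstring; the AMQP URL is a placeholder and the kombu package must be installed.

    import socketio

    url = 'amqp://user:password@hostname:5672//'   # placeholder broker URL
    mgr = socketio.KombuManager(url, channel='socketio')
    sio = socketio.Server(client_manager=mgr)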
piotroxp/scibibscan
scib/lib/python3.5/site-packages/astropy/visualization/transform.py
1
1198
# Licensed under a 3-clause BSD style license - see LICENSE.rst

from __future__ import division, print_function

__all__ = ['BaseTransform', 'CompositeTransform']


class BaseTransform(object):
    """
    A transformation object.

    This is used to construct transformations such as scaling, stretching, and
    so on.
    """
    def __add__(self, other):
        return CompositeTransform(other, self)


class CompositeTransform(BaseTransform):
    """
    A combination of two transforms.

    Parameters
    ----------
    transform_1: :class:`astropy.visualization.BaseTransform`
        The first transform to apply.
    transform_2: :class:`astropy.visualization.BaseTransform`
        The second transform to apply.
    """

    def __init__(self, transform_1, transform_2):
        super(CompositeTransform, self).__init__()
        self.transform_1 = transform_1
        self.transform_2 = transform_2

    def __call__(self, values, clip=True):
        return self.transform_2(self.transform_1(values, clip=clip), clip=clip)

    @property
    def inverse(self):
        return CompositeTransform(self.transform_2.inverse,
                                  self.transform_1.inverse)
mit
-9,155,531,039,135,937,000
27.52381
79
0.647746
false
4.21831
false
false
false
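To show how BaseTransform.__add__ chains transforms, here is a small sketch with two toy subclasses (both hypothetical). The import path assumes the package re-exports BaseTransform at the astropy.visualization level, as the module's __all__ suggests; note that a + b builds CompositeTransform(b, a), so the right-hand transform is applied first.

    from astropy.visualization import BaseTransform

    class AddOne(BaseTransform):
        def __call__(self, values, clip=True):
            return [v + 1 for v in values]

    class Double(BaseTransform):
        def __call__(self, values, clip=True):
            return [v * 2 for v in values]

    combined = Double() + AddOne()   # __add__ builds CompositeTransform(AddOne(), Double())
    print(combined([1, 2, 3]))       # AddOne is applied first, then Double -> [4, 6, 8]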
supriyantomaftuh/zget
zget/put.py
1
7124
#!/usr/bin/env python from __future__ import absolute_import, division, print_function, \ unicode_literals import os import sys import time import socket try: import urllib.request as urllib except ImportError: import urllib import hashlib import argparse import logging from zeroconf import ServiceInfo, Zeroconf try: from http.server import BaseHTTPRequestHandler, HTTPServer except ImportError: from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from . import utils __all__ = ["put"] def validate_address(address): """ Validate IP address """ try: socket.inet_aton(address) return address except socket.error: raise argparse.ArgumentTypeError( "%s is not a valid IP address" % address ) class StateHTTPServer(HTTPServer): """ HTTP Server that knows a certain filename and can be set to remember if that file has been transferred using :class:`FileHandler` """ downloaded = False filename = "" basename = "" reporthook = None class FileHandler(BaseHTTPRequestHandler): """ Custom HTTP upload handler that allows one single filename to be requested. """ def do_GET(self): if self.path == urllib.pathname2url( os.path.join('/', self.server.basename) ): utils.logger.info("Peer found. Uploading...") full_path = os.path.join(os.curdir, self.server.filename) with open(full_path, 'rb') as fh: maxsize = os.path.getsize(full_path) self.send_response(200) self.send_header('Content-type', 'application/octet-stream') self.send_header('Content-length', maxsize) self.end_headers() i = 0 while True: data = fh.read(1024 * 8) # chunksize taken from urllib if not data: break self.wfile.write(data) if self.server.reporthook is not None: self.server.reporthook(i, 1024 * 8, maxsize) i += 1 self.server.downloaded = True else: self.send_response(404) self.end_headers() raise RuntimeError("Invalid request received. Aborting.") def log_message(self, format, *args): """ Suppress log messages by overloading this function """ return def cli(inargs=None): """ Commandline interface for sending files """ parser = argparse.ArgumentParser() parser.add_argument( '--port', '-p', type=int, nargs='?', help="The port to share the file on" ) parser.add_argument( '--address', '-a', nargs='?', type=validate_address, help="The address to share the file on" ) parser.add_argument( '--interface', '-i', nargs='?', help="The interface to share the file on" ) parser.add_argument( '--verbose', '-v', action='count', default=0, help="Verbose mode. Multiple -v options increase the verbosity" ) parser.add_argument( '--quiet', '-q', action='count', default=0, help="Quiet mode. 
Hides progess bar" ) parser.add_argument( '--timeout', '-t', type=int, metavar="SECONDS", help="Set timeout after which program aborts transfer" ) parser.add_argument( '--version', '-V', action='version', version='%%(prog)s %s' % utils.__version__ ) parser.add_argument( 'input', help="The file to share on the network" ) args = parser.parse_args(inargs) utils.enable_logger(args.verbose) try: if not os.path.isfile(args.input): raise ValueError( "File %s does not exist" % args.input ) if args.interface and args.address: raise ValueError( "You may only provide one of --address " "or --interface" ) with utils.Progresshook(args.input) as progress: put( args.input, interface=args.interface, address=args.address, port=args.port, reporthook=progress if args.quiet == 0 else None, timeout=args.timeout, ) except Exception as e: if args.verbose: raise utils.logger.error(e.message) sys.exit(1) def put( filename, interface=None, address=None, port=None, reporthook=None, timeout=None, ): """Send a file using the zget protocol. Parameters ---------- filename : string The filename to be transferred interface : string The network interface to use. Optional. address : string The network address to use. Optional. port : int The network port to use. Optional. reporthook : callable A hook that will be called during transfer. Handy for watching the transfer. See :code:`urllib.urlretrieve` for callback parameters. Optional. timeout : int Seconds to wait until process is aborted. A running transfer is not aborted even when timeout was hit. Optional. Raises ------- TimeoutException When a timeout occurred. """ if port is None: port = utils.config().getint('DEFAULT', 'port') if interface is None: interface = utils.config().get('DEFAULT', 'interface') if not 0 <= port <= 65535: raise ValueError("Port %d exceeds allowed range" % port) basename = os.path.basename(filename) filehash = hashlib.sha1(basename.encode('utf-8')).hexdigest() if interface is None: interface = utils.default_interface() if address is None: address = utils.ip_addr(interface) server = StateHTTPServer((address, port), FileHandler) server.timeout = timeout server.filename = filename server.basename = basename server.reporthook = reporthook port = server.server_port utils.logger.debug( "Using interface %s" % interface ) utils.logger.debug( "Listening on %s:%d \n" "you may change address using --address and " "port using --port" % (address, port) ) utils.logger.debug( "Broadcasting as %s._zget._http._tcp.local." % filehash ) info = ServiceInfo( "_zget._http._tcp.local.", "%s._zget._http._tcp.local." % filehash, socket.inet_aton(address), port, 0, 0, {'path': None} ) zeroconf = Zeroconf() try: zeroconf.register_service(info) server.handle_request() except KeyboardInterrupt: pass server.socket.close() zeroconf.unregister_service(info) zeroconf.close() if timeout is not None and not server.downloaded: raise utils.TimeoutException() else: utils.logger.info("Done.") if __name__ == '__main__': cli(sys.argv[1:])
mit
-6,137,828,054,823,278,000
25.681648
79
0.580011
false
4.299336
false
false
false
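A minimal usage sketch for the put API above; the filename and timeout are placeholders, and zeroconf must be usable on the chosen network interface.

    from zget.put import put

    # Offer a single file on the local network; per the code above, a
    # TimeoutException is raised if nothing was downloaded before the timeout.
    put("archive.tar.gz", timeout=120)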
russellhadley/coreclr
src/scripts/genEventPipe.py
1
17048
from __future__ import print_function from genXplatEventing import * from genXplatLttng import * import os import xml.dom.minidom as DOM stdprolog = """// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. /****************************************************************** DO NOT MODIFY. AUTOGENERATED FILE. This file is generated using the logic from <root>/src/scripts/genEventPipe.py ******************************************************************/ """ stdprolog_cmake = """# # #****************************************************************** #DO NOT MODIFY. AUTOGENERATED FILE. #This file is generated using the logic from <root>/src/scripts/genEventPipe.py #****************************************************************** """ def generateClrEventPipeWriteEventsImpl( providerName, eventNodes, allTemplates, exclusionListFile): providerPrettyName = providerName.replace("Windows-", '') providerPrettyName = providerPrettyName.replace("Microsoft-", '') providerPrettyName = providerPrettyName.replace('-', '_') WriteEventImpl = [] # EventPipeEvent declaration for eventNode in eventNodes: eventName = eventNode.getAttribute('symbol') WriteEventImpl.append( "EventPipeEvent *EventPipeEvent" + eventName + " = nullptr;\n") for eventNode in eventNodes: eventName = eventNode.getAttribute('symbol') templateName = eventNode.getAttribute('template') # generate EventPipeEventEnabled function eventEnabledImpl = """bool EventPipeEventEnabled%s() { return EventPipeEvent%s->IsEnabled(); } """ % (eventName, eventName) WriteEventImpl.append(eventEnabledImpl) # generate EventPipeWriteEvent function fnptype = [] linefnptype = [] fnptype.append("extern \"C\" ULONG EventPipeWriteEvent") fnptype.append(eventName) fnptype.append("(\n") if templateName: template = allTemplates[templateName] else: template = None if template: fnSig = template.signature for paramName in fnSig.paramlist: fnparam = fnSig.getParam(paramName) wintypeName = fnparam.winType typewName = palDataTypeMapping[wintypeName] winCount = fnparam.count countw = palDataTypeMapping[winCount] if paramName in template.structs: linefnptype.append( "%sint %s_ElementSize,\n" % (lindent, paramName)) linefnptype.append(lindent) linefnptype.append(typewName) if countw != " ": linefnptype.append(countw) linefnptype.append(" ") linefnptype.append(fnparam.name) linefnptype.append(",\n") if len(linefnptype) > 0: del linefnptype[-1] fnptype.extend(linefnptype) fnptype.append(")\n{\n") checking = """ if (!EventPipeEventEnabled%s()) return ERROR_SUCCESS; """ % (eventName) fnptype.append(checking) WriteEventImpl.extend(fnptype) if template: body = generateWriteEventBody(template, providerName, eventName) WriteEventImpl.append(body) else: WriteEventImpl.append( " EventPipe::WriteEvent(*EventPipeEvent" + eventName + ", (BYTE*) nullptr, 0);\n") WriteEventImpl.append("\n return ERROR_SUCCESS;\n}\n\n") # EventPipeProvider and EventPipeEvent initialization WriteEventImpl.append( "extern \"C\" void Init" + providerPrettyName + "()\n{\n") WriteEventImpl.append( " EventPipeProvider" + providerPrettyName + " = EventPipe::CreateProvider(" + providerPrettyName + "GUID);\n") for eventNode in eventNodes: eventName = eventNode.getAttribute('symbol') templateName = eventNode.getAttribute('template') eventKeywords = eventNode.getAttribute('keywords') eventKeywordsMask = generateEventKeywords(eventKeywords) eventValue = 
eventNode.getAttribute('value') eventVersion = eventNode.getAttribute('version') eventLevel = eventNode.getAttribute('level') eventLevel = eventLevel.replace("win:", "EventPipeEventLevel::") exclusionInfo = parseExclusionList(exclusionListFile) taskName = eventNode.getAttribute('task') initEvent = """ EventPipeEvent%s = EventPipeProvider%s->AddEvent(%s,%s,%s,%s); """ % (eventName, providerPrettyName, eventValue, eventKeywordsMask, eventVersion, eventLevel) WriteEventImpl.append(initEvent) WriteEventImpl.append("}") return ''.join(WriteEventImpl) def generateWriteEventBody(template, providerName, eventName): header = """ char stackBuffer[%s]; char *buffer = stackBuffer; unsigned int offset = 0; unsigned int size = %s; bool fixedBuffer = true; bool success = true; """ % (template.estimated_size, template.estimated_size) fnSig = template.signature pack_list = [] for paramName in fnSig.paramlist: parameter = fnSig.getParam(paramName) if paramName in template.structs: size = "(int)%s_ElementSize * (int)%s" % ( paramName, parameter.prop) if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]: size = "(int)(%s)" % specialCaseSizes[template.name][paramName] pack_list.append( " success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" % (paramName, size)) elif paramName in template.arrays: size = "sizeof(%s) * (int)%s" % ( lttngDataTypeMapping[parameter.winType], parameter.prop) if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]: size = "(int)(%s)" % specialCaseSizes[template.name][paramName] pack_list.append( " success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" % (paramName, size)) elif parameter.winType == "win:GUID": pack_list.append( " success &= WriteToBuffer(*%s, buffer, offset, size, fixedBuffer);" % (parameter.name,)) else: pack_list.append( " success &= WriteToBuffer(%s, buffer, offset, size, fixedBuffer);" % (parameter.name,)) code = "\n".join(pack_list) + "\n\n" checking = """ if (!success) { if (!fixedBuffer) delete[] buffer; return ERROR_WRITE_FAULT; }\n\n""" body = " EventPipe::WriteEvent(*EventPipeEvent" + \ eventName + ", (BYTE *)buffer, size);\n" footer = """ if (!fixedBuffer) delete[] buffer; """ return header + code + checking + body + footer providerGUIDMap = {} providerGUIDMap[ "{e13c0d23-ccbc-4e12-931b-d9cc2eee27e4}"] = "{0xe13c0d23,0xccbc,0x4e12,{0x93,0x1b,0xd9,0xcc,0x2e,0xee,0x27,0xe4}}" providerGUIDMap[ "{A669021C-C450-4609-A035-5AF59AF4DF18}"] = "{0xA669021C,0xC450,0x4609,{0xA0,0x35,0x5A,0xF5,0x9A,0xF4,0xDF,0x18}}" providerGUIDMap[ "{CC2BCBBA-16B6-4cf3-8990-D74C2E8AF500}"] = "{0xCC2BCBBA,0x16B6,0x4cf3,{0x89,0x90,0xD7,0x4C,0x2E,0x8A,0xF5,0x00}}" providerGUIDMap[ "{763FD754-7086-4dfe-95EB-C01A46FAF4CA}"] = "{0x763FD754,0x7086,0x4dfe,{0x95,0xEB,0xC0,0x1A,0x46,0xFA,0xF4,0xCA}}" def generateGUID(tmpGUID): return providerGUIDMap[tmpGUID] keywordMap = {} def generateEventKeywords(eventKeywords): mask = 0 # split keywords if there are multiple allKeywords = eventKeywords.split() for singleKeyword in allKeywords: mask = mask | keywordMap[singleKeyword] return mask def generateEventPipeCmakeFile(etwmanifest, eventpipe_directory): tree = DOM.parse(etwmanifest) with open(eventpipe_directory + "CMakeLists.txt", 'w') as topCmake: topCmake.write(stdprolog_cmake + "\n") topCmake.write("""cmake_minimum_required(VERSION 2.8.12.2) project(eventpipe) set(CMAKE_INCLUDE_CURRENT_DIR ON) include_directories(${CLR_DIR}/src/vm) add_library(eventpipe STATIC\n""") for 
providerNode in tree.getElementsByTagName('provider'): providerName = providerNode.getAttribute('name') providerName = providerName.replace("Windows-", '') providerName = providerName.replace("Microsoft-", '') providerName_File = providerName.replace('-', '') providerName_File = providerName_File.lower() topCmake.write(' "%s.cpp"\n' % (providerName_File)) topCmake.write(' "eventpipehelpers.cpp"\n') topCmake.write(""" ) add_dependencies(eventpipe GeneratedEventingFiles) # Install the static eventpipe library install(TARGETS eventpipe DESTINATION lib) """) topCmake.close() def generateEventPipeHelperFile(etwmanifest, eventpipe_directory): with open(eventpipe_directory + "eventpipehelpers.cpp", 'w') as helper: helper.write(stdprolog) helper.write(""" #include "stdlib.h" bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer) { newSize *= 1.5; _ASSERTE(newSize > size); // check for overflow if (newSize < 32) newSize = 32; char *newBuffer = new char[newSize]; memcpy(newBuffer, buffer, currLen); if (!fixedBuffer) delete[] buffer; buffer = newBuffer; size = newSize; fixedBuffer = false; return true; } bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer) { if(!src) return true; if (offset + len > size) { if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer)) return false; } memcpy(buffer + offset, src, len); offset += len; return true; } bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer) { if(!str) return true; unsigned int byteCount = (PAL_wcslen(str) + 1) * sizeof(*str); if (offset + byteCount > size) { if (!ResizeBuffer(buffer, size, offset, size + byteCount, fixedBuffer)) return false; } memcpy(buffer + offset, str, byteCount); offset += byteCount; return true; } bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer) { if(!str) return true; unsigned int len = strlen(str) + 1; if (offset + len > size) { if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer)) return false; } memcpy(buffer + offset, str, len); offset += len; return true; } """) tree = DOM.parse(etwmanifest) for providerNode in tree.getElementsByTagName('provider'): providerName = providerNode.getAttribute('name') providerPrettyName = providerName.replace("Windows-", '') providerPrettyName = providerPrettyName.replace("Microsoft-", '') providerPrettyName = providerPrettyName.replace('-', '_') helper.write( "extern \"C\" void Init" + providerPrettyName + "();\n\n") helper.write("extern \"C\" void InitProvidersAndEvents()\n{\n") for providerNode in tree.getElementsByTagName('provider'): providerName = providerNode.getAttribute('name') providerPrettyName = providerName.replace("Windows-", '') providerPrettyName = providerPrettyName.replace("Microsoft-", '') providerPrettyName = providerPrettyName.replace('-', '_') helper.write(" Init" + providerPrettyName + "();\n") helper.write("}") helper.close() def generateEventPipeImplFiles( etwmanifest, eventpipe_directory, exclusionListFile): tree = DOM.parse(etwmanifest) coreclrRoot = os.getcwd() for providerNode in tree.getElementsByTagName('provider'): providerGUID = providerNode.getAttribute('guid') providerGUID = generateGUID(providerGUID) providerName = providerNode.getAttribute('name') providerPrettyName = providerName.replace("Windows-", '') providerPrettyName = providerPrettyName.replace("Microsoft-", '') 
providerName_File = providerPrettyName.replace('-', '') providerName_File = providerName_File.lower() providerPrettyName = providerPrettyName.replace('-', '_') eventpipefile = eventpipe_directory + providerName_File + ".cpp" eventpipeImpl = open(eventpipefile, 'w') eventpipeImpl.write(stdprolog) header = """ #include \"%s/src/vm/common.h\" #include \"%s/src/vm/eventpipeprovider.h\" #include \"%s/src/vm/eventpipeevent.h\" #include \"%s/src/vm/eventpipe.h\" bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer); bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer); bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer); bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer); template <typename T> bool WriteToBuffer(const T &value, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer) { if (sizeof(T) + offset > size) { if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer)) return false; } *(T *)(buffer + offset) = value; offset += sizeof(T); return true; } """ % (coreclrRoot, coreclrRoot, coreclrRoot, coreclrRoot) eventpipeImpl.write(header) eventpipeImpl.write( "GUID const " + providerPrettyName + "GUID = " + providerGUID + ";\n") eventpipeImpl.write( "EventPipeProvider *EventPipeProvider" + providerPrettyName + " = nullptr;\n") templateNodes = providerNode.getElementsByTagName('template') allTemplates = parseTemplateNodes(templateNodes) eventNodes = providerNode.getElementsByTagName('event') eventpipeImpl.write( generateClrEventPipeWriteEventsImpl( providerName, eventNodes, allTemplates, exclusionListFile) + "\n") eventpipeImpl.close() def generateEventPipeFiles( etwmanifest, eventpipe_directory, exclusionListFile): eventpipe_directory = eventpipe_directory + "/" tree = DOM.parse(etwmanifest) if not os.path.exists(eventpipe_directory): os.makedirs(eventpipe_directory) # generate Cmake file generateEventPipeCmakeFile(etwmanifest, eventpipe_directory) # generate helper file generateEventPipeHelperFile(etwmanifest, eventpipe_directory) # generate all keywords for keywordNode in tree.getElementsByTagName('keyword'): keywordName = keywordNode.getAttribute('name') keywordMask = keywordNode.getAttribute('mask') keywordMap[keywordName] = int(keywordMask, 0) # generate .cpp file for each provider generateEventPipeImplFiles( etwmanifest, eventpipe_directory, exclusionListFile) import argparse import sys def main(argv): # parse the command line parser = argparse.ArgumentParser( description="Generates the Code required to instrument eventpipe logging mechanism") required = parser.add_argument_group('required arguments') required.add_argument('--man', type=str, required=True, help='full path to manifest containig the description of events') required.add_argument('--intermediate', type=str, required=True, help='full path to eventprovider intermediate directory') required.add_argument('--exc', type=str, required=True, help='full path to exclusion list') args, unknown = parser.parse_known_args(argv) if unknown: print('Unknown argument(s): ', ', '.join(unknown)) return const.UnknownArguments sClrEtwAllMan = args.man intermediate = args.intermediate exclusionListFile = args.exc generateEventPipeFiles(sClrEtwAllMan, intermediate, exclusionListFile) if __name__ == '__main__': return_code = main(sys.argv[1:]) sys.exit(return_code)
mit
-4,561,604,318,752,009,700
33.297787
130
0.622492
false
3.947661
false
false
false
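The generator above is normally driven from the command line through main(); the sketch below shows an equivalent programmatic call. All three paths are placeholders, and the script's own imports (genXplatEventing, genXplatLttng) must be importable for the module to load.

    import genEventPipe

    # equivalent to: python genEventPipe.py --man ... --intermediate ... --exc ...
    genEventPipe.main([
        "--man", "path/to/ClrEtwAll.man",        # ETW manifest (placeholder path)
        "--intermediate", "path/to/eventpipe",   # output directory (placeholder)
        "--exc", "path/to/exclusion-list.lst",   # exclusion list (placeholder)
    ])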
wasserfeder/lomap
lomap/algorithms/dijkstra.py
1
12332
#! /usr/bin/python # Copyright (C) 2012-2015, Alphan Ulusoy ([email protected]) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from __future__ import print_function __all__ = ['subset_to_subset_dijkstra_path_value', 'source_to_target_dijkstra', 'dijkstra_to_all'] def subset_to_subset_dijkstra_path_value(source_set, G, target_set, combine_fn='sum', degen_paths=False, weight_key='weight'): """ Compute the shortest path lengths between two sets of nodes in a weighted graph. Adapted from 'single_source_dijkstra_path_length' in NetworkX, available at http://networkx.github.io. Parameters ---------- G: NetworkX graph source_set: Set of node labels Starting nodes for paths target_set: Set of node labels Ending nodes for paths combine_fn: Function, optional (default: (lambda a,b: a+b)) Function used to combine two path values degen_paths: Boolean, optional (default: False) Controls whether degenerate paths (paths that do not traverse any edges) are acceptable. weight_key: String, optional (default: 'weight') Edge data key corresponding to the edge weight. Returns ------- length : dictionary Dictionary of dictionaries of shortest lengths keyed by source and target labels. Notes ----- Edge weight attributes must be numerical. This algorithm is not guaranteed to work if edge weights are negative or are floating point numbers (overflows and roundoff errors can cause problems). Input is assumed to be a MultiDiGraph with singleton edges. """ import heapq all_dist = {} # dictionary of final distances from source_set to target_set if combine_fn == 'sum': # Classical dijkstra for source in source_set: dist = {} # dictionary of final distances from source fringe=[] # use heapq with (distance,label) tuples if degen_paths: # Allow degenerate paths # Add zero length path from source to source seen = {source:0} heapq.heappush(fringe,(0,source)) else: # Don't allow degenerate paths # Add all neighbors of source to start the algorithm seen = dict() for _, w, edgedata in G.edges_iter([source], data=True): vw_dist = edgedata[weight_key] seen[w] = vw_dist heapq.heappush(fringe,(vw_dist,w)) while fringe: (d,v)=heapq.heappop(fringe) if v in dist: continue # Already searched this node. 
dist[v] = d # Update distance to this node for _, w, edgedata in G.edges_iter([v], data=True): vw_dist = dist[v] + edgedata[weight_key] if w in dist: if vw_dist < dist[w]: raise ValueError('Contradictory paths found:', 'negative weights?') elif w not in seen or vw_dist < seen[w]: seen[w] = vw_dist heapq.heappush(fringe,(vw_dist,w)) # Remove the entries that we are not interested in for key in dist.keys(): if key not in target_set: dist.pop(key) # Add inf cost to target nodes not in dist for t in target_set: if t not in dist.keys(): dist[t] = float('inf') # Save the distance info for this source all_dist[source] = dist elif combine_fn == 'max': # Path length is (max edge length, total edge length) for source in source_set: dist = {} # dictionary of final distances from source fringe=[] # use heapq with (bot_dist,dist,label) tuples if degen_paths: # Allow degenerate paths # Add zero length path from source to source seen = {source:(0,0)} heapq.heappush(fringe,(0,0,source)) else: # Don't allow degenerate paths # Add all neighbors of source to start the algorithm seen = dict() for _, w, edgedata in G.edges_iter([source], data=True): vw_dist = edgedata[weight_key] seen[w] = (vw_dist,vw_dist) heapq.heappush(fringe,(vw_dist,vw_dist,w)) while fringe: (d_bot,d_sum,v)=heapq.heappop(fringe) if v in dist: continue # Already searched this node. dist[v] = (d_bot,d_sum) # Update distance to this node for _, w, edgedata in G.edges_iter([v], data=True): vw_dist_bot = max(dist[v][0],edgedata[weight_key]) vw_dist_sum = dist[v][1] + edgedata[weight_key] if w in dist: if vw_dist_bot < dist[w][0]: raise ValueError('Contradictory paths found:', 'negative weights?') elif w not in seen or vw_dist_bot < seen[w][0] \ or (vw_dist_bot == seen[w][0] \ and vw_dist_sum < seen[w][1]): seen[w] = (vw_dist_bot, vw_dist_sum) heapq.heappush(fringe,(vw_dist_bot,vw_dist_sum,w)) # Remove the entries that we are not interested in for key in dist.keys(): if key not in target_set: dist.pop(key) # Add inf cost to target nodes not in dist for t in target_set: if t not in dist.keys(): dist[t] = (float('inf'),float('inf')) # Save the distance info for this source all_dist[source] = dist else: assert(False) return all_dist def dijkstra_to_all(G, source, degen_paths = False, weight_key='weight'): """ Compute shortest paths and lengths in a weighted graph G. Adapted from 'single_source_dijkstra_path' in NetworkX, available at http://networkx.github.io. Parameters ---------- G : NetworkX graph source : Node label Starting node for the path weight_key: String, optional (default: 'weight') Edge data key corresponding to the edge weight. Returns ------- distance,path : Tuple Returns a tuple distance and path from source to target. Notes --------- Edge weight attributes must be numerical. Based on the Python cookbook recipe (119466) at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466 This algorithm is not guaranteed to work if edge weights are negative or are floating point numbers (overflows and roundoff errors can cause problems). 
""" import heapq dist = {} # dictionary of final distances fringe=[] # use heapq with (distance,label) tuples if degen_paths: # Allow degenerate paths # Add zero length path from source to source seen = {source:0} heapq.heappush(fringe,(0,source)) paths = {source:[source]} else: # Don't allow degenerate paths # Add all neighbors of source to start the algorithm paths = dict() seen = dict() for _, w, edgedata in G.edges_iter([source], data=True): vw_dist = edgedata[weight_key] paths[w] = [source, w] seen[w] = vw_dist heapq.heappush(fringe,(vw_dist,w)) while fringe: (d,v)=heapq.heappop(fringe) if v in dist: continue # already searched this node. dist[v] = d # Update distance to this node for _, w, edgedata in G.edges_iter([v], data=True): vw_dist = dist[v] + edgedata[weight_key] if w in dist: if vw_dist < dist[w]: raise ValueError('Contradictory paths found:', 'negative weights?') elif w not in seen or vw_dist < seen[w]: seen[w] = vw_dist paths[w] = paths[v]+[w] heapq.heappush(fringe,(vw_dist,w)) return (dist, paths) def source_to_target_dijkstra(G, source, target, combine_fn='sum', degen_paths=False, cutoff=None, weight_key='weight'): """ Compute shortest paths and lengths in a weighted graph G. Adapted from 'single_source_dijkstra_path' in NetworkX, available at http://networkx.github.io. Parameters ---------- G : NetworkX graph source : Node label Starting node for the path target : Node label Ending node for the path degen_paths: Boolean, optional (default: False) Controls whether degenerate paths (paths that do not traverse any edges) are acceptable. cutoff : integer or float, optional (default: None) Depth to stop the search. Only paths of length <= cutoff are returned. weight_key: String, optional (default: 'weight') Edge data key corresponding to the edge weight. Returns ------- distance,path : Tuple Returns a tuple distance and path from source to target. Examples -------- >>> G=networkx.path_graph(5) >>> length,path=source_to_target_dijkstra(G,0,4) >>> print(length) 4 >>> path [0, 1, 2, 3, 4] Notes --------- Edge weight attributes must be numerical. Based on the Python cookbook recipe (119466) at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466 This algorithm is not guaranteed to work if edge weights are negative or are floating point numbers (overflows and roundoff errors can cause problems). """ import heapq dist = {} # dictionary of final distances fringe=[] # use heapq with (distance,label) tuples if combine_fn == 'sum': if degen_paths: # Allow degenerate paths if source==target: # Terminate immediately if source == target return (0, [source]) else: # Add zero length path from source to source paths = {source:[source]} # dictionary of paths seen = {source:0} heapq.heappush(fringe,(0,source)) else: # Don't allow degenerate paths # Add all neighbors of source to start the algorithm paths = dict() seen = dict() for _, w, edgedata in G.edges_iter([source], data=True): vw_dist = edgedata[weight_key] paths[w] = [source, w] seen[w] = vw_dist heapq.heappush(fringe,(vw_dist,w)) while fringe: (d,v)=heapq.heappop(fringe) if v in dist: continue # already searched this node. 
dist[v] = d # Update distance to this node if v == target: break # Discovered path to target node for _, w, edgedata in G.edges_iter([v], data=True): vw_dist = dist[v] + edgedata[weight_key] if cutoff is not None: if vw_dist>cutoff: continue # Longer than cutoff, ignore this path if w in dist: if vw_dist < dist[w]: raise ValueError('Contradictory paths found:', 'negative weights?') elif w not in seen or vw_dist < seen[w]: seen[w] = vw_dist paths[w] = paths[v]+[w] heapq.heappush(fringe,(vw_dist,w)) # Add inf cost to target if not in dist if target not in dist.keys(): dist[target] = float('inf') paths[target] = [''] return (dist[target],paths[target]) elif combine_fn == 'max': if degen_paths: # Allow degenerate paths if source==target: # Terminate immediately if source == target return (0, [source]) else: # Add zero length path from source to source paths = {source:[source]} # dictionary of paths seen = {source:(0,0)} heapq.heappush(fringe,(0,0,source)) else: # Don't allow degenerate paths # Add all neighbors of source to start the algorithm paths = dict() seen = dict() for _, w, edgedata in G.edges_iter([source], data=True): vw_dist = edgedata[weight_key] paths[w] = [source, w] seen[w] = (vw_dist, vw_dist) heapq.heappush(fringe,(vw_dist,vw_dist,w)) while fringe: (d_bot,d_sum,v)=heapq.heappop(fringe) if v in dist: continue # already searched this node. dist[v] = (d_bot,d_sum) # Update distance to this node if v == target: break # Discovered path to target node for _, w, edgedata in G.edges_iter([v], data=True): vw_dist_bot = max(dist[v][0], edgedata[weight_key]) vw_dist_sum = dist[v][1] + edgedata[weight_key] if cutoff is not None: if vw_dist_bot>cutoff: continue # Longer than cutoff, ignore this path if w in dist: if vw_dist_bot < dist[w][0]: raise ValueError('Contradictory paths found:', 'negative weights?') elif w not in seen or vw_dist_bot < seen[w][0] \ or (vw_dist_bot == seen[w][0] \ and vw_dist_sum < seen[w][1]): seen[w] = (vw_dist_bot, vw_dist_sum) paths[w] = paths[v]+[w] heapq.heappush(fringe,(vw_dist_bot,vw_dist_sum,w)) # Add inf cost to target if not in dist if target not in dist.keys(): dist[target] = (float('inf'),float('inf')) paths[target] = [''] return (dist[target][0],paths[target]) else: assert(False)
gpl-2.0
134,945,866,439,583,540
27.948357
81
0.665504
false
3.112569
false
false
false
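A small worked example for source_to_target_dijkstra above, using an explicitly weighted MultiDiGraph as the module's docstrings assume. It requires networkx 1.x, since the code calls G.edges_iter; the node names and weights are made up.

    import networkx as nx
    from lomap.algorithms.dijkstra import source_to_target_dijkstra

    G = nx.MultiDiGraph()
    G.add_edge('a', 'b', weight=1)
    G.add_edge('b', 'c', weight=2)
    G.add_edge('a', 'c', weight=5)

    dist, path = source_to_target_dijkstra(G, 'a', 'c')
    # dist == 3, path == ['a', 'b', 'c']  (the two-hop route beats the direct edge of 5)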
jness/MTG-Toolbox
WebMTG/models.py
1
2426
from django.db import models
from django.contrib.auth.models import User


class MTGSet(models.Model):
    label = models.CharField(max_length=75, unique=True)
    display_name = models.CharField(max_length=75)
    magiccards_info = models.CharField(max_length=10, null=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.display_name


class MTGCard(models.Model):
    magiccard_id = models.CharField(max_length=10)
    gatherer_id = models.IntegerField()
    tcgplayer_id = models.IntegerField()
    card_name = models.CharField(max_length=75)
    cost = models.CharField(max_length=20, null=True)
    rarity = models.CharField(max_length=50)
    type = models.CharField(max_length=50)
    set = models.ForeignKey(MTGSet)
    low = models.DecimalField(decimal_places=2, max_digits=10)
    avg = models.DecimalField(decimal_places=2, max_digits=10)
    high = models.DecimalField(decimal_places=2, max_digits=10)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.card_name


class MTGPrice(models.Model):
    card = models.ForeignKey(MTGCard)
    low = models.DecimalField(decimal_places=2, max_digits=10)
    avg = models.DecimalField(decimal_places=2, max_digits=10)
    high = models.DecimalField(decimal_places=2, max_digits=10)
    created = models.DateTimeField()
    modified = models.DateTimeField()

    def __unicode__(self):
        return self.card.card_name


class MTGPriceArchive(models.Model):
    card = models.ForeignKey(MTGCard)
    datelabel = models.CharField(max_length=12)
    avg = models.DecimalField(decimal_places=2, max_digits=10)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.card.card_name


class MTGHash(models.Model):
    card = models.ForeignKey(MTGCard)
    hash = models.CharField(max_length=30)
    created = models.DateTimeField(auto_now=True)
    modified = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.card.card_name


class UserWatch(models.Model):
    card = models.ForeignKey(MTGCard)
    user = models.ForeignKey(User)

    def __unicode__(self):
        return self.card.card_name
gpl-2.0
-2,379,355,143,322,243,000
35.208955
64
0.694559
false
3.594074
false
false
false
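A hypothetical query sketch for the models above, assuming they live in an installed Django app; the set label and rarity values are made up and only serve to illustrate the reverse relation to the price archive.

    # newest archived average price for every rare card in one (made-up) set
    rares = MTGCard.objects.filter(set__label='ISD', rarity='Rare')
    for card in rares:
        try:
            latest = card.mtgpricearchive_set.latest('created')
        except MTGPriceArchive.DoesNotExist:
            continue
        print(card.card_name, latest.avg)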
lucasdavid/grapher
grapher/repositories/graph.py
1
6676
import abc import py2neo from py2neo import Graph, Node, Relationship from . import base from .. import errors, settings class GraphRepository(base.Repository, metaclass=abc.ABCMeta): _g = None connection_string = settings.effective.DATABASES['neo4j'] @property def g(self): self._g = self._g or Graph('http://%s:%s@%s' % ( self.connection_string['username'], self.connection_string['password'], self.connection_string['uri'], )) return self._g def _build(self, identities): """Build entities or relationships based on their identities. :param identities: :list of identities compatible with self.schema[self.schema.Meta.identity]['type']. :return: a list of :nodes or :relationships corresponding to the identities passed, in order. """ raise NotImplementedError def find(self, identities): entities = self._build(identities) try: self.g.pull(*entities) except py2neo.GraphError: raise errors.NotFoundError(('NOT_FOUND', identities)) return self.to_dict_of_dicts(entities) def create(self, entities, raise_errors=False): entities = self.from_dict_of_dicts(entities) entities = self.g.create(*entities) return self.to_dict_of_dicts(entities), {} def update(self, entities, raise_errors=False): entities = self.from_dict_of_dicts(entities) self.g.push(*entities) return self.to_dict_of_dicts(entities), {} def delete(self, identities, raise_errors=False): entities = self._build(identities) entities = self.g.delete(*entities) return self.to_dict_of_dicts(entities), {} class GraphEntityRepository(GraphRepository, base.EntityRepository): def _build(self, identities): return [self.g.node(i) for i in identities] def from_dict_of_dicts(self, entries): nodes = [] for i, entry in entries.items(): # Find if the node exists on the database or is a new node. if self.schema.Meta.identity in entry: # The entry claims to have an identity, # bind the node to a database node. node = self.g.node(entry[self.schema.Meta.identity]) del entry[self.schema.Meta.identity] else: # That's a new entry. Create a new node. node = Node(self.label) node.properties.update(entry) nodes.append(node) return nodes def to_dict_of_dicts(self, entities, indices=None): entries, entities = [], list(entities) for node in entities: e = node.properties e[self.schema.Meta.identity] = node._id entries.append(e) return super().to_dict_of_dicts(entries) def all(self, skip=0, limit=None): if limit is not None: limit += skip nodes = self.g.find(self.label, limit=limit) # Discard :skip elements. for _ in range(skip): next(nodes) return self.to_dict_of_dicts(nodes) def where(self, skip=0, limit=None, **query): if len(query) != 1: raise ValueError('GraphRepository.where does not support ' 'multiple parameter filtering yet.') # TODO: Allow multiple keys when searching. 
This issue might help: # http://stackoverflow.com/questions/27795874/py2neo-graph-find-one-with-multiple-key-values query_item = query.popitem() if query_item[0] == self.schema.Meta.identity: return self.find((query_item[1],)) if limit is not None: limit += skip nodes = self.g.find(self.label, *query_item, limit=limit) for _ in range(skip): next(nodes) return self.to_dict_of_dicts(nodes) class GraphRelationshipRepository(GraphRepository, base.RelationshipRepository): def _build(self, identities): return [self.g.relationship(i) for i in identities] def from_dict_of_dicts(self, entries): entities, indices = super().from_dict_of_dicts(entries) relationships = [] for r in entities: if self.schema.Meta.identity in r: relationship = self.g.relationship(r) else: origin = self.g.node(r['_origin']) target = self.g.node(r['_target']) relationship = Relationship(origin, self.label.upper(), target) # Delete meta properties, if present. if self.schema.Meta.identity in r: del r[self.schema.Meta.identity] if '_origin' in r: del r['_origin'] if '_target' in r: del r['_target'] relationship.properties.update(r) relationships.append(relationship) return relationships, indices def to_dict_of_dicts(self, entities, indices=None): relationships = [] for r in entities: e = r.properties e[self.schema.Meta.identity] = r._id e['_origin'] = r.start_node._id e['_target'] = r.end_node._id relationships.append(e) return super().to_dict_of_dicts(relationships, indices) def all(self, skip=0, limit=None): """Match all relationships, as long as they share the same label with this repository. :param skip: the number of elements to skip when retrieving. If None, none element should be skipped. :param limit: the maximum length of the list retrieved. If None, returns all elements after :skip. """ return self.match(skip=skip, limit=limit) def match(self, origin=None, target=None, skip=0, limit=None): if origin: origin = self.g.node(origin) if target: target = self.g.node(target) if limit is not None: limit += skip relationships = self.g.match(origin, self.label.upper(), target, limit=limit) for _ in range(skip): next(relationships) return self.to_dict_of_dicts(relationships) def where(self, skip=0, limit=None, **query): if len(query) != 1: raise ValueError('GraphRepository.where does not support' 'multiple parameter filtering yet.') query_item = query.popitem() if query_item[0] == self.schema.Meta.identity: return self.find((query_item[1],)) raise NotImplementedError
mit
-8,926,344,130,431,322,000
30.342723
100
0.587627
false
4.149161
false
false
false
ktnyt/chainer
chainer/training/extensions/variable_statistics_plot.py
1
13261
from __future__ import division import os import warnings import numpy import six import chainer from chainer import backend from chainer.backends import cuda from chainer.training import extension from chainer.training import trigger as trigger_module _available = None def _try_import_matplotlib(): global matplotlib, _available global _plot_color, _plot_color_trans, _plot_common_kwargs try: import matplotlib _available = True except ImportError: _available = False if _available: if hasattr(matplotlib.colors, 'to_rgba'): _to_rgba = matplotlib.colors.to_rgba else: # For matplotlib 1.x _to_rgba = matplotlib.colors.ColorConverter().to_rgba _plot_color = _to_rgba('#1f77b4') # C0 color _plot_color_trans = _plot_color[:3] + (0.2,) # apply alpha _plot_common_kwargs = { 'alpha': 0.2, 'linewidth': 0, 'color': _plot_color_trans} def _check_available(): if _available is None: _try_import_matplotlib() if not _available: warnings.warn('matplotlib is not installed on your environment, ' 'so nothing will be plotted at this time. ' 'Please install matplotlib to plot figures.\n\n' ' $ pip install matplotlib\n') def _unpack_variables(x, memo=None): if memo is None: memo = () if isinstance(x, chainer.Variable): memo += (x,) elif isinstance(x, chainer.Link): memo += tuple(x.params(include_uninit=True)) elif isinstance(x, (list, tuple)): for xi in x: memo += _unpack_variables(xi) return memo class Reservoir(object): """Reservoir sample with a fixed sized buffer.""" def __init__(self, size, data_shape, dtype=numpy.float32): self.size = size self.data = numpy.zeros((size,) + data_shape, dtype=dtype) self.idxs = numpy.zeros((size,), dtype=numpy.int32) self.counter = 0 def add(self, x, idx=None): if self.counter < self.size: self.data[self.counter] = x self.idxs[self.counter] = idx or self.counter elif self.counter >= self.size and \ numpy.random.random() < self.size / float(self.counter + 1): i = numpy.random.randint(self.size) self.data[i] = x self.idxs[i] = idx or self.counter self.counter += 1 def get_data(self): idxs = self.idxs[:min(self.counter, self.size)] sorted_args = numpy.argsort(idxs) return idxs[sorted_args], self.data[sorted_args] class Statistician(object): """Helper to compute basic NumPy-like statistics.""" def __init__(self, collect_mean, collect_std, percentile_sigmas): self.collect_mean = collect_mean self.collect_std = collect_std self.percentile_sigmas = percentile_sigmas def __call__(self, x, axis=0, dtype=None, xp=None): if axis is None: axis = tuple(range(x.ndim)) elif not isinstance(axis, (tuple, list)): axis = axis, return self.collect(x, axis) def collect(self, x, axis): out = dict() if self.collect_mean: out['mean'] = x.mean(axis=axis) if self.collect_std: out['std'] = x.std(axis=axis) if self.percentile_sigmas: xp = backend.get_array_module(x) if xp is numpy: p = numpy.percentile(x, self.percentile_sigmas, axis=axis) else: # TODO(hvy): Use percentile from CuPy once it is supported p = cuda.to_gpu( numpy.percentile( cuda.to_cpu(x), self.percentile_sigmas, axis=axis)) out['percentile'] = p return out class VariableStatisticsPlot(extension.Extension): """Trainer extension to plot statistics for :class:`Variable`\\s. This extension collects statistics for a single :class:`Variable`, a list of :class:`Variable`\\s or similarly a single or a list of :class:`Link`\\s containing one or more :class:`Variable`\\s. In case multiple :class:`Variable`\\s are found, the means are computed. 
The collected statistics are plotted and saved as an image in the directory specified by the :class:`Trainer`. Statistics include mean, standard deviation and percentiles. This extension uses reservoir sampling to preserve memory, using a fixed size running sample. This means that collected items in the sample are discarded uniformly at random when the number of items becomes larger than the maximum sample size, but each item is expected to occur in the sample with equal probability. Args: targets (:class:`Variable`, :class:`Link` or list of either): Parameters for which statistics are collected. max_sample_size (int): Maximum number of running samples. report_data (bool): If ``True``, data (e.g. weights) statistics are plotted. If ``False``, they are neither computed nor plotted. report_grad (bool): If ``True``, gradient statistics are plotted. If ``False``, they are neither computed nor plotted. plot_mean (bool): If ``True``, means are plotted. If ``False``, they are neither computed nor plotted. plot_std (bool): If ``True``, standard deviations are plotted. If ``False``, they are neither computed nor plotted. percentile_sigmas (float or tuple of floats): Percentiles to plot in the range :math:`[0, 100]`. trigger: Trigger that decides when to save the plots as an image. This is distinct from the trigger of this extension itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`. file_name (str): Name of the output image file under the output directory. figsize (tuple of int): Matlotlib ``figsize`` argument that specifies the size of the output image. marker (str): Matplotlib ``marker`` argument that specified the marker style of the plots. grid (bool): Matplotlib ``grid`` argument that specifies whether grids are rendered in in the plots or not. """ def __init__(self, targets, max_sample_size=1000, report_data=True, report_grad=True, plot_mean=True, plot_std=True, percentile_sigmas=( 0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100), trigger=(1, 'epoch'), file_name='statistics.png', figsize=None, marker=None, grid=True): if file_name is None: raise ValueError('Missing output file name of statstics plot') self._vars = _unpack_variables(targets) if len(self._vars) == 0: raise ValueError( 'Need at least one variables for which to collect statistics.' 
'\nActual: 0 <= 0') if not any((plot_mean, plot_std, bool(percentile_sigmas))): raise ValueError('Nothing to plot') self._keys = [] if report_data: self._keys.append('data') if report_grad: self._keys.append('grad') self._report_data = report_data self._report_grad = report_grad self._statistician = Statistician( collect_mean=plot_mean, collect_std=plot_std, percentile_sigmas=percentile_sigmas) self._plot_mean = plot_mean self._plot_std = plot_std self._plot_percentile = bool(percentile_sigmas) self._trigger = trigger_module.get_trigger(trigger) self._file_name = file_name self._figsize = figsize self._marker = marker self._grid = grid if not self._plot_percentile: n_percentile = 0 else: if not isinstance(percentile_sigmas, (list, tuple)): n_percentile = 1 # scalar, single percentile else: n_percentile = len(percentile_sigmas) self._data_shape = ( len(self._keys), int(plot_mean) + int(plot_std) + n_percentile) self._samples = Reservoir(max_sample_size, data_shape=self._data_shape) @staticmethod def available(): _check_available() return _available def __call__(self, trainer): if self.available(): # Dynamically import pyplot to call matplotlib.use() # after importing chainer.training.extensions import matplotlib.pyplot as plt else: return xp = backend.get_array_module(self._vars[0].data) stats = xp.zeros(self._data_shape, dtype=xp.float32) for i, k in enumerate(self._keys): xs = [] for var in self._vars: x = getattr(var, k, None) if x is not None: xs.append(x.ravel()) if len(xs) > 0: stat_dict = self._statistician( xp.concatenate(xs, axis=0), axis=0, xp=xp) stat_list = [] if self._plot_mean: stat_list.append(xp.atleast_1d(stat_dict['mean'])) if self._plot_std: stat_list.append(xp.atleast_1d(stat_dict['std'])) if self._plot_percentile: stat_list.append(xp.atleast_1d(stat_dict['percentile'])) stats[i] = xp.concatenate(stat_list, axis=0) if xp != numpy: stats = cuda.to_cpu(stats) self._samples.add(stats, idx=trainer.updater.iteration) if self._trigger(trainer): file_path = os.path.join(trainer.out, self._file_name) self.save_plot_using_module(file_path, plt) def save_plot_using_module(self, file_path, plt): nrows = int(self._plot_mean or self._plot_std) \ + int(self._plot_percentile) ncols = len(self._keys) fig, axes = plt.subplots( nrows, ncols, figsize=self._figsize, sharex=True) if not isinstance(axes, numpy.ndarray): # single subplot axes = numpy.asarray([axes]) if nrows == 1: axes = axes[None, :] elif ncols == 1: axes = axes[:, None] assert axes.ndim == 2 idxs, data = self._samples.get_data() # Offset to access percentile data from `data` offset = int(self._plot_mean) + int(self._plot_std) n_percentile = data.shape[-1] - offset n_percentile_mid_floor = n_percentile // 2 n_percentile_odd = n_percentile % 2 == 1 for col in six.moves.range(ncols): row = 0 ax = axes[row, col] ax.set_title(self._keys[col]) # `data` or `grad` if self._plot_mean or self._plot_std: if self._plot_mean and self._plot_std: ax.errorbar( idxs, data[:, col, 0], data[:, col, 1], color=_plot_color, ecolor=_plot_color_trans, label='mean, std', marker=self._marker) else: if self._plot_mean: label = 'mean' elif self._plot_std: label = 'std' ax.plot( idxs, data[:, col, 0], color=_plot_color, label=label, marker=self._marker) row += 1 if self._plot_percentile: ax = axes[row, col] for i in six.moves.range(n_percentile_mid_floor + 1): if n_percentile_odd and i == n_percentile_mid_floor: # Enters at most once per sub-plot, in case there is # only a single percentile to plot or when this # percentile is the mid percentile and the 
numner of # percentiles are odd ax.plot( idxs, data[:, col, offset + i], color=_plot_color, label='percentile', marker=self._marker) else: if i == n_percentile_mid_floor: # Last percentiles and the number of all # percentiles are even label = 'percentile' else: label = '_nolegend_' ax.fill_between( idxs, data[:, col, offset + i], data[:, col, -i - 1], label=label, **_plot_common_kwargs) ax.set_xlabel('iteration') for ax in axes.ravel(): ax.legend() if self._grid: ax.grid() ax.set_axisbelow(True) fig.savefig(file_path) plt.close()
mit
-7,535,819,348,694,704,000
35.938719
79
0.544529
false
4.193865
false
false
false
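Note: the content field above appears to be a Chainer-style trainer extension that samples per-variable data/grad statistics into a reservoir and plots them with matplotlib. A minimal, self-contained sketch (the parameter values below are illustrative assumptions, not taken from the record) of how the extension sizes its statistics buffer before sampling:

report_data, report_grad = True, True
plot_mean, plot_std = True, True
percentile_sigmas = (0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100)

# 'data' and/or 'grad' become the plotted keys, mirroring self._keys above
keys = [k for k, used in (('data', report_data), ('grad', report_grad)) if used]

# a scalar counts as a single percentile; sequences contribute one slot each
if not isinstance(percentile_sigmas, (list, tuple)):
    n_percentile = 1
else:
    n_percentile = len(percentile_sigmas)

data_shape = (len(keys), int(plot_mean) + int(plot_std) + n_percentile)
print(data_shape)  # -> (2, 11): 2 keys x (mean + std + 9 percentiles)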
Taywee/texttables
texttables/fixed/_writer.py
1
8641
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright © 2017 Taylor C. Richberger <[email protected]> # This code is released under the license described in the LICENSE file from __future__ import division, absolute_import, print_function, unicode_literals from six.moves import zip from texttables.dialect import Dialect class writer(object): """Fixed-table document writer, writing tables with predefined column-sizes. The :class:`texttables.Dialect` class is used to configure how this writes tables. This works as a context manager, in which case :meth:`writetop` and :meth:`writebottom` will be called automatically.""" def __init__(self, file, widths, dialect=None, **fmtparams): """ :param file: A writable file object with a ``write`` method :param widths: An iterable of widths, containing the field sizes of the table. Each width may be prefixed with <, >, =, or ^, for alignment through the Python format specification. :param dialect: A dialect class or object used to define aspects of the table. The stored dialect is always an instance of :class:`texttables.Dialect`, not necessarily the passed-in object. All the attributes of Dialect are grabbed from this object using getattr. :param fmtparams: parameters to override the parameters in :obj:`dialect`. """ self._file = file self._widths = tuple(widths) self.dialect = dialect for attribute in dir(self.dialect): if '__' not in attribute: if attribute in fmtparams: setattr(self._dialect, attribute, fmtparams[attribute]) self.__wroterow = False self.__wroteheader = False def __enter__(self): if self.dialect.top_border: self.writetop() return self def __exit__(self, type, value, traceback): if self.dialect.bottom_border: self.writebottom() @property def file(self): '''The file object that was passed in to the constructor. It is not safe to change this object until you are finished using the class''' return self._file @property def widths(self): '''The widths that were passed into the constructor, as a tuple.''' return self._widths @property def dialect(self): '''The :class:`texttables.Dialect` constructed from the passed-in dialect. This is always unique, and is not the same object that is passed in. 
Assigning to this will also likewise construct a new :class:`texttables.Dialect`, not simply assign the attribute.''' return self._dialect @dialect.setter def dialect(self, value): self._dialect = Dialect() if value: for attribute in dir(self._dialect): if '__' not in attribute: setattr(self._dialect, attribute, getattr(value, attribute)) def _row(self, row): dialect = self.dialect contents = list() for cell, rawwidth in zip(row, self._widths): swidth = str(rawwidth) alignment = '<' try: width = int(swidth) except ValueError: alignment = swidth[0] width = int(swidth[1:]) contents.append('{content!s:{alignment}{width}.{width}s}'.format( content=cell, alignment=alignment, width=width)) row = '' if dialect.left_border: row = dialect.left_border row += dialect.cell_delimiter.join(contents) if dialect.right_border: row += dialect.right_border return row def _rowdelim(self, delimiter): dialect = self.dialect delimcontents = list() for rawwidth in self._widths: swidth = str(rawwidth) try: width = int(swidth) except ValueError: width = int(swidth[1:]) delimcontents.append(delimiter * width) delim = '' if dialect.left_border: delim = dialect.corner_border delim += dialect.corner_border.join(delimcontents) if dialect.right_border: delim += dialect.corner_border return delim def writerow(self, row): '''Write a single row out to :meth:`file`, respecting any delimiters and header separators necessary. :param row: An iterable representing the row to write ''' dialect = self.dialect if self.__wroteheader: if dialect.header_delimiter and dialect.corner_border: self._file.write(self._rowdelim(dialect.header_delimiter)) self._file.write(dialect.lineterminator) elif self.__wroterow: if dialect.row_delimiter and dialect.corner_border: self._file.write(self._rowdelim(dialect.row_delimiter)) self._file.write(dialect.lineterminator) self._file.write(self._row(row)) self._file.write(dialect.lineterminator) self.__wroteheader = False self.__wroterow = True def writerows(self, rows): '''Write a multiple rows out to :meth:`file`, respecting any delimiters and header separators necessary. :param rows: An iterable of iterables representing the rows to write ''' for row in rows: self.writerow(row) def writeheader(self, row): '''Write the header out to :meth:`file`. :param row: An iterable representing the row to write as a header ''' self.writerow(row) self.__wroteheader = True def writetop(self): '''Write the top of the table out to :meth:`file`.''' dialect = self.dialect self._file.write(self._rowdelim(dialect.top_border)) self._file.write(dialect.lineterminator) def writebottom(self): '''Write the bottom of the table out to :meth:`file`.''' dialect = self.dialect self._file.write(self._rowdelim(dialect.bottom_border)) self._file.write(dialect.lineterminator) class DictWriter(object): """Fixed-table document writer, writing tables with predefined column-sizes and names through dictionary rows passed in. The :class:`texttables.Dialect` class is used to configure how this writes tables. This is a simple convenience frontend to :class:`texttables.fixed.writer`. This works as a context manager, in which case :meth:`writetop` and :meth:`writebottom` will be called automatically. """ def __init__(self, file, fieldnames, widths, dialect=None, **fmtparams): """ All the passed in construction parameters are passed to the :class:`texttables.fixed.writer` constructor literally. All properties and most methods also align directly as well. 
""" self._writer = writer(file, widths, dialect, **fmtparams) self._fieldnames = fieldnames def __enter__(self): self._writer.__enter__() return self def __exit__(self, type, value, traceback): self._writer.__exit__(type, value, traceback) return self @property def file(self): return self._writer.file @property def widths(self): return self._writer.widths @property def dialect(self): return self._writer.dialect @dialect.setter def dialect(self, value): self._writer.dialect = value @property def fieldnames(self): return self._fieldnames @fieldnames.setter def fieldnames(self, value): self._fieldnames = value def writeheader(self): '''Write the header based on :meth:`fieldnames`.''' self._writer.writeheader(self._fieldnames) def writerow(self, row): '''Write a single row out to :meth:`file`, respecting any delimiters and header separators necessary. :param row: A dictionary representing the row to write ''' self._writer.writerow(row[field] for field in self._fieldnames) def writerows(self, rows): '''Write multiple rows out to :meth:`file`, respecting any delimiters and header separators necessary. :param row: An iterable of dictionaries representing the rows to write ''' for row in rows: self.writerow(row) def writetop(self): self._writer.writetop() def writebottom(self): self._writer.writebottom()
mit
2,384,864,735,107,385,000
33.422311
86
0.612731
false
4.465116
false
false
false
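A hedged usage sketch for the fixed-width writer in the record above: the import path is an assumption (the class is defined in texttables/fixed/_writer.py and is presumably re-exported by the package), and the delimiters are passed explicitly as fmtparams rather than relying on Dialect defaults.

import sys
from texttables.fixed import writer  # assumed re-export of the class above

with writer(sys.stdout, widths=['<10', '>6'],
            cell_delimiter=' | ', corner_border='+',
            header_delimiter='-', lineterminator='\n') as w:
    w.writeheader(('name', 'score'))   # header row, followed by a '-' rule
    w.writerow(('alice', 12))          # left-aligned name, right-aligned score
    w.writerow(('bob', 7))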
thorfi/pass-words-py
pass-words.py
1
6672
#!/usr/bin/env python # # The MIT License (MIT) # # Copyright (c) 2014-2021 David Goh <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # # Python script to generate correct horse battery staple passwords on Unix # http://xkcd.com/936/ from random import SystemRandom import getopt import itertools import math import os import os.path import string import sys COPYRIGHT = "Copyright (c) 2014 David Goh <[email protected]>" AUTHOR = "David Goh <[email protected]> - https://goh.id.au/~david/" SOURCE = "GIT: https://github.com/thorfi/pass-words-py" LICENSE = "MIT License - https://github.com/thorfi/pass-words-py/blob/master/LICENSE" DEFAULT_MAX_WORD_LEN = 8 DEFAULT_MIN_WORD_LEN = 4 DEFAULT_WORD_COUNT = 5 DEFAULT_WORD_SEPARATOR = " " WORDS_SUB_PATHS = ( "share/dict/words", "dict/words", "share/words", "words", ) DEFAULT_WORDS_PATHS = set() for p in os.environ["PATH"].split(":"): p = os.path.dirname(p.rstrip(os.path.sep)) for w in WORDS_SUB_PATHS: w_path = os.path.join(p, w) if os.path.isfile(w_path): DEFAULT_WORDS_PATHS.add(w_path) def usage_exit(msg=None): """Exit with a potential error message.""" exitcode = 0 f = sys.stderr if msg else sys.stdout if msg is not None: print("Error:", msg, file=f) exitcode = 1 print("Usage:", sys.argv[0], "[...]", file=f) print( """ Python script that generates correct horse battery staple passwords from Unix dictionaries See https://xkcd.com/936/ -c n: count n words in password (Default: {}) -m N: max length of words to use (Default: {}) -n n: min length of words to use (Default: {}) -s s: word separator to use (Default: {!r}) -p /path/to/words: Add this file to look for words in. 
If none specified, file(s) used: {} -v: verbose print of more common password entropies for comparison -h: print this help """.format( DEFAULT_WORD_COUNT, DEFAULT_MAX_WORD_LEN, DEFAULT_MIN_WORD_LEN, DEFAULT_WORD_SEPARATOR, ":".join(DEFAULT_WORDS_PATHS), ), file=f, ) sys.exit(exitcode) def main(): words_paths = [] word_count = DEFAULT_WORD_COUNT max_word_len = DEFAULT_MAX_WORD_LEN min_word_len = DEFAULT_MIN_WORD_LEN word_separator = DEFAULT_WORD_SEPARATOR verbose = False try: opts, remainder_args = getopt.getopt( sys.argv[1:], "p:c:m:n:s:vh", [ "path=", "count=", "max=", "min=", "sep=", "verbose", "help", ], ) except getopt.GetoptError as exc: usage_exit(str(exc)) assert False for o, a in opts: if o in ("-c", "--count"): try: word_count = int(a) except ValueError as exc: usage_exit(f"--count={a!r} {str(exc)}") elif o in ("-m", "--max"): try: max_word_len = int(a) except ValueError as exc: usage_exit(f"--max={a!r} {str(exc)}") elif o in ("-n", "--min"): try: min_word_len = int(a) except ValueError as exc: usage_exit(f"--min={a!r} {str(exc)}") elif o in ("-p", "--path"): if not os.path.isfile(a): usage_exit(f"--path={a!r} is not a file") words_paths.append(a) elif o in ("-s", "--sep"): word_separator = a elif o in ("-v", "--verbose"): verbose = True elif o in ("-h", "--help"): usage_exit() else: usage_exit(f"unknown option {o} {a!r}") if max_word_len < min_word_len: usage_exit(f"--max={max_word_len} < --min={min_word_len}") min_word_len = DEFAULT_MIN_WORD_LEN entropies = [] if verbose: desc_texts = ( ("ASCII lowercase letters", string.ascii_lowercase), ("ASCII letters", string.ascii_letters), ("ASCII letters or digits", string.ascii_letters + string.digits), ("ASCII printable non whitespace", "".join(string.printable.split())), ) counts = (8, 10, 16, 20) for (desc, text), n in itertools.product(desc_texts, counts): len_text = len(text) choices = len_text ** n choices_desc = f"{n:2d}*[{len_text:d} {desc}]" entropies.append((choices, choices_desc)) if not words_paths: words_paths = list(DEFAULT_WORDS_PATHS) words = set() for wp in words_paths: with open(wp) as wf: for line in (line.strip().lower() for line in wf): if min_word_len < len(line) < max_word_len: words.add(line) def count_choices(len_w, w_count): if w_count == 1: return len_w assert w_count > 1 return len_w * count_choices(len_w - 1, w_count - 1) len_words = len(words) choices = count_choices(len_words, word_count) choices_desc = ( f"{word_count:2d}*[{len_words:d} words ({min_word_len:d}-{max_word_len:d} letters) from {':'.join(words_paths)}]" ) entropies.append((choices, choices_desc)) if len(entropies) > 1: print("Bit Entropy comparisons") entropies.sort() for n, d in entropies: print(f"{math.log(n, 2):5.1f} bits - {d}") random = SystemRandom() words = random.sample(list(words), word_count) for word in words: print(word) print(word_separator.join(words)) if __name__ == "__main__": main()
mit
7,877,558,534,059,127,000
31.546341
121
0.583633
false
3.537646
false
false
false
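The script above reports password strength via a simple sampling-without-replacement count; the snippet below re-derives that estimate on its own (the dictionary size is an illustrative assumption, not a value from the record).

import math

def count_choices(len_w, w_count):
    # ordered selections of w_count distinct words from a pool of len_w words
    if w_count == 1:
        return len_w
    return len_w * count_choices(len_w - 1, w_count - 1)

len_words, word_count = 50000, 5  # hypothetical pool size, default word count
bits = math.log(count_choices(len_words, word_count), 2)
print('%.1f bits of entropy' % bits)  # roughly 78 bits for these numbers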
kickstandproject/python-ripcordclient
ripcordclient/common/utils.py
1
2525
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC.
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Author: Paul Belanger <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os


def env(*vars, **kwargs):
    """Search for the first defined of possibly many env vars

    Returns the first environment variable defined in vars, or
    returns the default defined in kwargs.
    """
    for v in vars:
        value = os.environ.get(v, None)
        if value:
            return value
    return kwargs.get('default', '')


def add_show_list_common_argument(parser):
    parser.add_argument(
        '-D', '--show-details',
        help='show detailed info',
        action='store_true',
        default=False,
    )
    parser.add_argument(
        '--show_details',
        action='store_true',
        help=argparse.SUPPRESS)
    parser.add_argument(
        '--fields',
        help=argparse.SUPPRESS,
        action='append',
        default=[])
    parser.add_argument(
        '-F', '--field',
        dest='fields', metavar='FIELD',
        help='specify the field(s) to be returned by server,'
             ' can be repeated',
        action='append',
        default=[])


def get_item_properties(item, fields, mixed_case_fields=[]):
    """Return a tuple containing the item properties.

    :param item: a single item resource (e.g. Server, Tenant, etc)
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    """
    row = []

    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        if not hasattr(item, field_name) and isinstance(item, dict):
            data = item[field_name]
        else:
            data = getattr(item, field_name, '')
        if data is None:
            data = ''
        row.append(data)
    return tuple(row)
apache-2.0
-3,201,683,492,522,797,600
29.421687
74
0.629703
false
4.059486
false
false
false
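A short, hedged sketch of the two helpers in the record above; the environment-variable names and the fake resource class are hypothetical.

import os
from ripcordclient.common.utils import env, get_item_properties

os.environ['RIPCORD_USERNAME'] = 'demo'                              # hypothetical variable
print(env('RIPCORD_USERNAME', 'OS_USERNAME', default='anonymous'))  # -> demo

class FakeAgent(object):
    name = 'agent-1'
    status = 'ACTIVE'

# 'Name' is lower-cased to the attribute 'name', 'Status' to 'status'
print(get_item_properties(FakeAgent(), ('Name', 'Status')))  # -> ('agent-1', 'ACTIVE')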
carthagecollege/django-djtools
djtools/context_processors.py
1
1233
from django.conf import settings


def sitevars(request):
    context = {}
    try:
        context['static_root'] = settings.MEDIA_ROOT
        context['media_root'] = settings.MEDIA_ROOT
        context['media_url'] = settings.MEDIA_URL
        context['static_url'] = settings.STATIC_URL
        context['server_url'] = settings.SERVER_URL
        context['root_url'] = settings.ROOT_URL
        context['login_url'] = settings.LOGIN_URL
        context['logout_url'] = settings.LOGOUT_URL
        context['templates_debug'] = settings.TEMPLATES[0]['OPTIONS']['debug']
        context['debug'] = settings.DEBUG
        # UI helpers for email
        context['dl_dt'] = '''
            style="background:#efefef; color:#000; float:left; font-weight:bold; margin-right:10px; padding:5px; width:200px;"
        '''
        context['dl_dd'] = '''
            style="margin:2px 0; padding:5px 0;"
        '''
        context['dl_detail'] = '''
            style="margin-bottom:5px;"
        '''
        context['dd_desc'] = '''
            style="margin-bottom:7px 0;"
        '''
        context['clear'] = '''
            style="clear:both;"
        '''
    except:
        pass
    return context
unlicense
4,474,720,833,405,657,600
33.228571
126
0.534469
false
4.055921
false
false
false
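A hedged sketch of wiring the sitevars processor above into a Django project; the TEMPLATES block is the stock Django layout rather than something taken from this repository, and the processor also expects MEDIA_ROOT, MEDIA_URL, STATIC_URL, SERVER_URL, ROOT_URL, LOGIN_URL and LOGOUT_URL to be defined in settings.

# settings.py (illustrative)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': False,   # read back as context['templates_debug']
            'context_processors': [
                'django.template.context_processors.request',
                'djtools.context_processors.sitevars',
            ],
        },
    },
]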
anilpai/leetcode
Matrix/MatrixRotate90deg.py
1
1767
# An Inplace function to rotate a N x N matrix by 90 degrees
# In both clockwise and counter clockwise direction

class Solution(object):
    def Rotate90Clock(self, mat):
        N = len(mat)
        for x in range(int(N/2)):
            for y in range(x, N-x-1):
                temp = mat[x][y]
                '''
                Move values from left to top.
                Move values from bottom to left.
                Move values from right to bottom.
                Move values from top to right.
                '''
                mat[x][y] = mat[N-1-y][x]
                mat[N-1-y][x] = mat[N-1-x][N-1-y]
                mat[N-1-x][N-1-y] = mat[y][N-1-x]
                mat[y][N-1-x] = temp
        return mat

    def Rotate90CounterClock(self, mat):
        N = len(mat)
        for x in range(0, int(N/2)):
            for y in range(x, N-x-1):
                temp = mat[x][y]
                '''
                Move values from right to top.
                Move values from bottom to right.
                Move values from left to bottom.
                Move values from top to left.
                '''
                mat[x][y] = mat[y][N-1-x]
                mat[y][N-1-x] = mat[N-1-x][N-1-y]
                mat[N-1-x][N-1-y] = mat[N-1-y][x]
                mat[N-1-y][x] = temp
        return mat

    def printMatrix(self, mat):
        # Utility Function
        print("######")
        for row in mat:
            print(row)


if __name__=='__main__':
    s = Solution()
    matrix = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16]
    ]
    print(matrix)
    s.printMatrix(matrix)
    s.printMatrix(s.Rotate90Clock(matrix))
    s.printMatrix(s.Rotate90CounterClock(matrix))
mit
-1,409,958,039,993,768,700
26.625
60
0.449915
false
3.411197
false
false
false
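A quick property check for the rotations above (not part of the record): a clockwise rotation is undone by a counter-clockwise one, and four clockwise rotations act as the identity. It assumes the Solution class from the record is in scope.

import copy

s = Solution()
m = [[1, 2], [3, 4]]
orig = copy.deepcopy(m)

# the methods rotate in place and also return the same list object
assert s.Rotate90Clock(copy.deepcopy(m)) == [[3, 1], [4, 2]]
assert s.Rotate90CounterClock(s.Rotate90Clock(m)) == orig
for _ in range(4):
    s.Rotate90Clock(m)
assert m == orig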
isra17/nrs
tests/test_nsisfile.py
1
2037
from nrs import nsisfile
import pytest
import utils
import os
import sys

EMPTY_PATH = os.path.join(utils.SAMPLES_DIR, 'empty')
EXAMPLE1_PATH = os.path.join(utils.SAMPLES_DIR, 'example1.exe')
EMPTY_PATH = os.path.join(utils.SAMPLES_DIR, 'vopackage.exe')


def test_non_nsis():
    with pytest.raises(nsisfile.HeaderNotFound):
        nsis = nsisfile.NSIS.from_path(os.path.join(utils.SAMPLES_DIR, 'empty'))


def test_get_version():
    with open(EXAMPLE1_PATH, 'rb') as fd:
        nsis = nsisfile.NSIS(fd)
        assert nsis.version_major == '3'


def test_get_string():
    with open(EXAMPLE1_PATH, 'rb') as fd:
        nsis = nsisfile.NSIS(fd)
        assert nsis.get_string(0x4e) == 'Example1'
        assert nsis.get_string(0x4a) == '$__SHELL_16_25__\\Example1'
        assert nsis.get_string(0x57) == '$INSTALLDIR'
        assert nsis.get_string(0x87) == '$(LangString2) Setup'


def test_get_raw_string():
    with open(EXAMPLE1_PATH, 'rb') as fd:
        nsis = nsisfile.NSIS(fd)
        assert nsis.get_raw_string(0x4e) == b'Example1'
        assert nsis.get_raw_string(0x4a) == b'\x02\x10\x19\\Example1'
        assert nsis.get_raw_string(0x57) == b'\x03\x95\x80'
        assert nsis.get_raw_string(0x87) == b'\x01\x82\x80 Setup'


def test_get_all_strings():
    with open(EXAMPLE1_PATH, 'rb') as fd:
        nsis = nsisfile.NSIS(fd)
        strings = nsis.get_all_strings()
        assert 'example1.nsi' in strings
        assert '$INSTALLDIR' in strings


def test_block():
    with open(EXAMPLE1_PATH, 'rb') as fd:
        nsis = nsisfile.NSIS(fd)
        assert len(nsis.block(nsisfile.NB_PAGES)) == 0xc0
        assert len(nsis.block(nsisfile.NB_SECTIONS)) == 0x418
        assert len(nsis.block(nsisfile.NB_ENTRIES)) == 0x54
        assert len(nsis.block(nsisfile.NB_STRINGS)) == 0x362
        assert len(nsis.block(nsisfile.NB_LANGTABLES)) == 0xe6
        assert len(nsis.block(nsisfile.NB_CTLCOLORS)) == 0x0
        assert len(nsis.block(nsisfile.NB_BGFONT)) == 0x8
        assert len(nsis.block(nsisfile.NB_DATA)) == 0x8
gpl-3.0
3,935,376,642,075,163,000
37.433962
80
0.644084
false
2.737903
true
false
false
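A hedged sketch of the nsisfile API exercised by the tests above; the installer path is a placeholder and only calls that appear in the tests are used.

from nrs import nsisfile

with open('some_installer.exe', 'rb') as fd:       # hypothetical NSIS installer
    nsis = nsisfile.NSIS(fd)
    print(nsis.version_major)
    for s in nsis.get_all_strings():
        print(s)
    print(len(nsis.block(nsisfile.NB_ENTRIES)))

# nsisfile.NSIS.from_path(...) raises nsisfile.HeaderNotFound for non-NSIS files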
leiferikb/bitpop
src/tools/telemetry/telemetry/core/webpagereplay.py
1
9250
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Start and stop Web Page Replay. Of the public module names, the following one is key: ReplayServer: a class to start/stop Web Page Replay. """ import logging import os import re import signal import subprocess import sys import time import urllib _CHROME_SRC_DIR = os.path.abspath(os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir)) REPLAY_DIR = os.path.join( _CHROME_SRC_DIR, 'third_party', 'webpagereplay') LOG_PATH = os.path.join( _CHROME_SRC_DIR, 'webpagereplay_logs', 'logs.txt') # Chrome options to make it work with Web Page Replay. def GetChromeFlags(replay_host, http_port, https_port): assert replay_host and http_port and https_port, 'All arguments required' return [ '--host-resolver-rules=MAP * %s,EXCLUDE localhost' % replay_host, '--testing-fixed-http-port=%s' % http_port, '--testing-fixed-https-port=%s' % https_port, '--ignore-certificate-errors', ] # Signal masks on Linux are inherited from parent processes. If anything # invoking us accidentally masks SIGINT (e.g. by putting a process in the # background from a shell script), sending a SIGINT to the child will fail # to terminate it. Running this signal handler before execing should fix that # problem. def ResetInterruptHandler(): signal.signal(signal.SIGINT, signal.SIG_DFL) class ReplayError(Exception): """Catch-all exception for the module.""" pass class ReplayNotFoundError(ReplayError): def __init__(self, label, path): super(ReplayNotFoundError, self).__init__() self.args = (label, path) def __str__(self): label, path = self.args return 'Path does not exist for %s: %s' % (label, path) class ReplayNotStartedError(ReplayError): pass class ReplayServer(object): """Start and Stop Web Page Replay. Web Page Replay is a proxy that can record and "replay" web pages with simulated network characteristics -- without having to edit the pages by hand. With WPR, tests can use "real" web content, and catch performance issues that may result from introducing network delays and bandwidth throttling. Example: with ReplayServer(archive_path): self.NavigateToURL(start_url) self.WaitUntil(...) Environment Variables (for development): WPR_ARCHIVE_PATH: path to alternate archive file (e.g. '/tmp/foo.wpr'). WPR_RECORD: if set, puts Web Page Replay in record mode instead of replay. WPR_REPLAY_DIR: path to alternate Web Page Replay source. """ def __init__(self, archive_path, replay_host, dns_port, http_port, https_port, replay_options=None, replay_dir=None, log_path=None): """Initialize ReplayServer. Args: archive_path: a path to a specific WPR archive (required). replay_host: the hostname to serve traffic. dns_port: an integer port on which to serve DNS traffic. May be zero to let the OS choose an available port. If None DNS forwarding is disabled. http_port: an integer port on which to serve HTTP traffic. May be zero to let the OS choose an available port. https_port: an integer port on which to serve HTTPS traffic. May be zero to let the OS choose an available port. replay_options: an iterable of options strings to forward to replay.py. replay_dir: directory that has replay.py and related modules. log_path: a path to a log file. 
""" self.archive_path = os.environ.get('WPR_ARCHIVE_PATH', archive_path) self.replay_options = list(replay_options or ()) self.replay_dir = os.environ.get('WPR_REPLAY_DIR', replay_dir or REPLAY_DIR) self.log_path = log_path or LOG_PATH self.dns_port = dns_port self.http_port = http_port self.https_port = https_port self._replay_host = replay_host if 'WPR_RECORD' in os.environ and '--record' not in self.replay_options: self.replay_options.append('--record') self.is_record_mode = '--record' in self.replay_options self._AddDefaultReplayOptions() self.replay_py = os.path.join(self.replay_dir, 'replay.py') if self.is_record_mode: self._CheckPath('archive directory', os.path.dirname(self.archive_path)) elif not os.path.exists(self.archive_path): self._CheckPath('archive file', self.archive_path) self._CheckPath('replay script', self.replay_py) self.replay_process = None def _AddDefaultReplayOptions(self): """Set WPR command-line options. Can be overridden if needed.""" self.replay_options = [ '--host', str(self._replay_host), '--port', str(self.http_port), '--ssl_port', str(self.https_port), '--use_closest_match', '--no-dns_forwarding', '--log_level', 'warning' ] + self.replay_options if self.dns_port is not None: self.replay_options.extend(['--dns_port', str(self.dns_port)]) def _CheckPath(self, label, path): if not os.path.exists(path): raise ReplayNotFoundError(label, path) def _OpenLogFile(self): log_dir = os.path.dirname(self.log_path) if not os.path.exists(log_dir): os.makedirs(log_dir) return open(self.log_path, 'w') def WaitForStart(self, timeout): """Checks to see if the server is up and running.""" port_re = re.compile( '.*?(?P<protocol>[A-Z]+) server started on (?P<host>.*):(?P<port>\d+)') start_time = time.time() elapsed_time = 0 while elapsed_time < timeout: if self.replay_process.poll() is not None: break # The process has exited. # Read the ports from the WPR log. if not self.http_port or not self.https_port or not self.dns_port: with open(self.log_path) as f: for line in f.readlines(): m = port_re.match(line.strip()) if m: if not self.http_port and m.group('protocol') == 'HTTP': self.http_port = int(m.group('port')) elif not self.https_port and m.group('protocol') == 'HTTPS': self.https_port = int(m.group('port')) elif not self.dns_port and m.group('protocol') == 'DNS': self.dns_port = int(m.group('port')) # Try to connect to the WPR ports. if self.http_port and self.https_port: try: up_url = '%s://%s:%s/web-page-replay-generate-200' http_up_url = up_url % ('http', self._replay_host, self.http_port) https_up_url = up_url % ('https', self._replay_host, self.https_port) if (200 == urllib.urlopen(http_up_url, None, {}).getcode() and 200 == urllib.urlopen(https_up_url, None, {}).getcode()): return True except IOError: pass poll_interval = min(max(elapsed_time / 10., .1), 5) time.sleep(poll_interval) elapsed_time = time.time() - start_time return False def StartServer(self): """Start Web Page Replay and verify that it started. Raises: ReplayNotStartedError: if Replay start-up fails. 
""" cmd_line = [sys.executable, self.replay_py] cmd_line.extend(self.replay_options) cmd_line.append(self.archive_path) logging.debug('Starting Web-Page-Replay: %s', cmd_line) with self._OpenLogFile() as log_fh: kwargs = {'stdout': log_fh, 'stderr': subprocess.STDOUT} if sys.platform.startswith('linux') or sys.platform == 'darwin': kwargs['preexec_fn'] = ResetInterruptHandler self.replay_process = subprocess.Popen(cmd_line, **kwargs) if not self.WaitForStart(30): with open(self.log_path) as f: log = f.read() raise ReplayNotStartedError( 'Web Page Replay failed to start. Log output:\n%s' % log) def StopServer(self): """Stop Web Page Replay.""" if self.replay_process: logging.debug('Trying to stop Web-Page-Replay gracefully') try: url = 'http://localhost:%s/web-page-replay-command-exit' urllib.urlopen(url % self.http_port, None, {}) except IOError: # IOError is possible because the server might exit without response. pass start_time = time.time() while time.time() - start_time < 10: # Timeout after 10 seconds. if self.replay_process.poll() is not None: break time.sleep(1) else: try: # Use a SIGINT so that it can do graceful cleanup. self.replay_process.send_signal(signal.SIGINT) except: # pylint: disable=W0702 # On Windows, we are left with no other option than terminate(). if 'no-dns_forwarding' not in self.replay_options: logging.warning('DNS configuration might not be restored!') try: self.replay_process.terminate() except: # pylint: disable=W0702 pass self.replay_process.wait() def __enter__(self): """Add support for with-statement.""" self.StartServer() return self def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb): """Add support for with-statement.""" self.StopServer()
gpl-3.0
-2,729,612,260,395,174,000
35.132813
80
0.645838
false
3.647476
false
false
false
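A hedged usage sketch for ReplayServer above, based only on its constructor and docstrings; the archive path and host are placeholders and the browser launch is left as a comment.

server = ReplayServer(archive_path='/tmp/example.wpr',   # placeholder archive
                      replay_host='127.0.0.1',
                      dns_port=None,    # None disables DNS forwarding
                      http_port=0,      # 0 lets the OS pick a port
                      https_port=0)
with server:
    # ports are filled in from the replay.py log once the server is up
    flags = GetChromeFlags('127.0.0.1', server.http_port, server.https_port)
    # launch the browser with `flags` and drive the pages to record/replay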
LibreGameArchive/silvertree
scons/gl.py
1
1408
# vi: syntax=python:et:ts=4

def CheckOpenGL(context, libs = ["gl"]):
    context.Message("Checking for OpenGL... ")
    env = context.env
    backup = env.Clone().Dictionary()

    if env["PLATFORM"] == "win32":
        libnames = { "gl" : "opengl32", "glu" : "glu32" }
    else:
        libnames = { "gl" : "GL", "glu" : "GLU" }

    env.AppendUnique(LIBS = map(libnames.get, libs))

    test_program = ""
    for lib in libs:
        test_program += "#include <GL/%s.h>\n" % lib
    test_program += "int main()\n{}\n"

    if context.TryLink(test_program, ".c"):
        context.Result("yes")
        return True
    else:
        env.Replace(**backup)
        context.Result("no")
        return False

def CheckGLEW(context):
    context.Message("Checking for OpenGL Extension Wrangler... ")
    env = context.env
    backup = env.Clone().Dictionary()

    if env["PLATFORM"] == "win32":
        env.AppendUnique(LIBS = ["glew32", "glu32", "opengl32"])
    else:
        env.AppendUnique(LIBS = ["GLEW", "GLU", "GL"])

    test_program = """
#include <GL/glew.h>
int main()
{
    glewInit();
}
"""
    if context.TryLink(test_program, ".c"):
        context.Result("yes")
        return True
    else:
        env.Replace(**backup)
        context.Result("no")
        return False

def get_checks():
    return { "CheckOpenGL" : CheckOpenGL, "CheckGLEW" : CheckGLEW }
gpl-3.0
-9,038,046,539,964,435,000
28.333333
67
0.551136
false
3.425791
true
false
false
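A hedged SConstruct sketch for the configure checks above; it assumes scons/gl.py is importable (for example via site_scons or a sys.path tweak).

# SConstruct (illustrative)
import gl

env = Environment()
conf = Configure(env, custom_tests=gl.get_checks())
if not conf.CheckOpenGL(["gl", "glu"]) or not conf.CheckGLEW():
    print("OpenGL and/or GLEW development files not found")
    Exit(1)
env = conf.Finish()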
brunogamacatao/portalsaladeaula
portal/models.py
1
34418
# -*- coding: utf-8 -*- import logging from operator import attrgetter from django.db import models from django.utils.translation import ugettext as _ from django.core.urlresolvers import reverse from django.contrib.auth.models import User from djangotoolbox.fields import BlobField, SetField from django.template import Context from django.template.loader import get_template from django.core.mail import EmailMultiAlternatives from google.appengine.api import images from portal.fields import AutoSlugField from portal.constants import STATES_CHOICES, REGISTRATION_TYPE_CHOICES, ACCESS_TYPE_CHOICES, MODERATE_REGISTRATION, PUBLIC_ACCESS from portal.utils import split_string, formata_hora from portal.updates.models import Update import datetime from settings import PERIODO_ATUAL class Indexable(models.Model): index = SetField(blank=False) messages_cache = models.TextField(blank=True, null=True) updates_cache = models.TextField(blank=True, null=True) teachers_cache = models.TextField(blank=True, null=True) students_cache = models.TextField(blank=True, null=True) def notify_upload(self, user, uploaded_file): return None def notify_new_student(self, user, student): return None def notify_new_teacher(self, user, teacher): return None def notify_comment(self, user, comment): return None def get_update_list(self): return None class Meta: abstract = True class Picture(models.Model): picture = BlobField(blank=False) filename = models.CharField(blank=False, max_length=200) width = models.IntegerField(blank=True) height = models.IntegerField(blank=True) format = models.CharField(blank=True, max_length=10) parent = models.ForeignKey('self', blank=True, null=True, related_name='thumb_set') ''' The felds below are just for thumbnails. They store the requested width and height values as they can change according to the image's aspect ratio. ''' intended_width = models.IntegerField(blank=True, null=True) intended_height = models.IntegerField(blank=True, null=True) @classmethod def create_thumbnail(cls, parent, width, height): img = images.Image(parent.picture) img.resize(width=width, height=height) img.im_feeling_lucky() thumb = Picture() thumb.picture = img.execute_transforms(output_encoding=images.JPEG) thumb.filename = parent.filename.split('.')[0] + '_thumb.jpg' thumb.parent = parent thumb.intended_width = width thumb.intended_height = height thumb.save() return thumb @classmethod def get_thumbnail(cls, picture, width, height): if picture.thumb_set.filter(intended_width=width, intended_height=height).exists(): return picture.thumb_set.filter(intended_width=width, intended_height=height)[0] return cls.create_thumbnail(picture, width, height) #Here we automatically fill the width, height and format fields of a picture. 
def fill_picture_fields(sender, instance, **kw): image = images.Image(instance.picture) instance.width = image.width instance.height = image.height instance.format = instance.filename.split('.')[-1] models.signals.pre_save.connect(fill_picture_fields, sender=Picture) class UserInfo(Indexable): name = models.CharField(_('Name'), blank=False, max_length=100) picture = models.ForeignKey(Picture, blank=True, null=True) city = models.CharField(_('City'), blank=False, max_length=100) province = models.CharField(_('State or Province'), blank=False, max_length=2, choices=STATES_CHOICES) email = models.EmailField(_('Email'), blank=False) user = models.ForeignKey(User, unique=True) show_help_text = models.NullBooleanField(blank=True, null=True) is_teacher = models.NullBooleanField(blank=True, null=True) birth_date = models.DateField(blank=True, null=True) schedule_cache = models.TextField(blank=True, null=True) def get_absolute_url(self): return reverse('portal.accounts.views.user_info', args=[self.user.id],) def get_disciplines_studies(self): student_role = UserDisciplineRole.objects.student_role() queryset = self.reluserdiscipline_set.filter(role=student_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): disciplines = [] for rel in queryset.all(): disciplines.append(rel.discipline) return disciplines return [] def get_disciplines_teaches(self): teacher_role = UserDisciplineRole.objects.teacher_role() queryset = self.reluserdiscipline_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): disciplines = [] for rel in queryset.all(): disciplines.append(rel.discipline) return disciplines return [] def get_courses_studies(self): student_role = UserCourseRole.objects.student_role() queryset = self.relusercourse_set.filter(role=student_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): courses = [] for rel in queryset.all(): courses.append(rel.course) return courses return [] def get_courses_teaches(self): teacher_role = UserCourseRole.objects.teacher_role() queryset = self.relusercourse_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): courses = [] for rel in queryset.all(): courses.append(rel.course) return courses return [] def get_institutions_studies(self): student_role = UserInstitutionRole.objects.student_role() queryset = self.reluserinstitution_set.filter(role=student_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): institutions = [] for rel in queryset.all(): institutions.append(rel.institution) return institutions return [] def get_institutions_teaches(self): teacher_role = UserInstitutionRole.objects.teacher_role() queryset = self.reluserinstitution_set.filter(role=teacher_role, user=self, period=PERIODO_ATUAL) if queryset.exists(): institutions = [] for rel in queryset.all(): institutions.append(rel.institution) return institutions return [] def get_update_list(self): updates = [] if self.is_teacher: for discipline in self.get_disciplines_teaches(): for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]: updates.append(update) for course in self.get_courses_teaches(): for update in Update.objects.for_model(course).order_by('-date_published')[:5]: updates.append(update) for institution in self.get_institutions_teaches(): for update in Update.objects.for_model(institution).order_by('-date_published')[:5]: updates.append(update) else: for discipline in self.get_disciplines_studies(): for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]: 
updates.append(update) for course in self.get_courses_studies(): for update in Update.objects.for_model(course).order_by('-date_published')[:5]: updates.append(update) for institution in self.get_institutions_studies(): for update in Update.objects.for_model(institution).order_by('-date_published')[:5]: updates.append(update) return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5] class Meta: verbose_name = _('User Information') verbose_name_plural = _('User Information') def __unicode__(self): return self.name def fill_user_index(sender, instance, **kw): index = [] if instance.name: index += split_string(instance.name) instance.index = index models.signals.pre_save.connect(fill_user_index, sender=UserInfo) class PreInscricao(models.Model): matricula = models.CharField(blank=False, max_length=50) nome = models.CharField(blank=False, max_length=100) cpf = models.CharField(blank=False, max_length=50) sexo = models.CharField(blank=False, max_length=2) email = models.EmailField(blank=True, max_length=200) data_nasc = models.DateField(blank=True) rua = models.CharField(blank=False, max_length=200) numero = models.CharField(blank=False, max_length=10) bairro = models.CharField(blank=False, max_length=100) cidade = models.CharField(blank=False, max_length=100) estado = models.CharField(blank=False, max_length=2) senha = models.CharField(blank=False, max_length=50) disciplinas = SetField(blank=True, null=True) user_info = models.ForeignKey(UserInfo, blank=True, null=True) class Address(models.Model): address = models.CharField(_('Address'), blank=False, max_length=200) number = models.CharField(_('Number'), blank=False, max_length=10) neighborhood = models.CharField(_('Neighborhood'), blank=False, max_length=100) city = models.CharField(_('City'), blank=False, max_length=100) province = models.CharField(_('State or Province'), blank=False, max_length=2, choices=STATES_CHOICES) def get_index(self): index = [] if self.address: index += split_string(self.address) if self.neighborhood: index += split_string(self.neighborhood) if self.city: index += split_string(self.city) return set(index) class Meta: verbose_name = _('Address') verbose_name_plural = _('Addresses') class Institution(Indexable): name = models.CharField(_('Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('acronym',), unique=True, blank=True, max_length=100) acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100) picture = models.ForeignKey(Picture, blank=True, null=True) address = models.ForeignKey(Address, blank=True, null=True) description = models.TextField(_('Description')) homepage = models.URLField(_('Homepage'), blank=True, null=True) feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512) twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100) ''' ATENÇÃO !!! Pelo jeito, Django-nonrel não suporta campos ManyToMany ''' ''' Hora do brainstorm: Que outros campos devem vir aqui ? descrição ? endereço ? telefones ? página na internet ? Outras informações, tais como personalização, feeds e instituições parceiras, deverão ser adicionadas como módulos extra. Módulos extra: 1. Personalização: a) Logomarca; b) Imagens de fundo (topo, meio, rodapé); c) Esquema de cores; 2. Feeds (rss/atom/twitter) - Caso tenha um twitter, seria legal colocar um siga-nos; 3. Instituições parceiras (do próprio portal); 4. Links (com ou sem imagem) 5. Vestibular 6. Eventos 7. Biblioteca (integrar com o Pergamum) 8. 
IM (Google Talk Like) 9. Álbum de fotos 10. Arquivos (área de upload/download) 11. Comentários ''' class Meta: verbose_name = _('Institution') verbose_name_plural = _('Institutions') def get_students(self): student_role = UserInstitutionRole.objects.student_role() queryset = self.reluserinstitution_set.filter(role=student_role, institution=self) if queryset.exists(): students = [] for rel in queryset.all(): try: students.append(rel.user) except: logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id) return students return None def get_student_count(self): student_role = UserInstitutionRole.objects.student_role() return self.reluserinstitution_set.filter(role=student_role, institution=self).count() def get_teachers(self): teacher_role = UserInstitutionRole.objects.teacher_role() queryset = self.reluserinstitution_set.filter(role=teacher_role, institution=self) if queryset.exists(): teachers = [] for rel in queryset.all(): try: teachers.append(rel.user) except: logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id) return teachers return None def get_update_list(self): updates = [] for update in Update.objects.for_model(self).order_by('-date_published')[:5]: updates.append(update) for course in self.course_set.all(): for update in Update.objects.for_model(course).order_by('-date_published')[:5]: updates.append(update) for discipline in course.discipline_set.all(): for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]: updates.append(update) return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5] def get_class_name(self): return 'portal.models.Institution' def get_absolute_url(self): return reverse('portal.institutions.views.detail', args=[self.slug,]) def __unicode__(self): return self.name #If the acronym field is not filled, it will receive the value from name field. 
def fill_acronym(sender, instance, **kw): if not instance.acronym or instance.acronym == '': instance.acronym = instance.name models.signals.pre_save.connect(fill_acronym, sender=Institution) def fill_institution_index(sender, instance, **kw): index = [] if instance.name: index += split_string(instance.name) if instance.acronym: index += split_string(instance.acronym) if instance.description: index += split_string(instance.description) if instance.address: index += instance.address.get_index() instance.index = index models.signals.pre_save.connect(fill_institution_index, sender=Institution) class InstitutionUpdateCache(models.Model): text = models.CharField(blank=False, max_length=100) link = models.CharField(blank=False, max_length=512) date_published = models.DateTimeField(default=datetime.datetime.now) author = models.ForeignKey(User, blank=False) institution = models.ForeignKey(Institution, blank=True, null=True) class RelInstitutionOwner(models.Model): owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='owner_set') institution = models.ForeignKey(Institution, blank=True, null=True, related_name='owner_set') class PhoneNumber(models.Model): region_code = models.CharField(_('Region Code'), blank=False, max_length=5) telephone = models.CharField(_('Telephone'), blank=False, max_length=20) description = models.CharField(_('Description'), blank=False, max_length=50) institution = models.ForeignKey(Institution, blank=False) class Course(Indexable): name = models.CharField(_('Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('acronym',), parent_name='institution', unique=True, blank=True, max_length=100) acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100) picture = models.ForeignKey(Picture, blank=True, null=True) description = models.TextField(_('Description')) feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512) twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100) institution = models.ForeignKey(Institution, blank=False) def get_students(self): student_role = UserCourseRole.objects.student_role() queryset = self.relusercourse_set.filter(role=student_role, course=self) if queryset.exists(): students = [] for rel in queryset.all(): try: students.append(rel.user) except: logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id) return students return None def get_teachers(self): teacher_role = UserCourseRole.objects.teacher_role() queryset = self.relusercourse_set.filter(role=teacher_role, course=self) if queryset.exists(): teachers = [] for rel in queryset.all(): try: teachers.append(rel.user) except: logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id) return teachers return None class Meta: verbose_name = _('Course') verbose_name_plural = _('Courses') def get_class_name(self): return 'portal.models.Course' def get_absolute_url(self): return reverse('portal.courses.views.detail', args=[self.institution.slug, self.slug,]) def get_update_list(self): updates = [] for update in Update.objects.for_model(self).order_by('-date_published')[:5]: updates.append(update) for discipline in self.discipline_set.all(): for update in Update.objects.for_model(discipline).order_by('-date_published')[:5]: updates.append(update) return sorted(updates, key=attrgetter('date_published'), reverse=True)[:5] def __unicode__(self): return self.name models.signals.pre_save.connect(fill_acronym, 
sender=Course) def fill_course_index(sender, instance, **kw): index = [] if instance.name: index += split_string(instance.name) if instance.acronym: index += split_string(instance.acronym) if instance.description: index += split_string(instance.description) if instance.institution: index += instance.institution.index instance.index = index models.signals.pre_save.connect(fill_course_index, sender=Course) class RelCourseOwner(models.Model): owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='course_owner_set') course = models.ForeignKey(Course, blank=True, null=True, related_name='course_owner_set') class Discipline(Indexable): name = models.CharField(_('Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('acronym',), parent_name='course', unique=True, blank=True, max_length=100) acronym = models.CharField(_('Acronym'), blank=True, null=True, max_length=100) picture = models.ForeignKey(Picture, blank=True, null=True) description = models.TextField(_('Description')) feed_url = models.CharField(_('News Feed URL'), blank=True, null=True, max_length=512) twitter_id = models.CharField(_('Twitter ID'), blank=True, null=True, max_length=100) course = models.ForeignKey(Course, blank=False) registration_type = models.IntegerField(_('Registration type'), blank=False, default=MODERATE_REGISTRATION, choices=REGISTRATION_TYPE_CHOICES) access_type = models.IntegerField(_('Access type'), blank=False, default=PUBLIC_ACCESS, choices=ACCESS_TYPE_CHOICES) period = models.CharField(_('Period'), blank=True, null=True, max_length=5) class Meta: verbose_name = _('Discipline') verbose_name_plural = _('Disciplines') def get_students(self): student_role = UserDisciplineRole.objects.student_role() queryset = self.reluserdiscipline_set.filter(role=student_role, discipline=self) if queryset.exists(): students = [] for rel in queryset.all(): try: students.append(rel.user) except: logging.error("[get_students] Not able to find an user for RelUserInstitution %s" % rel.id) return students return None def get_teachers(self): teacher_role = UserDisciplineRole.objects.teacher_role() queryset = self.reluserdiscipline_set.filter(role=teacher_role, discipline=self) if queryset.exists(): teachers = [] for rel in queryset.all(): try: teachers.append(rel.user) except: logging.error("[get_teachers] Not able to find an user for RelUserInstitution %s" % rel.id) return teachers return None def get_horario(self): not_empty = lambda x: (x and len(x.lstrip()) > 0 and x != 'null') or False if self.disciplinemetadata_set.exists(): m_data = self.disciplinemetadata_set.all()[0] horario = u'' if not_empty(m_data.segunda): horario += u'Segunda-feira ' + unicode(formata_hora(m_data.segunda), 'utf-8') if not_empty(m_data.terca): horario += u'\nTerça-feira ' + unicode(formata_hora(m_data.terca), 'utf-8') if not_empty(m_data.quarta): horario += u'\nQuarta-feira ' + unicode(formata_hora(m_data.quarta), 'utf-8') if not_empty(m_data.quinta): horario += u'\nQuinta-feira ' + unicode(formata_hora(m_data.quinta), 'utf-8') if not_empty(m_data.sexta): horario += u'\nSexta-feira ' + unicode(formata_hora(m_data.sexta), 'utf-8') if not_empty(m_data.sabado): horario += u'\nSábado ' + unicode(formata_hora(m_data.sabado), 'utf-8') return horario return None def get_sala(self): if self.disciplinemetadata_set.exists(): return self.disciplinemetadata_set.all()[0].sala return None def get_class_name(self): return 'portal.models.Discipline' def get_absolute_url(self): return reverse('portal.disciplines.views.detail', 
args=[self.course.institution.slug, self.course.slug, self.slug,]) def get_update_list(self): return Update.objects.for_model(self).order_by('-date_published')[:5] def notify_upload(self, user, uploaded_file): text = u'%s postou um novo material didático <a href="%s">%s</a>' % (user.get_profile().name, self.get_absolute_url(), uploaded_file.description, ) link = self.get_absolute_url() update = Update.createUpdate(user, text, link, self) ctx = { 'mensagem': update.text, 'link': 'http://www.portalsaladeaula.com%s' % update.link, } subject = 'Novo Material Didático' from_email = 'Portal Sala de Aula <[email protected]>' text_content = get_template('emails/update.txt').render(Context(ctx)) html_content = get_template('emails/update.html').render(Context(ctx)) if self.get_students(): for student in self.get_students(): msg = EmailMultiAlternatives(subject, text_content, from_email, [student.email,]) msg.attach_alternative(html_content, "text/html") try: msg.send() except: logging.error('Não foi possível enviar o email') if self.get_teachers(): for teacher in self.get_teachers(): if teacher != uploaded_file.user: msg = EmailMultiAlternatives(subject, text_content, from_email, [teacher.email,]) msg.attach_alternative(html_content, "text/html") try: msg.send() except: logging.error('Não foi possível enviar o email') def __unicode__(self): return self.name models.signals.pre_save.connect(fill_acronym, sender=Discipline) def fill_discipline_index(sender, instance, **kw): index = [] if instance.name: index += split_string(instance.name) if instance.acronym: index += split_string(instance.acronym) if instance.description: index += split_string(instance.description) if instance.course: index += instance.course.index instance.index = index models.signals.pre_save.connect(fill_discipline_index, sender=Discipline) class DisciplineMetadata(models.Model): cod_turma = models.CharField(blank=False, max_length=50) periodo = models.CharField(blank=False, max_length=50) senha = models.CharField(blank=False, max_length=50) discipline = models.ForeignKey(Discipline, blank=True, null=True) segunda = models.CharField(blank=True, null=True, max_length=5) terca = models.CharField(blank=True, null=True, max_length=5) quarta = models.CharField(blank=True, null=True, max_length=5) quinta = models.CharField(blank=True, null=True, max_length=5) sexta = models.CharField(blank=True, null=True, max_length=5) sabado = models.CharField(blank=True, null=True, max_length=5) sala = models.CharField(blank=True, null=True, max_length=5) class RelDisciplineOwner(models.Model): owner = models.ForeignKey(UserInfo, blank=True, null=True, related_name='discipline_owner_set') discipline = models.ForeignKey(Discipline, blank=True, null=True, related_name='discipline_owner_set') #To speedup the system, these roles will be queried just once INSTITUTION_STUDENT_ROLE = None INSTITUTION_TEACHER_ROLE = None INSTITUTION_COORDINATOR_ROLE = None INSTITUTION_MANAGER_ROLE = None class ManagerUserInstitutionRole(models.Manager): def student_role(self): global INSTITUTION_STUDENT_ROLE if INSTITUTION_STUDENT_ROLE: return INSTITUTION_STUDENT_ROLE queryset = self.filter(slug='student') if queryset.exists(): INSTITUTION_STUDENT_ROLE = queryset.all()[0] return INSTITUTION_STUDENT_ROLE INSTITUTION_STUDENT_ROLE = UserInstitutionRole(name='Student', slug='student') INSTITUTION_STUDENT_ROLE.save() return INSTITUTION_STUDENT_ROLE def teacher_role(self): global INSTITUTION_TEACHER_ROLE if INSTITUTION_TEACHER_ROLE: return INSTITUTION_TEACHER_ROLE queryset = 
self.filter(slug='teacher') if queryset.exists(): INSTITUTION_TEACHER_ROLE = queryset.all()[0] return INSTITUTION_TEACHER_ROLE INSTITUTION_TEACHER_ROLE = UserInstitutionRole(name='Teacher', slug='teacher') INSTITUTION_TEACHER_ROLE.save() return INSTITUTION_TEACHER_ROLE def coordinator_role(self): global INSTITUTION_COORDINATOR_ROLE if INSTITUTION_COORDINATOR_ROLE: return INSTITUTION_COORDINATOR_ROLE queryset = self.filter(slug='coordinator') if queryset.exists(): INSTITUTION_COORDINATOR_ROLE = queryset.all()[0] return INSTITUTION_COORDINATOR_ROLE INSTITUTION_COORDINATOR_ROLE = UserInstitutionRole(name='Coordinator', slug='coordinator') INSTITUTION_COORDINATOR_ROLE.save() return INSTITUTION_COORDINATOR_ROLE def manager_role(self): global INSTITUTION_MANAGER_ROLE if INSTITUTION_MANAGER_ROLE: return INSTITUTION_MANAGER_ROLE queryset = self.filter(slug='manager') if queryset.exists(): INSTITUTION_MANAGER_ROLE = queryset.all()[0] return INSTITUTION_MANAGER_ROLE INSTITUTION_MANAGER_ROLE = UserInstitutionRole(name='Manager', slug='manager') INSTITUTION_MANAGER_ROLE.save() return INSTITUTION_MANAGER_ROLE class UserInstitutionRole(models.Model): name = models.CharField(_('Role Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100) objects = ManagerUserInstitutionRole() class Meta: verbose_name = _('Role for User/Institution Relationship') verbose_name_plural = _('Roles for User/Institution Relationship') def __unicode__(self): return self.name class RelUserInstitution(models.Model): user = models.ForeignKey(UserInfo, blank=False) institution = models.ForeignKey(Institution, blank=False) role = models.ForeignKey(UserInstitutionRole, blank=False) period = models.CharField(_('Period'), blank=True, null=True, max_length=5) #To speedup the system, these roles will be queried just once COURSE_STUDENT_ROLE = None COURSE_TEACHER_ROLE = None COURSE_COORDINATOR_ROLE = None COURSE_SECRETARY_ROLE = None class ManagerUserCourseRole(models.Manager): def student_role(self): global COURSE_STUDENT_ROLE if COURSE_STUDENT_ROLE: return COURSE_STUDENT_ROLE queryset = self.filter(slug='student') if queryset.exists(): COURSE_STUDENT_ROLE = queryset.all()[0] return COURSE_STUDENT_ROLE COURSE_STUDENT_ROLE = UserCourseRole(name='Student', slug='student') COURSE_STUDENT_ROLE.save() return COURSE_STUDENT_ROLE def teacher_role(self): global COURSE_TEACHER_ROLE if COURSE_TEACHER_ROLE: return COURSE_TEACHER_ROLE queryset = self.filter(slug='teacher') if queryset.exists(): COURSE_TEACHER_ROLE = queryset.all()[0] return COURSE_TEACHER_ROLE COURSE_TEACHER_ROLE = UserCourseRole(name='Teacher', slug='teacher') COURSE_TEACHER_ROLE.save() return COURSE_TEACHER_ROLE def coordinator_role(self): global COURSE_COORDINATOR_ROLE if COURSE_COORDINATOR_ROLE: return COURSE_COORDINATOR_ROLE queryset = self.filter(slug='coordinator') if queryset.exists(): COURSE_COORDINATOR_ROLE = queryset.all()[0] return COURSE_COORDINATOR_ROLE COURSE_COORDINATOR_ROLE = UserCourseRole(name='Coordinator', slug='coordinator') COURSE_COORDINATOR_ROLE.save() return COURSE_COORDINATOR_ROLE def secretary_role(self): global COURSE_SECRETARY_ROLE if COURSE_SECRETARY_ROLE: return COURSE_SECRETARY_ROLE queryset = self.filter(slug='secretary') if queryset.exists(): COURSE_SECRETARY_ROLE = queryset.all()[0] return COURSE_SECRETARY_ROLE COURSE_SECRETARY_ROLE = UserCourseRole(name='Secretary', slug='secretary') COURSE_SECRETARY_ROLE.save() return COURSE_SECRETARY_ROLE class UserCourseRole(models.Model): 
name = models.CharField(_('Role Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100) objects = ManagerUserCourseRole() class Meta: verbose_name = _('Role for User/Course Relationship') verbose_name_plural = _('Roles for User/Course Relationship') def __unicode__(self): return self.name class RelUserCourse(models.Model): user = models.ForeignKey(UserInfo, blank=False) course = models.ForeignKey(Course, blank=False) role = models.ForeignKey(UserCourseRole, blank=False) period = models.CharField(_('Period'), blank=True, null=True, max_length=5) #To speedup the system, these roles will be queried just once DISCIPLINE_STUDENT_ROLE = None DISCIPLINE_TEACHER_ROLE = None class ManagerUserDisciplineRole(models.Manager): def student_role(self): global DISCIPLINE_STUDENT_ROLE if DISCIPLINE_STUDENT_ROLE: return DISCIPLINE_STUDENT_ROLE queryset = self.filter(slug='student') if queryset.exists(): DISCIPLINE_STUDENT_ROLE = queryset.all()[0] return DISCIPLINE_STUDENT_ROLE DISCIPLINE_STUDENT_ROLE = UserDisciplineRole(name='Student', slug='student') DISCIPLINE_STUDENT_ROLE.save() return DISCIPLINE_STUDENT_ROLE def teacher_role(self): global DISCIPLINE_TEACHER_ROLE if DISCIPLINE_TEACHER_ROLE: return DISCIPLINE_TEACHER_ROLE queryset = self.filter(slug='teacher') if queryset.exists(): DISCIPLINE_TEACHER_ROLE = queryset.all()[0] return DISCIPLINE_TEACHER_ROLE DISCIPLINE_TEACHER_ROLE = UserDisciplineRole(name='Teacher', slug='teacher') DISCIPLINE_TEACHER_ROLE.save() return DISCIPLINE_TEACHER_ROLE class UserDisciplineRole(models.Model): name = models.CharField(_('Role Name'), blank=False, max_length=100) slug = AutoSlugField(prepopulate_from=('name',), unique=True, blank=True, max_length=100) objects = ManagerUserDisciplineRole() class Meta: verbose_name = _('Role for User/Discipline Relationship') verbose_name_plural = _('Roles for User/Discipline Relationship') def __unicode__(self): return self.name class RelUserDiscipline(models.Model): user = models.ForeignKey(UserInfo, blank=False) discipline = models.ForeignKey(Discipline, blank=False) role = models.ForeignKey(UserDisciplineRole, blank=False) period = models.CharField(_('Period'), blank=True, null=True, max_length=5) def invalidate_reluserdiscipline_cache(sender, instance, **kw): if instance.user: instance.user.schedule_cache = None instance.user.save() models.signals.pre_save.connect(invalidate_reluserdiscipline_cache, sender=RelUserDiscipline)
bsd-3-clause
-775,253,353,824,724,200
40.678788
155
0.640686
false
3.722126
false
false
false
j-i-l/cryptsypy
cryptsypy/CryptsyAccount.py
1
7173
from pyapi import Request,RequestPrivate #this is going to be it #from pyapi import AccountStructure from CryptsyInfo import Info import time # <codecell> #pur account into pyapi and inherit the specific platform account #from the general class. class Account(): #class Account(AccountStructure): #it does not make much sense to have the info in a class... def __init__(self, PlatformInfo = Info(), public_key = '', private_key = '',): """ This class is designed to hold all information specific to a user account on cryptsy.com. Be carefull the secret (priv_key) """ #AccountStructure.__init__(self, # PlatfromInfo = PlatformInfo, # public_key = public_key, # private_key = private_key, # ) # self._init_Requests(PlatformInfo = PlatformInfo) self.marketid = {} self.Pairs = {} self._init_mid_pairs() self.CryptoAdresses = {} self.CryptoAdresses['LTC'] = 'LMGgCFsxJBjkPwAW9bn5MnZG4vyTGv1aJr' # self.pub_key = public_key # self.priv_key = private_key #self.Request = private_request(Account = self) self.MyTrades = {} self.MyOrders = {} self.MyTransactions = {} self.TradeHisory = {} self.Depths = {} ##Those have to adapted to the specific platform self.command_account_info = 'getinfo' self.command_market_info = 'getmarkets' self.command_trades_history = '' self.command_open_orders = '' #not used self.command_my_transactions = '' self.command_my_trades = '' self.command_my_orders = 'allmyorders' self.command_new_order = 'createorder' self.command_cancel_order = '' self.command_cancel_all_orders = '' self.parameter_ordertype = 'ordertype' self.parameter_market = 'marketid' self.parameter_quantity = 'quantity' self.parameter_price = 'price' self.parameter_order_id = '' self.parameter_market_id = '' return None # def _init_Requests(self, PlatformInfo): self.Request = RequestPrivate(Account = self, Info = PlatformInfo) self.pubRequest = Request(Info = PlatformInfo) return 0 def _init_mid_pairs(self,): md = self.pubRequest.fetch('marketdatav2')['markets'] for p in md.keys(): pair = tuple(p.split('/')) mid = md[p]['marketid'] self.Pairs[mid] = pair self.marketid[pair] = mid self.OpenOrders[pair] = md[p]['buyorders'] del md return 0 # def update_Info(self,): return self.Request.fetch('getinfo') # def update_MarketInfo(self,): return self.Request.fetch('getmarkets') def update_MyTransactions(self,): m_trans = self.Request.fetch('mytransactions') for trans in m_trans: self.MyTransactions[trans.pop('timestamp')] = trans return 0 def update_TradeHistory(self, market): """market is a tuple""" ##self.marketid is to do!!! 
mid = self.marketid(market) history = self.Request.fetch('markettrades',params={'marketid':mid}) pair = self.Pairs[mid] self.TradeHistory[pair] = history return 0 def update_OpenOrders(self, market): """market is a tuple""" mid = self.marketid(market) o_orders = self.Request.fetch('marketorders',params={'marketid':mid}) ##check the form of o_orders print o_orders #self.OpenOrders[self.Pairs[mid]] = return 0 def update_MyTrades(self, market = None, limit = 200): if market: mid = self.marketid[market] pair = self.Pairs[mid] method = 'mytrades' params = {'marketid':mid, 'limit':limit} else: method = 'allmytrades' params = {} m_trades = self.Request.fetch(method,params = params) #check format of m_trades print m_trades #self.MyTrades[pair] = m_trades return 0 def update_MyOrders(self, market = None): if market: mid = self.marketid[market] pair = self.Pairs[mid] method = 'myorders' params = {'marketid':mid} else: method = 'allmyorders' params = {} m_orders = self.Request.fetch(method, params = params) ##check the format. #self.MyOrders[pair] = ... print m_orders return 0 def update_Depths(self, market): #what is this again? mid = self.marketid[market] pair = self.Pairs[mid] depths = self.Request.fetch('depth',params={'marketid':mid}) ##check format #self.Dephts[pair] = ... return 0 # def CreateOrder(self, market, order_type, quantity, price): mid = self.marketid[market] pair = self.Pairs[mid] params = { 'marketid':mid, 'ordertype':order_type, 'quantity':quantity, 'price':price } ##check if funds are sufficient, if minimal value is exceded, etc if self._order_possible(params): now = time.time() oid = self.Request.fetch('createorder',params = params) self.MyOpenOrders[oid] = params self.MyOpenOrders[oid][u'timestamp'] = now return 0 def _order_possible(self, params): ##to do #if ok # return True #else: # return False return True def CancelOrder(self, **orders): if 'orderid' in orders: c_o = self.Request.fetch('cancelorder',params={'orderid':orders['orderid']}) print c_o #if successfull: # if orderid in self.MyOpenOrders: # self.MyOpenOrders.pop(orderid) if 'marketid' in orders: mid = orders['marketid'] c_o = self.Request.fetch('cancelmarketorders',params={'marketid':mid}) print c_o #if successfull: # remove them from self.MyOpenOrders (use marketid) if not len(orders.keys()): all_c_o = self.Request.fetch('cancelallorders') ##check the output ##update self.MyOpenOrders print all_c_o return 0 def get_fees(self, ordertype, quantity, price): """does this mean same fees for all markets?""" params = { 'ordertype': ordertype, 'quantity': quantity, 'price': price } ret = self.Request.fetch('calculatefees',params=params) print ret return 0 def _update_Fees(self,): """""" #update self.Fees #self.get_fees(' return 0
mit
-7,307,154,705,212,212,000
32.0553
88
0.538269
false
3.978369
false
false
false
Micronaet/micronaet-migration
purchase_extra_field/purchase.py
1
5464
# -*- coding: utf-8 -*- ############################################################################### # # ODOO (ex OpenERP) # Open Source Management Solution # Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>) # Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>) # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import os import sys import logging import openerp import openerp.netsvc as netsvc import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, expression, orm from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp import SUPERUSER_ID, api from openerp import tools from openerp.tools.translate import _ from openerp.tools.float_utils import float_round as round from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare) _logger = logging.getLogger(__name__) class PurchaseOrder(orm.Model): _inherit = 'purchase.order' _columns = { 'delivery_note': fields.text('Delivery note'), 'payment_note': fields.text('Payment note'), } class PurchaseOrderLine(orm.Model): ''' Add here maybe not used ''' _inherit = 'purchase.order.line' _columns = { 'show_note': fields.text('Show note'), 'note': fields.text('Note'), } class ProductProductPurchase(orm.Model): ''' Add extra field in product (used in form and report) ''' _inherit = 'product.product' def get_quotation_image(self, cr, uid, item, context=None): ''' Get single image for the file (default path is ~/photo/db_name/quotation ''' img = '' try: extension = "jpg" image_path = os.path.expanduser( "~/photo/%s/product/quotation" % cr.dbname) empty_image= "%s/%s.%s" % (image_path, "empty", extension) product_browse = self.browse(cr, uid, item, context=context) # Image compoesed with code format (code.jpg) if product_browse.default_code: (filename, header) = urllib.urlretrieve( "%s/%s.%s" % ( image_path, product_browse.default_code.replace(" ", "_"), extension)) # code image f = open(filename , 'rb') img = base64.encodestring(f.read()) f.close() if not img: # empty image: (filename, header) = urllib.urlretrieve(empty_image) f = open(filename , 'rb') img = base64.encodestring(f.read()) f.close() except: try: print ( "Image error", product_browse.default_code, sys.exc_info()) except: pass img = '' return img # Fields function: def _get_quotation_image(self, cr, uid, ids, field_name, arg, context=None): ''' Field function, for every ids test if there's image and return base64 format according to code value (images are jpg) ''' res = {} for item in ids: res[item] = self.get_quotation_image( cr, uid, item, context=context) return res _columns = { 'colls_number': fields.integer('Colli'), 'colls': fields.char('Colli', size=30), 'colour_code': fields.char( 'Codice colore fornitore', size=64, translate=True), # 
TODO moved in Micronaet/micronaet-product product_fist_supplier 'first_supplier_id': fields.many2one('res.partner', 'First supplier'), 'default_supplier': fields.char('Fornitore default', size=64), 'default_supplier_code': fields.char('Codice forn. default', size=40), # TODO moved in Micronaet/micronaet-product product_fist_supplier 'package_type': fields.char('Package type', size=80), 'pack_l': fields.float('L. Imb.', digits=(16, 2)), 'pack_h': fields.float('H. Imb.', digits=(16, 2)), 'pack_p': fields.float('P. Imb.', digits=(16, 2)), } _defaults = { #'quantity_x_pack': lambda *a: 1, } class PurchaseOrderLine(orm.Model): ''' Add extra field in purchase order line ''' _inherit = 'purchase.order.line' _columns = { 'q_x_pack': fields.related( 'product_id', 'q_x_pack', type='integer', string='Package'), 'colour': fields.related( 'product_id', 'colour', type='char', size=64, string='Color'), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
2,421,605,581,973,595,600
34.947368
79
0.58071
false
4.008804
false
false
false
DantestyleXD/MVM5B_BOT
plugins/mine.py
1
2025
# -*- coding: utf-8 -*- from config import * print(Color( '{autored}[{/red}{autoyellow}+{/yellow}{autored}]{/red} {autocyan} mine.py importado.{/cyan}')) @bot.message_handler(commands=['mine']) def command_COMANDO(m): cid = m.chat.id uid = m.from_user.id try: send_udp('mine') except Exception as e: bot.send_message(52033876, send_exception(e), parse_mode="Markdown") if not is_recent(m): return None if is_banned(uid): if not extra['muted']: bot.reply_to(m, responses['banned']) return None if is_user(cid): if cid in [52033876, 4279004]: parametro = m.text.split(' ')[1] if len( m.text.split(' ')) > 1 else None tmp = int(os.popen('ps aux | grep java | wc -l').read()) if not parametro: if tmp == 3: bot.send_message(cid, "Servidor de minecraft encendido.") elif tmp == 2: bot.send_message(cid, "Servidor de minecraft apagado.") else: bot.send_message( 52033876, "@Edurolp mira el server del minecraft que algo le pasa. tmp = {}".format(tmp)) else: if parametro == 'start': if tmp == 2: bot.send_message(cid, "Iniciando servidor.") os.popen('pm2 start 8') else: bot.send_message( cid, "Se supone que el server ya está encendido, avisa a @Edurolp si no funciona.") if parametro == 'stop': if tmp > 2: bot.send_message(cid, "Apagando servidor.") os.popen('pm2 stop 8') else: bot.send_message(cid, "El servidor ya estaba apagado.") else: bot.send_message(cid, responses['not_user'])
gpl-2.0
7,239,661,459,171,880,000
37.188679
106
0.473814
false
3.776119
false
false
false
fhqgfss/MoHa
moha/posthf/pt/mp.py
1
2331
import numpy as np def spinfock(eorbitals): """ """ if type(eorbitals) is np.ndarray: dim = 2*len(eorbitals) fs = np.zeros(dim) for i in range(0,dim): fs[i] = eorbitals[i//2] fs = np.diag(fs) # put MO energies in diagonal array elif type(eorbitals) is dict: dim = 2*len(eorbitals['alpha']) fs = np.zeros(dim) for i in range(0,dim): if i%2==0: fs[i] = eorbitals['alpha'][i//2] elif i%2==1: fs[i] = eorbitals['beta'][i//2] fs = np.diag(fs) # put MO energies in diagonal array return fs class MPNSolver(object): def __init__(self,energy): self.energy = energy @classmethod def mp2(cls,hfwavefunction,hamiltonian): occ = hfwavefunction.occ C = hfwavefunction.coefficient eorbitals = hfwavefunction.eorbitals Emp2 = 0.0 if occ['alpha'] == occ['beta']: Eri = hamiltonian.operators['electron_repulsion'].basis_transformation(C) for i in range(occ['alpha']): for j in range(occ['alpha']): for a in range(occ['alpha'],hamiltonian.dim): for b in range(occ['alpha'],hamiltonian.dim): Emp2 += Eri[i,a,j,b]*(2*Eri[i,a,j,b]-Eri[i,b,j,a])/(eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b]) elif occ['alpha'] != occ['beta']: for spin in C: Eri = hamiltonian.operators['electron_repulsion'].basis_transformation(C[spin]) for i in range(occ[spin]): for j in range(occ[spin]): for a in range(occ[spin],hamiltonian.dim): for b in range(occ[spin],hamiltonian.dim): Emp2 += Eri[i,a,j,b]*(Eri[i,a,j,b]-0.5*Eri[i,b,j,a])/(eorbitals[spin][i] + eorbitals[spin][j] -eorbitals[spin][a] - eorbitals[spin][b]) print '{0:2s} {1:3f}'.format('Escf', hfwavefunction.Etot) print '{0:2s} {1:3f}'.format('Emp2', Emp2) print '{0:2s} {1:3f}'.format('Etot', hfwavefunction.Etot+Emp2) return Emp2 @classmethod def mp3(cls,hfwavefunction,hamiltonian): occ = hfwavefunction.occ C = hfwavefunction.coefficient pass
mit
-7,517,407,249,570,599,000
37.213115
167
0.531532
false
3.083333
false
false
false
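A minimal usage sketch for the spinfock helper in the moha/posthf/pt/mp.py record above; the import path is inferred from that record's path field and the MO-energy values are made up purely for illustration, not part of the original module.
import numpy as np
from moha.posthf.pt.mp import spinfock   # module path assumed from the record above
eorb = np.array([-0.5, 0.3])             # two hypothetical spatial MO energies
fs = spinfock(eorb)                      # 4x4 diagonal spin-orbital Fock matrix
# expected: fs == np.diag([-0.5, -0.5, 0.3, 0.3]); each spatial energy is repeated for alpha and beta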
ppGodel/ADA2017
Tareas/tarea5.py
1
2081
# Ford-Fulkerson flows and spanning trees on dense, large and normal graphs # required files inside the files folder # graph.py, InstanciesGenerator.py from files import graph from files import InstanciesGenerator import random import time # a graph with high density and a high number of vertices is generated; to see how the algorithm performs on diverse graphs it will be tested with 105, 205 and 1005 nodes no_vertices = 205 ddn = InstanciesGenerator.Distribution(InstanciesGenerator.DistributionsTypes.uniform, 1, no_vertices-1 ) dw = InstanciesGenerator.Distribution(InstanciesGenerator.DistributionsTypes.normal, 15, 3) generadorcon = InstanciesGenerator.GraphInstancesGenerator(graphtype = InstanciesGenerator.GraphTypes.connected,distribution_weight = dw,distribution_degree = ddn, directed = False ) # several graph densities will be tried to see how performance changes as the graph approaches the density of a complete graph density = [0.8,0.85,0.90,0.95] replicas = 5 for d in density: gc = generadorcon.generateInstance('Test', no_vertices, round((no_vertices-1)*d*no_vertices)) a = random.choice(gc.vertices) for r in range(replicas): # endpoints are chosen at random to compute the flow between them b = random.choice(gc.vertices) while len(gc.vertices)>2 and b.id == a.id: b = random.choice(gc.vertices) # compute the maximum flow between the two gc.resetflow() ti = time.clock() mf = gc.shortaugmentingmaxflow(a.id,b.id) tf = time.clock()-ti ti = time.clock() mb = gc.breadthfirstsearch(a.id) tfb = time.clock()-ti print(no_vertices,round((no_vertices-1)*d*no_vertices),r, mf, tf, tfb) # results were stored in resultadosTarea5/result.pdf, where it can be seen that the algorithm takes longer when it computes more flow (and computes more paths) or when the graph density increases; it could not be compared against the Ford-Fulkerson variant that picks an augmenting path at random, since that one took far too long for the experiments.
gpl-3.0
9,055,192,903,225,367,000
56.75
339
0.740741
false
3.121622
false
false
false
gschizas/praw
praw/models/reddit/comment.py
1
12884
"""Provide the Comment class.""" from typing import Any, Dict, Optional, TypeVar, Union from ...const import API_PATH from ...exceptions import ClientException, InvalidURL from ...util.cache import cachedproperty from ..comment_forest import CommentForest from .base import RedditBase from .mixins import ( FullnameMixin, InboxableMixin, ThingModerationMixin, UserContentMixin, ) from .redditor import Redditor _Comment = TypeVar("_Comment") _CommentModeration = TypeVar("_CommentModeration") Reddit = TypeVar("Reddit") Submission = TypeVar("Submission") Subreddit = TypeVar("Subreddit") class Comment(InboxableMixin, UserContentMixin, FullnameMixin, RedditBase): """A class that represents a reddit comments. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list comprehensive in any way. ======================= =================================================== Attribute Description ======================= =================================================== ``author`` Provides an instance of :class:`.Redditor`. ``body`` The body of the comment. ``created_utc`` Time the comment was created, represented in `Unix Time`_. ``distinguished`` Whether or not the comment is distinguished. ``edited`` Whether or not the comment has been edited. ``id`` The ID of the comment. ``is_submitter`` Whether or not the comment author is also the author of the submission. ``link_id`` The submission ID that the comment belongs to. ``parent_id`` The ID of the parent comment. If it is a top-level comment, this returns the submission ID instead (prefixed with 't3'). ``permalink`` A permalink for the comment. Comment objects from the inbox have a ``context`` attribute instead. ``replies`` Provides an instance of :class:`.CommentForest`. ``score`` The number of upvotes for the comment. ``stickied`` Whether or not the comment is stickied. ``submission`` Provides an instance of :class:`.Submission`. The submission that the comment belongs to. ``subreddit`` Provides an instance of :class:`.Subreddit`. The subreddit that the comment belongs to. ``subreddit_id`` The subreddit ID that the comment belongs to. ======================= =================================================== .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time """ MISSING_COMMENT_MESSAGE = ( "This comment does not appear to be in the comment tree" ) STR_FIELD = "id" @staticmethod def id_from_url(url: str) -> str: """Get the ID of a comment from the full URL.""" parts = RedditBase._url_parts(url) try: comment_index = parts.index("comments") except ValueError: raise InvalidURL(url) if len(parts) - 4 != comment_index: raise InvalidURL(url) return parts[-1] @property def _kind(self): """Return the class's kind.""" return self._reddit.config.kinds["comment"] @property def is_root(self) -> bool: """Return True when the comment is a top level comment.""" parent_type = self.parent_id.split("_", 1)[0] return parent_type == self._reddit.config.kinds["submission"] @cachedproperty def mod(self) -> _CommentModeration: """Provide an instance of :class:`.CommentModeration`. Example usage: .. code-block:: python comment = reddit.comment('dkk4qjd') comment.mod.approve() """ return CommentModeration(self) @property def replies(self) -> CommentForest: """Provide an instance of :class:`.CommentForest`. 
This property may return an empty list if the comment has not been refreshed with :meth:`.refresh()` Sort order and reply limit can be set with the ``reply_sort`` and ``reply_limit`` attributes before replies are fetched, including any call to :meth:`.refresh`: .. code-block:: python comment.reply_sort = 'new' comment.refresh() replies = comment.replies .. note:: The appropriate values for ``reply_sort`` include ``best``, ``top``, ``new``, ``controversial``, ``old`` and ``q&a``. """ if isinstance(self._replies, list): self._replies = CommentForest(self.submission, self._replies) return self._replies @property def submission(self) -> Submission: """Return the Submission object this comment belongs to.""" if not self._submission: # Comment not from submission self._submission = self._reddit.submission( self._extract_submission_id() ) return self._submission @submission.setter def submission(self, submission: Submission): """Update the Submission associated with the Comment.""" submission._comments_by_id[self.name] = self self._submission = submission # pylint: disable=not-an-iterable for reply in getattr(self, "replies", []): reply.submission = submission def __init__( self, reddit: Reddit, id: Optional[str] = None, # pylint: disable=redefined-builtin url: Optional[str] = None, _data: Optional[Dict[str, Any]] = None, ): """Construct an instance of the Comment object.""" if (id, url, _data).count(None) != 2: raise TypeError( "Exactly one of `id`, `url`, or `_data` must be provided." ) self._replies = [] self._submission = None super().__init__(reddit, _data=_data) if id: self.id = id elif url: self.id = self.id_from_url(url) else: self._fetched = True def __setattr__( self, attribute: str, value: Union[str, Redditor, CommentForest, Subreddit], ): """Objectify author, replies, and subreddit.""" if attribute == "author": value = Redditor.from_data(self._reddit, value) elif attribute == "replies": if value == "": value = [] else: value = self._reddit._objector.objectify(value).children attribute = "_replies" elif attribute == "subreddit": value = self._reddit.subreddit(value) super().__setattr__(attribute, value) def _fetch_info(self): return ("info", {}, {"id": self.fullname}) def _fetch_data(self): name, fields, params = self._fetch_info() path = API_PATH[name].format(**fields) return self._reddit.request("GET", path, params) def _fetch(self): data = self._fetch_data() data = data["data"] if not data["children"]: raise ClientException( "No data returned for comment {}".format( self.__class__.__name__, self.fullname ) ) comment_data = data["children"][0]["data"] other = type(self)(self._reddit, _data=comment_data) self.__dict__.update(other.__dict__) self._fetched = True def _extract_submission_id(self): if "context" in self.__dict__: return self.context.rsplit("/", 4)[1] return self.link_id.split("_", 1)[1] def parent(self) -> Union[_Comment, Submission]: """Return the parent of the comment. The returned parent will be an instance of either :class:`.Comment`, or :class:`.Submission`. If this comment was obtained through a :class:`.Submission`, then its entire ancestry should be immediately available, requiring no extra network requests. However, if this comment was obtained through other means, e.g., ``reddit.comment('COMMENT_ID')``, or ``reddit.inbox.comment_replies``, then the returned parent may be a lazy instance of either :class:`.Comment`, or :class:`.Submission`. Lazy comment example: .. 
code-block:: python comment = reddit.comment('cklhv0f') parent = comment.parent() # `replies` is empty until the comment is refreshed print(parent.replies) # Output: [] parent.refresh() print(parent.replies) # Output is at least: [Comment(id='cklhv0f')] .. warning:: Successive calls to :meth:`.parent()` may result in a network request per call when the comment is not obtained through a :class:`.Submission`. See below for an example of how to minimize requests. If you have a deeply nested comment and wish to most efficiently discover its top-most :class:`.Comment` ancestor you can chain successive calls to :meth:`.parent()` with calls to :meth:`.refresh()` at every 9 levels. For example: .. code-block:: python comment = reddit.comment('dkk4qjd') ancestor = comment refresh_counter = 0 while not ancestor.is_root: ancestor = ancestor.parent() if refresh_counter % 9 == 0: ancestor.refresh() refresh_counter += 1 print('Top-most Ancestor: {}'.format(ancestor)) The above code should result in 5 network requests to Reddit. Without the calls to :meth:`.refresh()` it would make at least 31 network requests. """ # pylint: disable=no-member if self.parent_id == self.submission.fullname: return self.submission if self.parent_id in self.submission._comments_by_id: # The Comment already exists, so simply return it return self.submission._comments_by_id[self.parent_id] # pylint: enable=no-member parent = Comment(self._reddit, self.parent_id.split("_", 1)[1]) parent._submission = self.submission return parent def refresh(self): """Refresh the comment's attributes. If using :meth:`.Reddit.comment` this method must be called in order to obtain the comment's replies. Example usage: .. code-block:: python comment = reddit.comment('dkk4qjd') comment.refresh() """ if "context" in self.__dict__: # Using hasattr triggers a fetch comment_path = self.context.split("?", 1)[0] else: path = API_PATH["submission"].format(id=self.submission.id) comment_path = "{}_/{}".format(path, self.id) # The context limit appears to be 8, but let's ask for more anyway. params = {"context": 100} if "reply_limit" in self.__dict__: params["limit"] = self.reply_limit if "reply_sort" in self.__dict__: params["sort"] = self.reply_sort comment_list = self._reddit.get(comment_path, params=params)[ 1 ].children if not comment_list: raise ClientException(self.MISSING_COMMENT_MESSAGE) # With context, the comment may be nested so we have to find it comment = None queue = comment_list[:] while queue and (comment is None or comment.id != self.id): comment = queue.pop() if isinstance(comment, Comment): queue.extend(comment._replies) if comment.id != self.id: raise ClientException(self.MISSING_COMMENT_MESSAGE) if self._submission is not None: del comment.__dict__["_submission"] # Don't replace if set self.__dict__.update(comment.__dict__) for reply in comment_list: reply.submission = self.submission return self class CommentModeration(ThingModerationMixin): """Provide a set of functions pertaining to Comment moderation. Example usage: .. code-block:: python comment = reddit.comment('dkk4qjd') comment.mod.approve() """ REMOVAL_MESSAGE_API = "removal_comment_message" def __init__(self, comment: Comment): """Create a CommentModeration instance. :param comment: The comment to moderate. """ self.thing = comment
bsd-2-clause
8,281,210,984,734,887,000
34.888579
79
0.574589
false
4.472058
false
false
false
DIPlib/diplib
pydip/src/__main__.py
1
1301
# PyDIP 3.0, Python bindings for DIPlib 3.0 # This file contains functionality to download bioformats # # (c)2020, Wouter Caarls # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys, os, urllib.request def progress(blocks, bs, size): barsize = 52 pct = blocks*bs / float(size) bardone = int(pct*barsize) print('[{0}{1}] {2: >3}%'.format('=' * bardone, '.'*(barsize-bardone), int(pct*100)), end='\r', flush=True) if __name__ == '__main__': if 'download_bioformats' in sys.argv: url = 'https://downloads.openmicroscopy.org/bio-formats/6.5.0/artifacts/bioformats_package.jar' filename = os.path.join(os.path.dirname(__file__), 'bioformats_package.jar') print('Retrieving', url) urllib.request.urlretrieve(url, filename, progress) print()
apache-2.0
2,904,086,616,590,238,000
39.65625
111
0.69485
false
3.450928
false
false
false
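For context on the progress callback in the pydip __main__.py record above: urllib.request.urlretrieve invokes its reporthook as reporthook(block_count, block_size, total_size), which is what progress(blocks, bs, size) relies on. A small self-contained sketch of the same pattern; the URL and filename are placeholders, not values from the original script.
import urllib.request
def hook(blocks, bs, size):
    done = min(blocks * bs, size) if size > 0 else blocks * bs   # total size is -1 when the server does not report it
    print('downloaded %d bytes' % done, end='\r', flush=True)
# urllib.request.urlretrieve('https://example.org/some-file.jar', 'some-file.jar', hook)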
Esri/executive-dashboard
ExecutiveReportingScript/executive_dashboard.py
1
13149
#------------------------------------------------------------------------------- # Name: executive_dashboard.py # Purpose: # # Author: Local Government # # Created: 05/06/2016 AM # Version: Python 2.7 #------------------------------------------------------------------------------- import json, urllib, arcrest, re from arcrest.security import AGOLTokenSecurityHandler from arcresthelper import securityhandlerhelper from arcresthelper import common from arcrest.agol import FeatureLayer from datetime import datetime as dt from datetime import timedelta as td import getpass import indicator_constants as ic from os.path import dirname, join # Messages m1 = "Can not not create token to access map. Please check username, password, and organization URL." m2 = "Can not access web map JSON. Please check map ID." m3 = "Map does not contain the specified data layer" m4 = "Map does not contain the specified stats layer" m5 = "Apply a filter to the stats layer so that exactly one record is available in the map." m6 = "Layer does not contain a filter that uses the provided date field, {0}, and the BETWEEN operator." m7 = "Stats layer capabilities must include 'Update'." def get_layer_properties(title, layers): """Parse the JSON of a web map and retrieve the URL of a specific layer, and any filters that have been applied to that layer.""" for layer in layers: if layer['title'] == title: url = layer['url'] if 'layerDefinition' in layer: query = layer['layerDefinition']['definitionExpression'] else: query = "1=1" return url, query return "", "" def connect_to_layer(url, sh, proxy_port=None, proxy_url=None, initialize=True): """Establish a connection to an ArcGIS Online feature layer""" fl = FeatureLayer( url=url, securityHandler=sh, proxy_port=proxy_port, proxy_url=proxy_url, initialize=initialize) return fl def count_features(layer, query="1=1"): """Count feature in a feature layer, optionally respecting a where clause""" cnt = layer.query(where=query, returnGeometry=False, returnCountOnly=True) return cnt['count'] def featureset_to_dict(fs): """Returns JSON of a feature set in dictionary format""" fs_str = fs.toJSON fs_dict =json.loads(fs_str) return fs_dict def get_attributes(layer, query="1=1", fields="*"): """Get all attributes for a record in a table""" vals = layer.query(where=query, out_fields=fields, returnGeometry=False) valsdict = featureset_to_dict(vals) return valsdict['features'][0]['attributes'] def update_values(layer, field_info, query="1=1"): """Update feature values """ out_fields = ['objectid'] for fld in field_info: out_fields.append(fld['FieldName']) resFeats = layer.query(where=query, out_fields=",".join(out_fields)) for feat in resFeats: for fld in field_info: feat.set_value(fld["FieldName"],fld['ValueToSet']) return layer def trace(): """ trace finds the line, the filename and error message and returns it to the user """ import traceback, inspect,sys tb = sys.exc_info()[2] tbinfo = traceback.format_tb(tb)[0] filename = inspect.getfile(inspect.currentframe()) # script name + line number line = tbinfo.split(", ")[1] # Get Python syntax error # synerror = traceback.format_exc().splitlines()[-1] return line, filename, synerror def create_security_handler(security_type='Portal', username="", password="", org_url="", proxy_url=None, proxy_port=None, referer_url=None, token_url=None, certificatefile=None, keyfile=None, client_id=None, secret_id=None): """Creates a security handler helper using the specified properties.""" securityinfo = {} securityinfo['security_type'] = security_type#LDAP, 
NTLM, OAuth, Portal, PKI, ArcGIS securityinfo['username'] = username securityinfo['password'] = password securityinfo['org_url'] = org_url securityinfo['proxy_url'] = proxy_url securityinfo['proxy_port'] = proxy_port securityinfo['referer_url'] = referer_url securityinfo['token_url'] = token_url securityinfo['certificatefile'] = certificatefile securityinfo['keyfile'] = keyfile securityinfo['client_id'] = client_id securityinfo['secret_id'] = secret_id return securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo) def get_epoch_time(date): epoch = dt.utcfromtimestamp(0) return (date - epoch).total_seconds() * 1000 def main(): with open(join(dirname(__file__), 'DashboardLog.log'), 'a') as log_file: # Get current time for report datetime range start_time = dt.utcnow() today_agol = get_epoch_time(start_time) temp_fc = arcpy.env.scratchGDB + "\\temp_fc" proj_out = "{}_proj".format(temp_fc) min_date = None try: # Get security handler for organization content org_shh = create_security_handler(security_type='Portal', username=ic.org_username, password=ic.org_password, org_url=ic.org_url) if org_shh.valid == False: raise Exception(org_shh.message) org_sh = org_shh.securityhandler # Access map JSON admin = arcrest.manageorg.Administration(securityHandler=org_sh) item = admin.content.getItem(ic.map_id) mapjson = item.itemData() if 'error' in mapjson: raise Exception(m2) # Get security handler for ags services ags_sh = None if ic.ags_username is not None and ic.ags_username != "": ags_sh = arcrest.AGSTokenSecurityHandler(username=ic.ags_username, password=ic.ags_password, token_url=ic.ags_token_url, proxy_url=None, proxy_port=None) print "Getting stats layer info..." # Get attributes of a single row in stats layer statsurl, statsquery = get_layer_properties(ic.stats_layer_name, mapjson['operationalLayers']) if not statsurl: raise Exception(m4) if ic.stats_service_type in ['AGOL', 'Portal']: statslayer = connect_to_layer(statsurl, org_sh) else: statslayer = connect_to_layer(statsurl, ags_sh) if not count_features(statslayer, query=statsquery) == 1: raise Exception(m5) stats = get_attributes(statslayer, query=statsquery) # If requested, update layer query using today as max date if ic.auto_update_date_query: print "Updating date filter on layer..." 
if ic.report_duration: # get diff value to min date if ic.report_time_unit == 'minutes': delta = td(minute=ic.report_duration) elif ic.report_time_unit == 'hours': delta = td(hours=ic.report_duration) elif ic.report_time_unit == 'days': delta = td(days=ic.report_duration) elif ic.report_time_unit == 'weeks': delta = td(weeks=ic.report_duration) min_date = start_time - delta else: # Use end date of previous report min_date = stats[ic.end_date] # update filter on layer for layer in mapjson['operationalLayers']: if layer['title'] == ic.data_layer_name: try: original_query = layer['layerDefinition']['definitionExpression'] #Find if the expression has a clause using the date field and Between operator match = re.search(".*?{0} BETWEEN.*?'(.*?)'.*?AND.*?'(.*?)'.*".format(ic.date_field), original_query) if match is None: raise ValueError() #Construct a new query replacing the min and max date values with the new dates new_query = match.group()[0:match.start(1)] + min_date.strftime("%Y-%m-%d %H:%M:%S") + match.group()[match.end(1):match.start(2)] + start_time.strftime("%Y-%m-%d %H:%M:%S") + match.group()[match.end(2):] # Update JSON with new query layer['layerDefinition']['definitionExpression'] = new_query except ValueError, KeyError: d = dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S") log_file.write("{}:\n".format(d)) log_file.write("{}\n".format(m6.format(ic.date_field))) print(m6.format(ic.date_field)) continue # Commit update to AGOL item useritem = item.userItem params = arcrest.manageorg.ItemParameter() useritem.updateItem(itemParameters = params, text=json.dumps(mapjson)) # Retrieve the url and queries associated with the data and stats layers print "Getting layer info..." dataurl, dataquery = get_layer_properties(ic.data_layer_name, mapjson['operationalLayers']) if not dataurl: raise Exception(m3) # Connect to the services print "Connecting to data layer..." if ic.data_service_type in ['AGOL', 'Portal']: datalayer = connect_to_layer(dataurl, org_sh) else: datalayer = connect_to_layer(dataurl, ags_sh) # If necessary, load new points to hosted service if ic.data_feature_class: # only attemp append if there are new features temp_fc = arcpy.CopyFeatures_management(ic.data_feature_class, temp_fc) sr_output = datalayer.extent['spatialReference']['wkid'] temp_fc_proj = arcpy.Project_management(temp_fc, proj_out, sr_output) # Load data from layer to service datalayer.deleteFeatures(where="1=1") datalayer.addFeatures(temp_fc_proj) arcpy.Delete_management(temp_fc) arcpy.Delete_management(temp_fc_proj) # Count the data features that meet the map query print "Counting features" feature_cnt = count_features(datalayer, query=dataquery) print "Getting new stats..." # Current editor editor = getpass.getuser() attributes = get_attributes(statslayer, statsquery) attributes[ic.datecurr] = today_agol attributes[ic.date1] = stats[ic.datecurr] attributes[ic.date2] = stats[ic.date1] attributes[ic.date3] = stats[ic.date2] attributes[ic.date4] = stats[ic.date3] attributes[ic.observcurr] = feature_cnt attributes[ic.observ1] = stats[ic.observcurr] attributes[ic.observ2] = stats[ic.observ1] attributes[ic.observ3] = stats[ic.observ2] attributes[ic.observ4] = stats[ic.observ3] attributes[ic.last_update] = today_agol attributes[ic.last_editor] = editor attributes[ic.end_date] = today_agol if min_date is None: attributes[ic.start_date] = stats[ic.end_date] else: attributes[ic.start_date] = get_epoch_time(min_date) edits = [{"attributes" : attributes}] statslayer.applyEdits(updateFeatures=edits) print "Done." 
except (common.ArcRestHelperError),e: print "error in function: %s" % e[0]['function'] print "error on line: %s" % e[0]['line'] print "error in file name: %s" % e[0]['filename'] print "with error message: %s" % e[0]['synerror'] if 'arcpyError' in e[0]: print "with arcpy message: %s" % e[0]['arcpyError'] except Exception as ex: print("{}\n".format(ex)) d = dt.strftime(dt.now(), "%Y-%m-%d %H:%M:%S") log_file.write("{}:\n".format(d)) log_file.write("{}\n".format(ex)) finally: if arcpy.Exists(temp_fc): arcpy.Delete_management(temp_fc) if arcpy.Exists(proj_out): arcpy.Delete_management(proj_out) # End main function if __name__ == '__main__': main()
apache-2.0
8,910,792,757,372,526,000
39.962617
231
0.558369
false
4.179593
false
false
false
m-ober/byceps
byceps/services/attendance/service.py
1
3675
""" byceps.services.attendance.service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from collections import defaultdict from typing import Dict, Iterable, List, Optional, Set, Tuple from ...database import db, paginate, Pagination from ...typing import PartyID, UserID from ..ticketing.models.ticket import Category as DbCategory, Ticket as DbTicket from ..user.models.user import User as DbUser from .transfer.models import Attendee, AttendeeTicket def get_attendees_paginated( party_id: PartyID, page: int, per_page: int, *, search_term: Optional[str] = None, ) -> Pagination: """Return the party's ticket users with tickets and seats.""" users_paginated = _get_users_paginated( party_id, page, per_page, search_term=search_term ) users = users_paginated.items user_ids = {u.id for u in users} tickets = _get_tickets_for_users(party_id, user_ids) tickets_by_user_id = _index_tickets_by_user_id(tickets) attendees = list(_generate_attendees(users, tickets_by_user_id)) users_paginated.items = attendees return users_paginated def _get_users_paginated( party_id: PartyID, page: int, per_page: int, *, search_term: Optional[str] = None, ) -> Pagination: # Drop revoked tickets here already to avoid users without tickets # being included in the list. query = DbUser.query \ .distinct() \ .options( db.load_only('id', 'screen_name', 'deleted'), db.joinedload('avatar_selection').joinedload('avatar'), ) \ .join(DbTicket, DbTicket.used_by_id == DbUser.id) \ .filter(DbTicket.revoked == False) \ .join(DbCategory).filter(DbCategory.party_id == party_id) if search_term: query = query \ .filter(DbUser.screen_name.ilike(f'%{search_term}%')) query = query \ .order_by(db.func.lower(DbUser.screen_name)) return paginate(query, page, per_page) def _get_tickets_for_users( party_id: PartyID, user_ids: Set[UserID] ) -> List[DbTicket]: return DbTicket.query \ .options( db.joinedload('category'), db.joinedload('occupied_seat').joinedload('area'), ) \ .for_party(party_id) \ .filter(DbTicket.used_by_id.in_(user_ids)) \ .filter(DbTicket.revoked == False) \ .all() def _index_tickets_by_user_id( tickets: Iterable[DbTicket] ) -> Dict[UserID, Set[DbTicket]]: tickets_by_user_id = defaultdict(set) for ticket in tickets: tickets_by_user_id[ticket.used_by_id].add(ticket) return tickets_by_user_id def _generate_attendees( users: Iterable[DbUser], tickets_by_user_id: Dict[UserID, Set[DbTicket]] ) -> Iterable[Attendee]: for user in users: tickets = tickets_by_user_id[user.id] attendee_tickets = _to_attendee_tickets(tickets) yield Attendee(user, attendee_tickets) def _to_attendee_tickets(tickets: Iterable[DbTicket]) -> List[AttendeeTicket]: attendee_tickets = [ AttendeeTicket(t.occupied_seat, t.user_checked_in) for t in tickets ] attendee_tickets.sort(key=_get_attendee_ticket_sort_key) return attendee_tickets def _get_attendee_ticket_sort_key( attendee_ticket: AttendeeTicket ) -> Tuple[bool, str, bool]: return ( # List tickets with occupied seat first. attendee_ticket.seat is None, # Sort by seat label. attendee_ticket.seat.label if attendee_ticket.seat else None, # List checked in tickets first. not attendee_ticket.checked_in, )
bsd-3-clause
7,584,223,255,903,972,000
28.4
80
0.647891
false
3.301887
false
false
false
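The tuple returned by _get_attendee_ticket_sort_key in the byceps attendance service above orders seated tickets before seatless ones, then by seat label, with checked-in tickets first for the same seat. A small self-contained sketch of that sort-key pattern using made-up records; the namedtuples below are stand-ins for illustration, not byceps types.
from collections import namedtuple
Seat = namedtuple('Seat', 'label')
Ticket = namedtuple('Ticket', 'seat checked_in')
tickets = [Ticket(None, True), Ticket(Seat('A-2'), False), Ticket(Seat('A-1'), True)]
tickets.sort(key=lambda t: (t.seat is None, t.seat.label if t.seat else None, not t.checked_in))
# result: seat A-1 (checked in) first, then seat A-2, then the ticket without a seat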
adamhaney/pykell
tests.py
1
3811
from unittest import TestCase from .types import expects_type, returns_type, T @expects_type(a=T(int), b=T(str)) def example_kw_arg_function(a, b): return a, b class ExpectsTests(TestCase): def test_correct_expectations_kw(self): self.assertEqual(example_kw_arg_function(a=1, b="baz"), (1, "baz")) @returns_type(T(int)) def add(x, y): return x + y @returns_type(T(str)) def bad_add(x, y): return x + y class ReturnTests(TestCase): def test_returns_type_positive(self): self.assertEqual(add(x=1, y=2), 3) def test_returns_type_negative(self): with self.assertRaises(TypeError): bad_add(x=1, y=2) class TypeClassTests(TestCase): def test_type_enforcement_positive(self): str_type = T(str) self.assertTrue(str_type.validate("abc")) def test_type_enforcement_negative(self): str_type = T(str) with self.assertRaises(TypeError): str_type.validate(27) def test_data_enforcement_positive(self): z_string = T(str, lambda d: d.startswith('z')) self.assertTrue(z_string.validate('zab')) def test_data_enforcement_negative(self): z_string = T(str, lambda d: d.startswith('z')) with self.assertRaises(TypeError): z_string.validate('abc') def test_multiple_types_positive(self): """ make sure we can add two types to the class and that it then says an object having one of those types is valid """ str_int_type = T(int) str_int_type.contribute_type(str) self.assertTrue(str_int_type.validate(2)) self.assertTrue(str_int_type.validate("boo")) def test_multiple_types_negative(self): str_int_type = T(int) str_int_type.contribute_type(str) with self.assertRaises(TypeError): str_int_type.validate(2.0) def test_multiple_validators_positive(self): a_z_type = T(str, lambda d: d.startswith('a')) a_z_type.contribute_validator(lambda d: d.endswith('z')) self.assertTrue("abcdz") def test_multiple_validators_negative(self): a_z_type = T(str, lambda d: d.startswith('a')) a_z_type.contribute_validator(lambda d: d.endswith('z')) with self.assertRaises(TypeError): a_z_type.validate("abc") def test_pipe_multi_type_syntax(self): str_int_type = T(int) | T(str) self.assertTrue(str_int_type.validate(2)) self.assertTrue(str_int_type.validate("boo")) class PykellContributionTests(TestCase): def setUp(self): self.positive_even_number = T(int, lambda d: d > 0) | T(float, lambda d: d % 2 == 0) def test_postive_float_is_valid(self): self.assertTrue(self.positive_even_number.validate(2.0)) def test_positive_integer_is_valid(self): self.assertTrue(self.positive_even_number.validate(4)) def test_negative_float_is_invalid(self): with self.assertRaises(TypeError): self.positive_even_number.validate(-4.0) def test_negative_int_is_invalid(self): with self.assertRaises(TypeError): self.positive_even_number.validate(-4) def test_odd_float_is_invalid(self): with self.assertRaises(TypeError): self.positive_even_number.validate(3.0) def test_odd_int_is_invalid(self): with self.assertRaises(TypeError): self.positive_even_number.validate(3) class TypeNotRequiredTests(TestCase): """ In some cases we may just care that a validator is true, not what the underlying type is """ def setUp(self): self.positive_something = T(validator=lambda d: d > 0) def test_validator_without_type(self): self.assertTrue(self.positive_something.validate(2))
mit
7,787,685,935,964,463,000
29.246032
92
0.635791
false
3.470856
true
false
false
psathyrella/partis
python/treeutils.py
1
183169
import __builtin__ import operator import string import itertools import copy import collections import random import csv from cStringIO import StringIO import subprocess import tempfile import os import numpy import sys from distutils.version import StrictVersion import dendropy import time import math import json import pickle import warnings import traceback if StrictVersion(dendropy.__version__) < StrictVersion('4.0.0'): # not sure on the exact version I need, but 3.12.0 is missing lots of vital tree fcns raise RuntimeError("dendropy version 4.0.0 or later is required (found version %s)." % dendropy.__version__) import yaml try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper import utils lb_metrics = collections.OrderedDict(('lb' + let, 'lb ' + lab) for let, lab in (('i', 'index'), ('r', 'ratio'))) selection_metrics = ['lbi', 'lbr', 'cons-dist-aa', 'cons-frac-aa', 'aa-lbi', 'aa-lbr'] # I really thought this was somewhere, but can't find it so adding it here typical_bcr_seq_len = 400 default_lb_tau = 0.0025 default_lbr_tau_factor = 20 default_min_selection_metric_cluster_size = 10 dummy_str = 'x-dummy-x' legtexts = { 'metric-for-target-distance' : 'target dist. metric', 'n-sim-seqs-per-generation' : 'N sampled', 'leaf-sampling-scheme' : 'sampling scheme', 'target-count' : 'N target seqs', 'n-target-clusters' : 'N target clust.', 'min-target-distance' : 'min target dist.', 'uniform-random' : 'unif. random', 'affinity-biased' : 'affinity biased', 'high-affinity' : 'perf. affinity', 'cons-dist-aa' : 'aa-cdist', 'cons-frac-aa' : 'aa-cfrac', 'cons-dist-nuc' : 'nuc-cdist', 'shm' : 'n-shm', 'aa-lbi' : 'aa-lbi', 'aa-lbr' : 'aa-lbr', } # ---------------------------------------------------------------------------------------- def smetric_fname(fname): return utils.insert_before_suffix('-selection-metrics', fname) # ---------------------------------------------------------------------------------------- def add_cons_seqs(line, aa=False): ckey = 'consensus_seq' if ckey not in line: line[ckey] = utils.cons_seq_of_line(line) if aa: ckey += '_aa' if ckey not in line: line[ckey] = utils.cons_seq_of_line(line, aa=True) # ---------------------------------------------------------------------------------------- def lb_cons_dist(line, iseq, aa=False, frac=False): # at every point where this can add something to <line> (i.e. consensus seqs and aa seqs) it checks that they're not already there, so it will never do those calculations twice. 
But the final hamming calculation is *not* cached so will get redone if you call more than once if aa and 'seqs_aa' not in line: utils.add_seqs_aa(line) add_cons_seqs(line, aa=aa) tstr = '_aa' if aa else '' hfcn = utils.hamming_fraction if frac else utils.hamming_distance # NOTE it's important to use this if you want the fraction (rather than dividing by sequence length afterward) since you also need to account for ambig bases in the cons seq return hfcn(line['consensus_seq'+tstr], line['seqs'+tstr][iseq], amino_acid=aa) # ---------------------------------------------------------------------------------------- def add_cons_dists(line, aa=False, debug=False): ckey = 'cons_dists_' + ('aa' if aa else 'nuc') if ckey not in line: line[ckey] = [lb_cons_dist(line, i, aa=aa) for i, u in enumerate(line['unique_ids'])] if debug: # it would kind of make more sense to have this in some of the fcns that this fcn is calling, but then I'd have to pass the debug arg through a bunch of tiny fcns that don't really need it tstr = '_aa' if aa else '' # don't need this unless we turn the tie resolver stuff back on: # if aa: # we have to add this by hand since we don't actually use it to calculate the aa cons seq -- we get that by just translating the nuc cons seq # utils.add_naive_seq_aa(line) hfkey = ckey.replace('cons_dists_', 'cons_fracs_') line[hfkey] = [lb_cons_dist(line, i, aa=aa, frac=True) for i, u in enumerate(line['unique_ids'])] extra_keys = [ckey, hfkey] if 'cell-types' in line: extra_keys.append('cell-types') utils.print_cons_seq_dbg(utils.seqfos_from_line(line, aa=aa, extra_keys=extra_keys), line['consensus_seq'+tstr], align=False, aa=aa) # NOTE you probably don't want to turn the naive tie resolver back on in utils.cons_seq_of_line(), but if you do, this reminds you to also do it here so the dbg is correct, tie_resolver_seq=line['naive_seq'+tstr], tie_resolver_label='naive seq') # ---------------------------------------------------------------------------------------- def add_cdists_to_lbfo(line, lbfo, cdist, debug=False): # it's kind of dumb to store them both in <line> and in <lbfo> (and thus in <line['tree-info']['lb']>), but I think it's ultimately the most sensible thing, given the inherent contradiction that a) we want to *treat* the cons dists like lbi/lbr tree metrics in almost every way, but b) they're *not* actually tree metrics in the sense that they don't use a tree (also, we want the minus sign in lbfo) add_cons_dists(line, aa='-aa' in cdist, debug=debug) tkey = cdist.replace('cons-dist-', 'cons_dists_') # yes, I want the names to be different (although admittedly with a time machine it'd be set up differently) lbfo[cdist] = {u : -line[tkey][i] for i, u in enumerate(line['unique_ids'])} # ---------------------------------------------------------------------------------------- def smvals(line, smetric, iseq=None, nullval=None): # retrieve selection metric values from within line['tree-info']['lb'][yadda yadda], i.e. 
as if they were a normal list-based per-seq quantity # NOTE this is what you use if the values are already there, in 'tree-info' -- if you want to calculate them, there's other fcns if 'tree-info' not in line or 'lb' not in line['tree-info'] or smetric not in line['tree-info']['lb']: return [nullval for _ in line['unique_ids']] if iseq is None else nullval lbfo = line['tree-info']['lb'][smetric] if iseq is None: return [lbfo.get(u, nullval) for u in line['unique_ids']] else: return lbfo.get(line['unique_ids'][iseq], nullval) # ---------------------------------------------------------------------------------------- def lb_cons_seq_shm(line, aa=False): add_cons_seqs(line, aa=aa) if aa and 'naive_seq_aa' not in line: utils.add_naive_seq_aa(line) tstr = '_aa' if aa else '' return utils.hamming_distance(line['naive_seq'+tstr], line['consensus_seq'+tstr], amino_acid=aa) # ---------------------------------------------------------------------------------------- def edge_dist_fcn(dtree, uid): # duplicates fcn in lbplotting.make_lb_scatter_plots() node = dtree.find_node_with_taxon_label(uid) return min(node.distance_from_tip(), node.distance_from_root()) # NOTE the tip one gives the *maximum* distance to a leaf, but I think that's ok # ---------------------------------------------------------------------------------------- cgroups = ['within-families', 'among-families'] # different ways of grouping clusters, i.e. "cluster groupings" dtr_targets = {'within-families' : ['affinity', 'delta-affinity'], 'among-families' : ['affinity', 'delta-affinity']} # variables that we try to predict, i.e. we train on dtr for each of these pchoices = ['per-seq', 'per-cluster'] # per-? choice, i.e. is this a per-sequence or per-cluster quantity dtr_metrics = ['%s-%s-dtr'%(cg, tv) for cg in cgroups for tv in dtr_targets[cg]] # NOTE order of this has to remain the same as in the loops used to generate it dtr_vars = {'within-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'], # NOTE when iterating over this, you have to take the order from <pchoices>, since both pchoices go into the same list of variable values 'per-cluster' : []}, 'among-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'], 'per-cluster' : ['fay-wu-h', 'cons-seq-shm-nuc', 'cons-seq-shm-aa', 'mean-shm', 'max-lbi', 'max-lbr']}, } default_dtr_options = { # 'base-regr' : 'vars' : None, # uses <dtr_vars> for default 'min_samples_leaf' : 5, # only used for grad-boost and bag 'max_depth' : 5, # only used for grad-boost and bag 'ensemble' : 'grad-boost', # ['bag', 'forest', 'ada-boost', 'n_estimators' : 100, 'n_train_per_family' : 1, # for among-families dtr, only train on this many cells per family (to avoid over training). 
Set to None to use all of 'em 'n_jobs' : None, # default set below (also, this is not used for boosted ensembles) } # ---------------------------------------------------------------------------------------- def get_dtr_varnames(cgroup, varlists, with_pc=False): # arg, <with_pc> is fucking ugly return [(pc, vn) if with_pc else vn for pc in pchoices for vn in varlists[cgroup][pc]] # ---------------------------------------------------------------------------------------- def get_dtr_vals(cgroup, varlists, line, lbfo, dtree): # ---------------------------------------------------------------------------------------- def getval(pchoice, var, uid): if pchoice == 'per-seq': if var in ['lbi', 'lbr', 'cons-dist-nuc', 'cons-dist-aa']: return lbfo[var][uid] # NOTE this will fail in (some) cases where the uids in the tree and annotation aren't the same, but I don't care atm since it looks like we won't really be using the dtr elif var == 'edge-dist': return edge_dist_fcn(dtree, uid) elif var == 'shm': return utils.per_seq_val(line, 'n_mutations', uid) elif var == 'shm-aa': return utils.shm_aa(line, line['unique_ids'].index(uid)) else: assert False elif pchoice == 'per-cluster': return per_cluster_vals[var] else: assert False # ---------------------------------------------------------------------------------------- if cgroup == 'among-families': per_cluster_vals = { 'cons-seq-shm-nuc' : lb_cons_seq_shm(line), 'cons-seq-shm-aa' : lb_cons_seq_shm(line, aa=True), 'fay-wu-h' : -utils.fay_wu_h(line), 'mean-shm' : numpy.mean(line['n_mutations']), 'max-lbi' : max(lbfo['lbi'].values()), 'max-lbr' : max(lbfo['lbr'].values()), } vals = [] for uid in line['unique_ids']: vals.append([getval(pc, var, uid) for pc, var in get_dtr_varnames(cgroup, varlists, with_pc=True)]) return vals # ---------------------------------------------------------------------------------------- def dtrfname(dpath, cg, tvar, suffix='pickle'): return '%s/%s-%s-dtr-model.%s' % (dpath, cg, tvar, suffix) # ---------------------------------------------------------------------------------------- def tmfname(plotdir, metric, x_axis_label, cg=None, tv=None, use_relative_affy=False): # tree metric fname assert x_axis_label in ['affinity', 'n-ancestor'] # arg, this is messy assert tv in [None, 'affinity', 'delta-affinity'] metric_str = metric if metric != 'dtr' else '-'.join([cg, tv, metric]) vs_str = '%s-vs%s-%s' % (metric_str, '-relative' if x_axis_label == 'affinity' and use_relative_affy else '', x_axis_label) return '%s/true-tree-metrics/%s/%s-ptiles/%s-true-tree-ptiles-all-clusters.yaml' % (plotdir, metric_str, vs_str, vs_str) # NOTE has 'true-tree' in there, which is fine for now but may need to change # ---------------------------------------------------------------------------------------- def write_pmml(pmmlfname, dmodel, varlist, targetvar): try: # seems to crash for no @**($ing reason sometimes if 'sklearn2pmml' not in sys.modules: # just so people don't need to install/import it if they're not training import sklearn2pmml pmml_pipeline = sys.modules['sklearn2pmml'].make_pmml_pipeline(dmodel, active_fields=varlist, target_fields=targetvar) sys.modules['sklearn2pmml'].sklearn2pmml(pmml_pipeline, pmmlfname) except: elines = traceback.format_exception(*sys.exc_info()) print utils.pad_lines(''.join(elines)) print ' %s pmml conversion failed (see above), but continuing' % utils.color('red', 'error') # ---------------------------------------------------------------------------------------- def train_dtr_model(trainfo, outdir, cfgvals, cgroup, 
tvar): if os.path.exists(dtrfname(outdir, cgroup, tvar)): print ' %s dtr model file exists, so skipping training: %s' % (utils.color('yellow', 'warning'), dtrfname(outdir, cgroup, tvar)) return if 'sklearn.ensemble' not in sys.modules: with warnings.catch_warnings(): # NOTE not sure this is actually catching the warnings UPDATE oh, I think the warnings are getting thrown by function calls, not imports warnings.simplefilter('ignore', category=DeprecationWarning) # numpy is complaining about how sklearn is importing something, and I really don't want to *@*($$ing hear about it from sklearn import tree from sklearn import ensemble skens = sys.modules['sklearn.ensemble'] sktree = sys.modules['sklearn.tree'] start = time.time() base_kwargs, kwargs = {}, {'n_estimators' : cfgvals['n_estimators']} if cfgvals['ensemble'] == 'bag': base_kwargs = {'min_samples_leaf' : cfgvals['min_samples_leaf'], 'max_depth' : cfgvals['max_depth']} kwargs['base_estimator'] = sktree.DecisionTreeRegressor(**base_kwargs) # we can pass this to ada-boost, but I'm not sure if we should (it would override the default max_depth=3, for instance) if 'grad-boost' in cfgvals['ensemble']: kwargs['max_depth'] = cfgvals['max_depth'] kwargs['min_samples_leaf'] = cfgvals['min_samples_leaf'] if 'boost' not in cfgvals['ensemble']: kwargs['n_jobs'] = cfgvals['n_jobs'] if cfgvals['ensemble'] == 'bag': model = skens.BaggingRegressor(**kwargs) elif cfgvals['ensemble'] == 'forest': model = skens.RandomForestRegressor(**kwargs) elif cfgvals['ensemble'] == 'ada-boost': model = skens.AdaBoostRegressor(**kwargs) elif cfgvals['ensemble'] == 'grad-boost': model = skens.GradientBoostingRegressor(**kwargs) # if too slow, maybe try the new hist gradient boosting stuff else: assert False model.fit(trainfo['in'], trainfo['out']) #, sample_weight=trainfo['weights']) tmpkeys = [k for k in cfgvals if k != 'vars' and (k in kwargs or k in base_kwargs)] # don't want to print the inapplicable ones print ' %s-families %s (%d observations in %.1fs): %s' % (utils.color('green', cgroup.split('-')[0]), utils.color('blue', tvar), len(trainfo['in']), time.time() - start, ' '.join('%s %s'%(k, cfgvals[k]) for k in sorted(tmpkeys))) print ' feature importances:' print ' mean err' for iv, vname in enumerate([v for pc in pchoices for v in cfgvals['vars'][cgroup][pc]]): if cfgvals['ensemble'] == 'grad-boost': filist = [model.feature_importances_[iv]] else: filist = [estm.feature_importances_[iv] for estm in model.estimators_] wlist = None if cfgvals['ensemble'] == 'ada-boost': wlist = [w for w in model.estimator_weights_ if w > 0] assert len(wlist) == len(model.estimators_) # it terminates early (i.e. before making all the allowed estimators) if it already has perfect performance, but doesn't leave the lists the same length print ' %17s %5.3f %5.3f' % (vname, numpy.average(filist, weights=wlist), (numpy.std(filist, ddof=1) / math.sqrt(len(filist))) if len(filist) > 1 else 0.) 
# NOTE not sure if std should also use the weights if not os.path.exists(outdir): os.makedirs(outdir) if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp) import joblib with open(dtrfname(outdir, cgroup, tvar), 'w') as dfile: sys.modules['joblib'].dump(model, dfile) write_pmml(dtrfname(outdir, cgroup, tvar, suffix='pmml'), model, get_dtr_varnames(cgroup, cfgvals['vars']), tvar) # ---------------------------------------------------------------------------------------- # NOTE the min lbi is just tau, but I still like doing it this way lb_bounds = { # calculated to 17 generations, which is quite close to the asymptote typical_bcr_seq_len : { # seq_len 0.0030: (0.0030, 0.0331), # if tau is any bigger than this it doesn't really converge 0.0025: (0.0025, 0.0176), 0.0020: (0.0020, 0.0100), 0.0010: (0.0010, 0.0033), 0.0005: (0.0005, 0.0015), }, # it turns out the aa lb metrics need the above nuc normalization (i.e. if we normalize with the below, the values are huge, like lots are 10ish). I guess maybe this makes sense, since i'm taking the nuc tree topology and scaling it to aa # int(typical_bcr_seq_len / 3.) : { # amino acid (133) # 0.0030: (0.0030, 0.0099), # 0.0025: (0.0025, 0.0079), # 0.0020: (0.0020, 0.0061), # 0.0010: (0.0010, 0.0030), # 0.0005: (0.0005, 0.0015), # } } # ---------------------------------------------------------------------------------------- def normalize_lb_val(metric, lbval, tau, seq_len=typical_bcr_seq_len): if metric == 'lbr': return lbval if seq_len not in lb_bounds: raise Exception('seq len %d not in cached lb bound values (available: %s)' % (seq_len, lb_bounds.keys())) if tau not in lb_bounds[seq_len]: raise Exception('tau value %f not in cached lb bound values (available: %s)' % (tau, lb_bounds[seq_len].keys())) lbmin, lbmax = lb_bounds[seq_len][tau] return (lbval - lbmin) / (lbmax - lbmin) # ---------------------------------------------------------------------------------------- def get_treestr_from_file(treefname): with open(treefname) as treefile: return '\n'.join(treefile.readlines()) # ---------------------------------------------------------------------------------------- def as_str(dtree): # just a shorthand (adding this very late, so could stand to add this to a lot of places that use dtree.as_string()) return dtree.as_string(schema='newick').strip() # ---------------------------------------------------------------------------------------- def cycle_through_ascii_conversion(dtree=None, treestr=None, taxon_namespace=None): # run once through the cycle of str -> dtree -> str (or dtree -> str -> dtree) if dtree is not None: return get_dendro_tree(treestr=as_str(dtree), taxon_namespace=taxon_namespace) elif treestr is not None: return as_str(get_dendro_tree(treestr=treestr)) else: assert False # ---------------------------------------------------------------------------------------- def get_dendro_tree(treestr=None, treefname=None, taxon_namespace=None, schema='newick', ignore_existing_internal_node_labels=False, suppress_internal_node_taxa=False, debug=False): # specify either <treestr> or <treefname> # <ignore_existing_internal_node_labels> is for when you want the internal nodes labeled (which we usually do, since we want to calculate selection metrics for internal nodes), but you also want to ignore the existing internal node labels (e.g.
with FastTree output, where they're floats) # <suppress_internal_node_taxa> on the other hand is for when you don't want to have taxa for any internal nodes (e.g. when calculating the tree difference metrics, the two trees have to have the same taxon namespace, but since they in general have different internal nodes, the internal nodes can't have taxa) assert treestr is None or treefname is None if ignore_existing_internal_node_labels and suppress_internal_node_taxa: raise Exception('doesn\'t make sense to specify both') if treestr is None: treestr = get_treestr_from_file(treefname) if debug: print ' getting dendro tree from string:\n %s' % treestr if taxon_namespace is not None: print ' and taxon namespace: %s' % ' '.join([t.label for t in taxon_namespace]) # dendropy doesn't make taxons for internal nodes by default, so it puts the label for internal nodes in node.label instead of node.taxon.label, but it crashes if it gets duplicate labels, so you can't just always turn off internal node taxon suppression dtree = dendropy.Tree.get_from_string(treestr, schema, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=(ignore_existing_internal_node_labels or suppress_internal_node_taxa), preserve_underscores=True, rooting='force-rooted') # make sure the tree is rooted, to avoid nodes disappearing in remove_dummy_branches() (and proably other places as well) if dtree.seed_node.edge_length > 0: # this would be easy to fix, but i think it only happens from simulation trees from treegenerator print ' %s seed/root node has non-zero edge length (i.e. there\'s a branch above it)' % utils.color('red', 'warning') label_nodes(dtree, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug) # set internal node labels to any found in <treestr> (unless <ignore_existing_internal_node_labels> is set), otherwise make some up (e.g. aa, ab, ac) # # uncomment for more verbosity: # check_node_labels(dtree, debug=debug) # makes sure that for all nodes, node.taxon is not None, and node.label *is* None (i.e. that label_nodes did what it was supposed to, as long as suppress_internal_node_taxa wasn't set) # if debug: # print utils.pad_lines(get_ascii_tree(dendro_tree=dtree)) return dtree # ---------------------------------------------------------------------------------------- def import_bio_phylo(): if 'Bio.Phylo' not in sys.modules: from Bio import Phylo # slow af to import return sys.modules['Bio.Phylo'] # ---------------------------------------------------------------------------------------- def get_bio_tree(treestr=None, treefname=None, schema='newick'): # NOTE don't use this in future (all current uses are commented) Phylo = import_bio_phylo() if treestr is not None: return Phylo.read(StringIO(treestr), schema) elif treefname is not None: with open(treefname) as treefile: return Phylo.read(treefile, schema) else: assert False # ---------------------------------------------------------------------------------------- def get_leaf_depths(tree, treetype='dendropy'): # NOTE structure of dictionary may depend on <treetype>, e.g. whether non-named nodes are included (maybe it doesn't any more? 
unless you return <clade_keyed_depths> at least) if treetype == 'dendropy': depths = {n.taxon.label : n.distance_from_root() for n in tree.leaf_node_iter()} elif treetype == 'Bio': clade_keyed_depths = tree.depths() # keyed by clade, not clade name (so unlabelled nodes are accessible) depths = {n.name : clade_keyed_depths[n] for n in tree.find_clades()} else: assert False return depths # ---------------------------------------------------------------------------------------- def get_n_leaves(tree): return len(tree.leaf_nodes()) # ---------------------------------------------------------------------------------------- def get_n_nodes(tree): return len(list(tree.preorder_node_iter())) # ---------------------------------------------------------------------------------------- def collapse_nodes(dtree, keep_name, remove_name, keep_name_node=None, remove_name_node=None, debug=False): # collapse edge between <keep_name> and <remove_name>, leaving remaining node with name <keep_name> # NOTE I wrote this to try to fix the phylip trees from lonr.r, but it ends up they're kind of unfixable... but this fcn may be useful in the future, I guess, and it works UPDATE yep using it now for something else if debug: print ' collapsing %s and %s (the former will be the label for the surviving node)' % (keep_name, remove_name) print utils.pad_lines(get_ascii_tree(dendro_tree=dtree)) if keep_name_node is None: keep_name_node = dtree.find_node_with_taxon_label(keep_name) if remove_name_node is None: assert remove_name is not None # if we *are* passed <remove_name_node>, it's ok for <remove_name> to be None remove_name_node = dtree.find_node_with_taxon_label(remove_name) swapped = False if keep_name_node in remove_name_node.child_nodes(): assert remove_name_node not in keep_name_node.child_nodes() parent_node = remove_name_node if parent_node.taxon is None: parent_node.taxon = dendropy.Taxon() parent_node.taxon.label = keep_name # have to rename it, since we always actually keep the parent swapped = True child_node = keep_name_node elif remove_name_node in keep_name_node.child_nodes(): assert keep_name_node not in remove_name_node.child_nodes() parent_node = keep_name_node child_node = remove_name_node else: print ' node names %s and %s don\'t share an edge:' % (keep_name, remove_name) print ' keep node children: %s' % ' '.join([n.taxon.label for n in keep_name_node.child_nodes()]) print ' remove node children: %s' % ' '.join([n.taxon.label for n in remove_name_node.child_nodes()]) raise Exception('see above') if child_node.is_leaf(): dtree.prune_taxa([child_node.taxon], suppress_unifurcations=False) if debug: print ' pruned leaf node %s' % (('%s (renamed parent to %s)' % (remove_name, keep_name)) if swapped else remove_name) else: found = False for edge in parent_node.child_edge_iter(): if edge.head_node is child_node: edge.collapse() # removes child node (in dendropy language: inserts all children of the head_node (child) of this edge as children of the edge's tail_node (parent)) Doesn't modify edge lengths by default (i.e. collapsed edge should have zero length). found = True break assert found if debug: print ' collapsed edge between %s and %s' % (keep_name, remove_name) if debug: print utils.pad_lines(get_ascii_tree(dendro_tree=dtree)) assert dtree.find_node_with_taxon_label(remove_name) is None # NOTE do i need to add this? 
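# (hypothetical usage sketch, made-up node names) e.g. collapse_nodes(dtree, 'parent-x', 'child-y') merges the edge between the adjacent nodes 'parent-x' and 'child-y' into a single node labeled 'parent-x', after which dtree.find_node_with_taxon_label('child-y') is None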
# dtree.purge_taxon_namespace() # ---------------------------------------------------------------------------------------- def check_node_labels(dtree, debug=False): if debug: print 'checking node labels for:' print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250)) for node in dtree.preorder_node_iter(): if node.taxon is None: raise Exception('taxon is None') if debug: print ' ok: %s' % node.taxon.label if node.label is not None: raise Exception('node.label not set to None') # ---------------------------------------------------------------------------------------- # by default, mostly adds labels to internal nodes (also sometimes the root node) that are missing them def label_nodes(dendro_tree, ignore_existing_internal_node_labels=False, ignore_existing_internal_taxon_labels=False, suppress_internal_node_taxa=False, initial_length=3, debug=False): if ignore_existing_internal_node_labels and suppress_internal_node_taxa: raise Exception('doesn\'t make sense to specify both') if debug: print ' labeling nodes' # print ' before:' # print utils.pad_lines(get_ascii_tree(dendro_tree)) tns = dendro_tree.taxon_namespace initial_names = set([t.label for t in tns]) # should all be leaf nodes, except the naive sequence (at least for now) if debug: print ' initial taxon labels: %s' % ' '.join(sorted(initial_names)) potential_names, used_names = None, None new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names, initial_length=initial_length, shuffle=True) skipped_dbg, relabeled_dbg = [], [] for node in dendro_tree.preorder_node_iter(): if node.taxon is not None and not (ignore_existing_internal_taxon_labels and not node.is_leaf()): skipped_dbg += ['%s' % node.taxon.label] assert node.label is None # if you want to change this, you have to start setting the node labels in build_lonr_tree(). 
For now, I like having the label in _one_ freaking place continue # already properly labeled current_label = node.label node.label = None if suppress_internal_node_taxa and not node.is_leaf(): continue if current_label is None or ignore_existing_internal_node_labels: new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names) else: # turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway) # if tns.has_taxon_label(current_label): # raise Exception('duplicate node label \'%s\'' % current_label) new_label = current_label # turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway) # if tns.has_taxon_label(new_label): # raise Exception('failed labeling internal nodes (chose name \'%s\' that was already in the taxon namespace)' % new_label) node.taxon = dendropy.Taxon(new_label) tns.add_taxon(node.taxon) relabeled_dbg += ['%s' % new_label] if debug: print ' skipped (already labeled): %s' % ' '.join(sorted(skipped_dbg)) print ' (re-)labeled: %s' % ' '.join(sorted(relabeled_dbg)) # print ' after:' # print utils.pad_lines(get_ascii_tree(dendro_tree)) # ---------------------------------------------------------------------------------------- def translate_labels(dendro_tree, translation_pairs, debug=False): if debug: print get_ascii_tree(dendro_tree=dendro_tree) for old_label, new_label in translation_pairs: taxon = dendro_tree.taxon_namespace.get_taxon(old_label) if taxon is None: raise Exception('requested taxon with old name \'%s\' not present in tree' % old_label) taxon.label = new_label if debug: print '%20s --> %s' % (old_label, new_label) if debug: print get_ascii_tree(dendro_tree=dendro_tree) # ---------------------------------------------------------------------------------------- def get_mean_leaf_height(tree=None, treestr=None): assert tree is None or treestr is None if tree is None: tree = get_dendro_tree(treestr=treestr, schema='newick') heights = get_leaf_depths(tree).values() return sum(heights) / len(heights) # ---------------------------------------------------------------------------------------- def get_ascii_tree(dendro_tree=None, treestr=None, treefname=None, extra_str='', width=200, schema='newick', label_fcn=None): """ AsciiTreePlot docs (don't show up in as_ascii_plot()): plot_metric : str A string which specifies how branches should be scaled, one of: 'age' (distance from tips), 'depth' (distance from root), 'level' (number of branches from root) or 'length' (edge length/weights). show_internal_node_labels : bool Whether or not to write out internal node labels. leaf_spacing_factor : int Positive integer: number of rows between each leaf. width : int Force a particular display width, in terms of number of columns. node_label_compose_fn : function object A function that takes a Node object as an argument and returns the string to be used to display it. 
""" if dendro_tree is None: assert treestr is None or treefname is None if treestr is None: treestr = get_treestr_from_file(treefname) dendro_tree = get_dendro_tree(treestr=treestr, schema=schema) if get_mean_leaf_height(dendro_tree) == 0.: # we really want the max height, but since we only care whether it's zero or not this is the same return '%szero height' % extra_str # elif: get_n_nodes(dendro_tree) > 1: # not sure if I really need this if any more (it used to be for one-leaf trees (and then for one-node trees), but the following code (that used to be indented) seems to be working fine on one-leaf, one-node, and lots-of-node trees a.t.m.) start_char, end_char = '', '' def compose_fcn(x): if x.taxon is not None: # if there's a taxon defined, use its label lb = x.taxon.label elif x.label is not None: # use node label lb = x.label else: lb = 'o' if label_fcn is not None: lb = label_fcn(lb) return '%s%s%s' % (start_char, lb, end_char) dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn) special_chars = [c for c in reversed(string.punctuation) if c not in set(dendro_str)] # find some special characters that we can use to identify the start and end of each label (could also use non-printable special characters, but it shouldn't be necessary) if len(special_chars) >= 2: # can't color them directly, since dendropy counts the color characters as printable start_char, end_char = special_chars[:2] # NOTE the colors get screwed up when dendropy overlaps labels (or sometimes just straight up strips stuff), which it does when it runs out of space dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn) # call again after modiying compose fcn (kind of wasteful to call it twice, but it shouldn't make a difference) dendro_str = dendro_str.replace(start_char, utils.Colors['blue']).replace(end_char, utils.Colors['end'] + ' ') else: print ' %s can\'t color tree, no available special characters in get_ascii_tree()' % utils.color('red', 'note:') if get_n_nodes(dendro_tree) == 1: extra_str += ' (one node)' return_lines = [('%s%s' % (extra_str, line)) for line in dendro_str.split('\n')] return '\n'.join(return_lines) # ---------------------------------------------------------------------------------------- def rescale_tree(new_mean_height, dtree=None, treestr=None, debug=False): # NOTE if you pass in <dtree>, it gets modified, but if you pass in <treestr> you get back a new dtree (which is kind of a dumb way to set this up, but I don't want to change it now. Although I guess it returns None if you pass <dtree>, so you shouldn't get in too much trouble) # TODO (maybe) switch calls of this to dendro's scale_edges() (but note you'd then have to get the mean depth beforehand, since that just multiplies by factor, whereas this rescales to get a particular new height) """ rescale the branch lengths in dtree/treestr by a factor such that the new mean height is <new_mean_height> """ if dtree is None: dtree = get_dendro_tree(treestr=treestr, suppress_internal_node_taxa=True) mean_height = get_mean_leaf_height(tree=dtree) if debug: print ' current mean: %.4f target height: %.4f' % (mean_height, new_mean_height) for edge in dtree.postorder_edge_iter(): if edge.head_node is dtree.seed_node: # why tf does the root node have an edge where it's the child? 
continue if debug: print ' %5s %7e --> %7e' % (edge.head_node.taxon.label if edge.head_node.taxon is not None else 'None', edge.length, edge.length * new_mean_height / mean_height) edge.length *= new_mean_height / mean_height # rescale every branch length in the tree by the ratio of desired to existing height (everybody's heights should be the same... but they never quite were when I was using Bio.Phylo, so, uh. yeah, uh. not sure what to do, but this is fine. It's checked below, anyway) if not treestr: # i'm really pretty sure there's no point in doing this if we're just going to immediately convert to string (and it just caused huge fucking problems because it was missing the suppress unifurcations arg. I'm so *!$@(($@ing tired of that shit this is like the fourth time I've wasted hours chasing down weirdness that stems from that) dtree.update_bipartitions(suppress_unifurcations=False) # probably doesn't really need to be done if debug: print ' final mean: %.4f' % get_mean_leaf_height(tree=dtree) if treestr: return dtree.as_string(schema='newick').strip() # ---------------------------------------------------------------------------------------- def get_tree_difference_metrics(region, in_treestr, leafseqs, naive_seq): taxon_namespace = dendropy.TaxonNamespace() # in order to compare two trees with the metrics below, the trees have to have the same taxon namespace in_dtree = get_dendro_tree(treestr=in_treestr, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True) seqfos = [{'name' : 't%d' % (iseq + 1), 'seq' : seq} for iseq, seq in enumerate(leafseqs)] out_dtree = get_fasttree_tree(seqfos, naive_seq=naive_seq, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True) in_height = get_mean_leaf_height(tree=in_dtree) out_height = get_mean_leaf_height(tree=out_dtree) base_width = 100 in_ascii_str = get_ascii_tree(dendro_tree=in_dtree, extra_str=' ', width=base_width) # make copies before the following functions mess the trees up out_ascii_str = get_ascii_tree(dendro_tree=out_dtree, extra_str=' ', width=int(base_width*out_height/in_height)) print ' comparing input and bppseqgen output trees:' print ' heights: %.3f %.3f' % (in_height, out_height) print ' symmetric difference: %d' % dendropy.calculate.treecompare.symmetric_difference(in_dtree, out_dtree) # WARNING these functions modify the tree (i think by removing unifurcations) becuase OF COURSE THEY DO, wtf print ' euclidean distance: %f' % dendropy.calculate.treecompare.euclidean_distance(in_dtree, out_dtree) print ' r-f distance: %f' % dendropy.calculate.treecompare.robinson_foulds_distance(in_dtree, out_dtree) print ' %s' % utils.color('blue', 'input:') print in_ascii_str print ' %s' % utils.color('blue', 'output:') print out_ascii_str # ---------------------------------------------------------------------------------------- # loops over uids in <hline> and <lline> (which, in order, must correspond to each other), chooses a new joint uid and applies it to both h and l trees, then checks to make sure the trees are identical def merge_heavy_light_trees(hline, lline, use_identical_uids=False, check_trees=True, debug=False): def ladd(uid, locus): return '%s-%s' % (uid, locus) def lrm(uid, locus): assert '-' in uid and uid.split('-')[-1] == locus return uid.replace('-%s' % locus, '') if debug: print ' before:' print ' heavy:' print utils.pad_lines(get_ascii_tree(treestr=hline['tree'])) print ' light:' print utils.pad_lines(get_ascii_tree(treestr=lline['tree'])) assert len(hline['unique_ids']) == len(lline['unique_ids']) 
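# outline of the steps below: hash the heavy and light reco_ids into one joint reco_id, then for each index i hash huid+luid into a joint uid and apply it to both annotations and both trees (appending a locus suffix unless <use_identical_uids>), fill in 'paired-uids' for each chain, and finally (if <check_trees>) re-read both relabeled trees with a shared taxon namespace and require their symmetric difference to be zero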
lpair = [hline, lline] joint_reco_id = utils.uidhashstr(hline['reco_id'] + lline['reco_id']) for ltmp in lpair: ltmp['reco_id'] = joint_reco_id ltmp['paired-uids'] = [] dtrees = [get_dendro_tree(treestr=l['tree']) for l in lpair] for iuid, (huid, luid) in enumerate(zip(hline['unique_ids'], lline['unique_ids'])): joint_uid = utils.uidhashstr(huid + luid) for ltmp in lpair: ltmp['unique_ids'][iuid] = joint_uid if not use_identical_uids: ltmp['unique_ids'][iuid] = ladd(ltmp['unique_ids'][iuid], ltmp['loci'][iuid]) for l1, l2 in zip(lpair, reversed(lpair)): l1['paired-uids'].append([l2['unique_ids'][iuid]]) for dt, uid, ltmp in zip(dtrees, [huid, luid], lpair): # NOTE huid and luid here are the *old* ones dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(ltmp['unique_ids'][iuid]) # don't need to update the taxon namespace since we don't use it afterward hline['tree'], lline['tree'] = [as_str(dt) for dt in dtrees] # have to make a separate tree to actually put in the <line>s, since the symmetric difference function screws up the tree if check_trees: if not use_identical_uids: # reset back to the plain <joint_uid> so we can compare for dt, ltmp in zip(dtrees, lpair): for uid, locus in zip(ltmp['unique_ids'], ltmp['loci']): # yes, they all have the same locus, but see note in utils dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(lrm(uid, locus)) # don't need to update the taxon namespace since we don't use it afterward tns = dendropy.TaxonNamespace() dtrees = [cycle_through_ascii_conversion(dtree=dt, taxon_namespace=tns) for dt in dtrees] # have to recreate from str before calculating symmetric difference to avoid the taxon namespace being screwed up (I tried a bunch to avoid this, I don't know what it's changing, the tns looks fine, but something's wrong) sym_diff = dendropy.calculate.treecompare.symmetric_difference(*dtrees) # WARNING this function modifies the tree (i think by removing unifurcations) becuase OF COURSE THEY DO, wtf if sym_diff != 0: # i guess in principle we could turn this off after we've run a fair bit, but it seems really dangerous, since if the heavy and light trees get out of sync the whole simulation is ruined raise Exception('trees differ (symmetric difference %d) for heavy and light chains' % sym_diff) if debug: print ' after:' print ' symmetric difference: %d' % sym_diff print ' heavy:' print utils.pad_lines(get_ascii_tree(treestr=hline['tree'])) print ' light:' print utils.pad_lines(get_ascii_tree(treestr=lline['tree'])) # ---------------------------------------------------------------------------------------- def collapse_zero_length_leaves(dtree, sequence_uids, debug=False): # <sequence_uids> is uids for which we have actual sequences (i.e. not internal nodes inferred by the tree program without sequences) if debug > 1: print ' merging trivially-dangling leaves into parent internal nodes' print ' distance leaf parent' removed_nodes = [] for leaf in list(dtree.leaf_node_iter()): # subsume super short/zero length leaves into their parent internal nodes recursed = False while leaf.edge_length is not None and leaf.edge_length < 1./(2*typical_bcr_seq_len): # if distance corresponds to less than one mutation, it's probably (always?) just fasttree dangling an internal node as a leaf if leaf.parent_node is None: # why tf can i get the root node here? 
break if leaf.parent_node.taxon is not None and leaf.parent_node.taxon.label in sequence_uids: # only want to do it if the parent node is a (spurious) internal node added by fasttree (this parent's taxon will be None if suppress_internal_node_taxa was set) break if debug > 1: print ' %8.5f %-20s %-20s' % (leaf.edge_length, ' " ' if recursed else leaf.taxon.label, 'none' if leaf.parent_node.taxon is None else leaf.parent_node.taxon.label) parent_node = leaf.parent_node removed_nodes.append(parent_node.taxon.label if parent_node.taxon is not None else None) collapse_nodes(dtree, leaf.taxon.label, None, keep_name_node=leaf, remove_name_node=leaf.parent_node) leaf = parent_node recursed = True dtree.update_bipartitions(suppress_unifurcations=False) dtree.purge_taxon_namespace() if debug: print ' merged %d trivially-dangling leaves into parent internal nodes: %s' % (len(removed_nodes), ' '.join(str(n) for n in removed_nodes)) # print get_ascii_tree(dendro_tree=dtree, extra_str=' ', width=350) # print dtree.as_string(schema='newick').strip() # ---------------------------------------------------------------------------------------- def get_fasttree_tree(seqfos, naive_seq=None, naive_seq_name='XnaiveX', taxon_namespace=None, suppress_internal_node_taxa=False, debug=False): if debug: print ' running FastTree on %d sequences plus a naive' % len(seqfos) uid_list = [sfo['name'] for sfo in seqfos] if any(uid_list.count(u) > 1 for u in uid_list): raise Exception('duplicate uid(s) in seqfos for FastTree, which\'ll make it crash: %s' % ' '.join(u for u in uid_list if uid_list.count(u) > 1)) with tempfile.NamedTemporaryFile() as tmpfile: if naive_seq is not None: tmpfile.write('>%s\n%s\n' % (naive_seq_name, naive_seq)) for sfo in seqfos: tmpfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq'])) # NOTE the order of the leaves/names is checked when reading bppseqgen output tmpfile.flush() # BEWARE if you forget this you are fucked with open(os.devnull, 'w') as fnull: treestr = subprocess.check_output('./bin/FastTree -gtr -nt ' + tmpfile.name, shell=True, stderr=fnull) if debug: print ' converting FastTree newick string to dendro tree' dtree = get_dendro_tree(treestr=treestr, taxon_namespace=taxon_namespace, ignore_existing_internal_node_labels=not suppress_internal_node_taxa, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug) naive_node = dtree.find_node_with_taxon_label(naive_seq_name) if naive_node is not None: dtree.reroot_at_node(naive_node, suppress_unifurcations=False, update_bipartitions=True) if not suppress_internal_node_taxa: # if we *are* suppressing internal node taxa, we're probably calling this from clusterpath, in which case we need to mess with the internal nodes in a way that assumes they can be ignored (so we collapse zero length leaves afterwards) collapse_zero_length_leaves(dtree, uid_list + [naive_seq_name], debug=debug) return dtree # ---------------------------------------------------------------------------------------- # copied from https://github.com/nextstrain/augur/blob/master/base/scores.py # also see explanation here https://photos.app.goo.gl/gtjQziD8BLATQivR6 def set_lb_values(dtree, tau, only_calc_metric=None, dont_normalize=False, multifo=None, debug=False): """ traverses <dtree> in postorder and preorder to calculate the up and downstream tree length exponentially weighted by distance, then adds them as LBI (and divides as LBR) """ def getmulti(node): # number of reads with the same sequence if multifo is None or node.taxon.label not in multifo or 
multifo[node.taxon.label] is None: # most all of them should be in there, but for instance I'm not adding the dummy branch nodes return 1 return multifo[node.taxon.label] metrics_to_calc = lb_metrics.keys() if only_calc_metric is None else [only_calc_metric] if debug: print ' setting %s values with tau %.4f' % (' and '.join(metrics_to_calc), tau) initial_labels = set([n.taxon.label for n in dtree.preorder_node_iter()]) dtree = get_tree_with_dummy_branches(dtree, tau) # this returns a new dtree, but the old tree is a subtree of the new one (or at least its collection of nodes are), and these nodes get modified by the process (hence the reversal fcn below) # calculate clock length (i.e. for each node, the distance to that node's parent) for node in dtree.postorder_node_iter(): # postorder vs preorder doesn't matter, but I have to choose one if node.parent_node is None: # root node node.clock_length = 0. for child in node.child_node_iter(): child.clock_length = child.distance_from_root() - node.distance_from_root() # lbi is the sum of <node.down_polarizer> (downward message from <node>'s parent) and its children's up_polarizers (upward messages) # traverse the tree in postorder (children first) to calculate message to parents (i.e. node.up_polarizer) for node in dtree.postorder_node_iter(): node.down_polarizer = 0 # used for <node>'s lbi (this probabably shouldn't be initialized here, since it gets reset in the next loop [at least I think they all do]) node.up_polarizer = 0 # used for <node>'s parent's lbi (but not <node>'s lbi) for child in node.child_node_iter(): node.up_polarizer += child.up_polarizer bl = node.clock_length / tau node.up_polarizer *= numpy.exp(-bl) # sum of child <up_polarizer>s weighted by an exponential decayed by the distance to <node>'s parent node.up_polarizer += getmulti(node) * tau * (1 - numpy.exp(-bl)) # add the actual contribution (to <node>'s parent's lbi) of <node>: zero if the two are very close, increasing toward asymptote of <tau> for distances near 1/tau (integral from 0 to l of decaying exponential) # traverse the tree in preorder (parents first) to calculate message to children (i.e. child1.down_polarizer) for node in dtree.preorder_internal_node_iter(): for child1 in node.child_node_iter(): # calculate down_polarizer for each of <node>'s children child1.down_polarizer = node.down_polarizer # first sum <node>'s down_polarizer... for child2 in node.child_node_iter(): # and the *up* polarizers of any other children of <node> if child1 != child2: child1.down_polarizer += child2.up_polarizer # add the contribution of <child2> to its parent's (<node>'s) lbi (i.e. 
<child2>'s contribution to the lbi of its *siblings*) bl = child1.clock_length / tau child1.down_polarizer *= numpy.exp(-bl) # and decay the previous sum by distance between <child1> and its parent (<node>) child1.down_polarizer += getmulti(child1) * tau * (1 - numpy.exp(-bl)) # add contribution of <child1> to its own lbi: zero if it's very close to <node>, increasing to max of <tau> (integral from 0 to l of decaying exponential) returnfo = {m : {} for m in metrics_to_calc} # go over all nodes and calculate lb metrics (can be done in any order) for node in dtree.postorder_node_iter(): vals = {'lbi' : node.down_polarizer, 'lbr' : 0.} for child in node.child_node_iter(): vals['lbi'] += child.up_polarizer vals['lbr'] += child.up_polarizer if node.down_polarizer > 0.: vals['lbr'] /= node.down_polarizer # it might make more sense to not include the branch between <node> and its parent in either the numerator or denominator (here it's included in the denominator), but this way I don't have to change any of the calculations above if dummy_str in node.taxon.label: continue if node is dtree.seed_node or node.parent_node is dtree.seed_node: # second clause is only because of dummy root addition (well, and if we are adding dummy root the first clause doesn't do anything) vals['lbr'] = 0. for metric in metrics_to_calc: returnfo[metric][node.taxon.label] = float(vals[metric]) if dont_normalize else normalize_lb_val(metric, float(vals[metric]), tau) if debug: max_width = str(max([len(n.taxon.label) for n in dtree.postorder_node_iter()])) print (' %'+max_width+'s %s%s multi') % ('node', ''.join(' %s' % m for m in metrics_to_calc), 16*' ' if 'lbr' in metrics_to_calc else '') for node in dtree.preorder_node_iter(): if dummy_str in node.taxon.label: continue multi_str = '' if multifo is not None: multi_str = str(getmulti(node)) if getmulti(node) > 1: multi_str = utils.color('blue', multi_str, width=3) lbstrs = ['%8.3f' % returnfo[m][node.taxon.label] for m in metrics_to_calc] if 'lbr' in metrics_to_calc: lbstrs += [' = %-5.3f / %-5.3f' % (returnfo['lbr'][node.taxon.label] * node.down_polarizer, node.down_polarizer)] print (' %' + max_width + 's %s %3s') % (node.taxon.label, ''.join(lbstrs), multi_str) # this is maybe time consuming, but I want to leave the tree that was passed in as unmodified as I can (especially since I have to run this fcn twice for lbi/lbr since they need different tau values) for node in dtree.postorder_node_iter(): delattr(node, 'clock_length') delattr(node, 'up_polarizer') delattr(node, 'down_polarizer') remove_dummy_branches(dtree, initial_labels) return returnfo # ---------------------------------------------------------------------------------------- def get_tree_with_dummy_branches(old_dtree, tau, n_tau_lengths=10, add_dummy_leaves=False, debug=False): # add long branches above root and/or below each leaf, since otherwise we're assuming that (e.g.) 
leaf node fitness is zero # commenting this since I'm pretty sure I've fixed it, but not removing it since if a similar problem surfaces with dummy branch addition, deep copying is an easy way out # zero_length_edges = [e for e in old_dtree.preorder_edge_iter() if e.length == 0 and not e.head_node.is_leaf()] # if len(zero_length_edges) > 0: # rerooting to remove dummy branches screws up the tree in some cases with zero length branches (see comment in that fcn) # old_dtree = copy.deepcopy(old_dtree) # could maybe do this by default, but it'll probably be really slow on large trees (at least iterating through the trees is; although I suppose maybe deepcopy is smater than that) # print ' %s found %d zero length branches in tree, so deep copying before adding dummy branches (this is probably ok ish, but in general it\'s a bad idea to have zero length branches in your trees): %s' % (utils.color('yellow', 'warning'), len(zero_length_edges), ' '.join([e.head_node.taxon.label for e in zero_length_edges])) dummy_edge_length = n_tau_lengths * tau new_root_taxon = dendropy.Taxon(dummy_str + '-root') old_dtree.taxon_namespace.add_taxon(new_root_taxon) new_root_node = dendropy.Node(taxon=new_root_taxon) new_dtree = dendropy.Tree(seed_node=new_root_node, taxon_namespace=old_dtree.taxon_namespace, is_rooted=True) # then add the entire old tree under this new tree new_root_node.add_child(old_dtree.seed_node) for edge in new_root_node.child_edge_iter(): edge.length = dummy_edge_length if add_dummy_leaves: # add dummy child branches to each leaf for lnode in new_dtree.leaf_node_iter(): new_label = '%s-%s' % (dummy_str, lnode.taxon.label) tns.add_taxon(dendropy.Taxon(new_label)) new_child_node = lnode.new_child(taxon=tns.get_taxon(new_label), edge_length=dummy_edge_length) # TODO commenting this because it gets triggered way too much, but I'm not actually sure that I can really just ignore the problem (but maybe I can) # zero_len_edge_nodes = [e.head_node for n in new_dtree.preorder_node_iter() for e in n.child_edge_iter() if e.length == 0 and not e.head_node.is_leaf()] # zero len edges above leaves are fine, since leaves don't count for lbr # if len(zero_len_edge_nodes) > 0: # print ' %s found %d zero length internal edges in tree, which means lb ratio may mis-categorize branches: %s' % (utils.color('red', 'warning'), len(zero_len_edge_nodes), ' '.join([n.taxon.label for n in zero_len_edge_nodes])) # # for node in zero_len_edge_nodes: # we don't really want to modify the tree this drastically here (and a.t.m. this causes a crash later on), but I'm leaving it as a placeholder for how to remove zero length edges # # collapse_nodes(new_dtree, node.taxon.label, node.parent_node.taxon.label) # keep the child, since it can be a leaf # # print utils.pad_lines(get_ascii_tree(dendro_tree=new_dtree)) new_dtree.update_bipartitions(suppress_unifurcations=False) # not sure if I need this? (suppress_unifurcations is because otherwise it removes the branch between the old and new root nodes) if debug: print ' added dummy branches to tree:' print get_ascii_tree(dendro_tree=new_dtree, extra_str=' ', width=350) return new_dtree # ---------------------------------------------------------------------------------------- def remove_dummy_branches(dtree, initial_labels, add_dummy_leaves=False, debug=False): if add_dummy_leaves: raise Exception('not implemented (shouldn\'t be too hard, but a.t.m. 
I don\'t think I\'ll need it)') if len(dtree.seed_node.child_nodes()) != 1: print ' %s root node has more than one child when removing dummy branches: %s' % (utils.color('yellow', 'warning'), ' '.join([n.taxon.label for n in dtree.seed_node.child_nodes()])) new_root_node = dtree.seed_node.child_nodes()[0] if debug: print ' rerooting at %s' % new_root_node.taxon.label print ' current children: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()]) # NOTE if the new root has a child separated by a zero-length edge, this reroot call for some reason deletes that child from the tree (both with and without suppress_unifurcations set). After messing around a bunch to try to fix it, the message I'm taking is just that zero length branches (and unifurcations) are a bad idea and I should just forbid them # UPDATE I think I was just missing the suppress_unifurcations=False in update_bipartitions(), but leaving these comments here in case there was another problem # UPDATE actually the reroot still seems to eat a node sometimes if the tree is unrooted (so adding the extra reroot above) # UPDATE this is more or less expectd, from dendropy's perspective; see https://github.com/jeetsukumaran/DendroPy/issues/118 assert dtree.is_rooted # make sure it's rooted, to avoid unifurcations getting suppressed (even with the arg set to false) dtree.reroot_at_node(new_root_node, suppress_unifurcations=False) # reroot at old root node if debug: print ' children after reroot: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()]) dtree.prune_taxa_with_labels([dummy_str + '-root'], suppress_unifurcations=False) dtree.purge_taxon_namespace() # I'm sure there's a good reason the previous line doesn't do this dtree.update_bipartitions(suppress_unifurcations=False) if debug: print ' children after purge: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()]) final_labels = set([n.taxon.label for n in dtree.preorder_node_iter()]) if initial_labels != final_labels: # this was only happening with a zero-length node hanging off root (see above), which probably won't happen any more since I'm now removing zero length (non-leaf) branches in bcr-phylo simulator.py print ' %s nodes after dummy branch addition and removal not the same as before:' % utils.color('red', 'error') print ' missing: %s' % ' '.join(initial_labels - final_labels) print ' extra: %s' % ' '.join(final_labels - initial_labels) print ' tree:' print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400)) # ---------------------------------------------------------------------------------------- def get_aa_tree(dtree, annotation, extra_str=None, debug=False): very_different_frac = 0.5 if debug: print ' converting nuc tree (mean depth %.3f) to aa' % get_mean_leaf_height(dtree) # print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400)) changes = {} aa_dtree = copy.deepcopy(dtree) nuc_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs'])} aa_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs_aa'])} skipped_edges = [] if debug > 1: print ' N mutations branch length' print ' nuc aa nuc aa child node' for edge in aa_dtree.preorder_edge_iter(): if edge.tail_node is None: # edge above root (no, i don't know why root has an edge above it, but that's how it is) continue cnode = edge.head_node # child of this edge clabel, plabel = cnode.taxon.label, cnode.parent_node.taxon.label # turns out there's also a .tail_node attribute of the edge that 
isn't listed properly in the docs if clabel not in aa_seqs or plabel not in aa_seqs: # if either of the seqs are missing, leave the existing (presumably nucleotide-based) branch length unchanged skipped_edges.append(edge) continue nuc_branch_length = edge.length # nucleotide distance from parent node (only used for debug, but we have to grab it before we change the edge length) aa_mut_frac, aa_n_muts = utils.hamming_fraction(aa_seqs[plabel], aa_seqs[clabel], amino_acid=True, also_return_distance=True) edge.length = aa_mut_frac if debug: nuc_mut_frac, nuc_n_muts = utils.hamming_fraction(nuc_seqs[plabel], nuc_seqs[clabel], also_return_distance=True) if nuc_mut_frac > 0 and abs(nuc_branch_length - nuc_mut_frac) / nuc_mut_frac > very_different_frac: print ' %s nuc branch length %.4f and mut frac %.4f very different for branch between %s --> %s' % (utils.color('red', 'warning'), nuc_branch_length, nuc_mut_frac, clabel, plabel) changes[edge] = (nuc_n_muts, aa_n_muts) if debug > 1: print ' %3d %3d %.3f %.3f %s' % (nuc_n_muts, aa_n_muts, nuc_branch_length, aa_mut_frac, clabel) aa_dtree.update_bipartitions(suppress_unifurcations=False) if len(skipped_edges) > 0: print ' %s get_aa_tree()%s: skipped %d/%d edges for which we didn\'t have sequences for both nodes (i.e. left the original branch length unmodified)' % (utils.color('yellow', 'warning'), '' if extra_str is None else ' %s'%extra_str, len(skipped_edges), len(list(aa_dtree.preorder_edge_iter()))) if debug: assert len(changes) + len(skipped_edges) + 1 == len(list(aa_dtree.preorder_edge_iter())) # +1 is for root edge print ' rescaled %d/%d edges' % (len(changes), len(list(aa_dtree.preorder_edge_iter()))) print ' aa tree mean depth: %.3f' % get_mean_leaf_height(aa_dtree) n_to_print = 10 print ' child nodes with %d largest differences between N nuc and N aa changes' % n_to_print print ' nuc aa parent node child node' for edge in sorted(changes, key=lambda k: changes[k][1] - changes[k][0])[:n_to_print]: nuc_n_muts, aa_n_muts = changes[edge] print ' %3d %3d %-15s %s' % (nuc_n_muts, aa_n_muts, edge.tail_node.taxon.label, edge.head_node.taxon.label) # print utils.pad_lines(get_ascii_tree(dendro_tree=aa_dtree, width=400)) return aa_dtree # ---------------------------------------------------------------------------------------- # check whether 1) node depth and 2) node pairwise distances are super different when calculated with tree vs sequences (not really sure why it's so different sometimes, best guess is fasttree sucks, partly because it doesn't put the root node anywhere near the root of the tree) def compare_tree_distance_to_shm(dtree, annotation, max_frac_diff=0.5, min_warn_frac=0.25, extra_str=None, debug=False): common_nodes = [n for n in dtree.preorder_node_iter() if n.taxon.label in annotation['unique_ids']] tdepths, mfreqs, fracs = {}, {}, {} for node in common_nodes: tdepth = node.distance_from_root() mfreq = utils.per_seq_val(annotation, 'mut_freqs', node.taxon.label) frac_diff = abs(tdepth - mfreq) / tdepth if tdepth > 0 else 0 if frac_diff > max_frac_diff: key = node.taxon.label tdepths[key] = tdepth mfreqs[key] = mfreq fracs[key] = frac_diff if debug or len(fracs) > 0: warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else '' if debug or warnstr != '': print ' %stree depth and mfreq differ by more than %.0f%% for %d/%d nodes%s' % (warnstr, 100*max_frac_diff, len(fracs), len(common_nodes), '' if extra_str is None else ' for %s' % extra_str) if debug and len(fracs) > 0: print ' tree depth 
mfreq frac diff' for key, frac in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True): print ' %.4f %.4f %.4f %s' % (tdepths[key], mfreqs[key], frac, key) dmatrix = dtree.phylogenetic_distance_matrix() dmx_taxa = set(dmatrix.taxon_iter()) # phylogenetic_distance_matrix() seems to only return values for leaves, which maybe I'm supposed to expect? tdists, mdists, fracs = {}, {}, {} # NOTE reusing these names is kind of dangerous for n1, n2 in itertools.combinations([n for n in common_nodes if n.taxon in dmx_taxa], 2): tdist = dmatrix.distance(n1.taxon, n2.taxon) mdist = utils.hamming_fraction(utils.per_seq_val(annotation, 'seqs', n1.taxon.label), utils.per_seq_val(annotation, 'seqs', n2.taxon.label)) frac_diff = abs(tdist - mdist) / tdist if tdist > 0 else 0 if frac_diff > max_frac_diff: key = (n1.taxon.label, n2.taxon.label) tdists[key] = tdist mdists[key] = mdist fracs[key] = frac_diff if debug or len(fracs) > 0: warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else '' if debug or warnstr != '': print ' %spairwise distances from tree and sequence differ by more than %.f%% for %d/%d node pairs%s' % (warnstr, 100*max_frac_diff, len(fracs), 0.5 * len(common_nodes) * (len(common_nodes)-1), '' if extra_str is None else ' for %s' % extra_str) if debug and len(fracs) > 0: print ' pairwise' print ' tree dist seq dist frac diff' for key, frac_diff in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True): print ' %.4f %.4f %.4f %s %s' % (tdists[key], mdists[key], frac_diff, key[0], key[1]) if debug: print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400)) utils.print_reco_event(annotation) # ---------------------------------------------------------------------------------------- def calculate_lb_values(dtree, tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize=False, annotation=None, extra_str=None, iclust=None, debug=False): # if <only_calc_metric> is None, we use <tau> and <lbr_tau_factor> to calculate both lbi and lbr (i.e. with different tau) # - whereas if <only_calc_metric> is set, we use <tau> to calculate only the given metric # note that it's a little weird to do all this tree manipulation here, but then do the dummy branch tree manipulation in set_lb_values(), but the dummy branch stuff depends on tau so it's better this way # <iclust> is just to give a little more granularity in dbg # TODO this is too slow (although it would be easy to have an option for it to only spot check a random subset of nodes) # if annotation is not None: # check that the observed shm rate and tree depth are similar (we're still worried that they're different if we don't have the annotation, but we have no way to check it) # compare_tree_distance_to_shm(dtree, annotation, extra_str=extra_str) if max(get_leaf_depths(dtree).values()) > 1: # should only happen on old simulation files if annotation is None: raise Exception('tree needs rescaling in lb calculation (metrics will be wrong): found leaf depth greater than 1 (even when less than 1 they can be wrong, but we can be fairly certain that your BCR sequences don\'t have real mutation frequency greater than 1, so this case we can actually check). If you pass in annotations we can rescale to the observed mutation frequency.') print ' %s leaf depths greater than 1, so rescaling by sequence length' % utils.color('yellow', 'warning') dtree.scale_edges(1.
/ numpy.mean([len(s) for s in annotation['seqs']])) # using treeutils.rescale_tree() breaks, it seems because the update_bipartitions() call removes nodes near root on unrooted trees if debug: print ' calculating %s%s with tree:' % (' and '.join(lb_metrics if only_calc_metric is None else [only_calc_metric]), '' if extra_str is None else ' for %s' % extra_str) print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400)) multifo = None if annotation is not None: multifo = {} # NOTE now that I'm always doing this, it might make sense to rearrange things a bit, but i don't want to look at it right now for node in dtree.postorder_node_iter(): multifo[node.taxon.label] = utils.get_multiplicity(annotation, uid=node.taxon.label) if node.taxon.label in annotation['unique_ids'] else 1 # if it's not in there, it could be from wonky names from lonr.r, also could be from FastTree tree where we don't get inferred intermediate sequences treestr = dtree.as_string(schema='newick') # get this before the dummy branch stuff to make more sure it isn't modified normstr = 'unnormalized' if dont_normalize else 'normalized' if only_calc_metric is None: assert lbr_tau_factor is not None # has to be set if we're calculating both metrics if iclust is None or iclust == 0: print ' %scalculating %s lb metrics with tau values %.4f (lbi) and %.4f * %d = %.4f (lbr)' % ('' if extra_str is None else '%s: '%extra_str, normstr, tau, tau, lbr_tau_factor, tau*lbr_tau_factor) lbvals = set_lb_values(dtree, tau, only_calc_metric='lbi', dont_normalize=dont_normalize, multifo=multifo, debug=debug) tmpvals = set_lb_values(dtree, tau*lbr_tau_factor, only_calc_metric='lbr', dont_normalize=dont_normalize, multifo=multifo, debug=debug) lbvals['lbr'] = tmpvals['lbr'] else: assert lbr_tau_factor is None or dont_normalize # we need to make sure that we weren't accidentally called with lbr_tau_factor set, but then we ignore it because the caller forgot that we ignore it if only_calc_metric is also set if iclust is None or iclust == 0: print ' calculating %s %s with tau %.4f' % (normstr, lb_metrics[only_calc_metric], tau) lbvals = set_lb_values(dtree, tau, only_calc_metric=only_calc_metric, dont_normalize=dont_normalize, multifo=multifo, debug=debug) lbvals['tree'] = treestr return lbvals # ---------------------------------------------------------------------------------------- def set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=False): if n_generations is None: assert n_tau_lengths is not None # have to specify one or the other n_generations = max(1, int(seq_len * tau * n_tau_lengths)) if debug: print ' %d generations = seq_len * tau * n_tau_lengths = %d * %.4f * %d = max(1, int(%.2f))' % (n_generations, seq_len, tau, n_tau_lengths, seq_len * tau * n_tau_lengths) # else: # if debug: # print ' %d generations' % n_generations return n_generations # ---------------------------------------------------------------------------------------- def get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=False): dtree = dendropy.Tree(is_rooted=True) # note that using a taxon namespace while you build the tree is *much* slower than labeling it afterward (and we do need labels when we calculate lb values) if bound == 'min': leaf_node = dtree.seed_node # pretty similar to the dummy root stuff for igen in range(n_generations): leaf_node = leaf_node.new_child(edge_length=1./seq_len) elif bound == 'max': old_leaf_nodes = [l for l in dtree.leaf_node_iter()] assert len(old_leaf_nodes) == 1 new_leaf_nodes 
= [] for igen in range(n_generations): for ileaf in range(len(old_leaf_nodes)): for ioff in range(n_offspring): new_leaf_nodes += [old_leaf_nodes[ileaf].new_child(edge_length=1./seq_len)] old_leaf_nodes = new_leaf_nodes new_leaf_nodes = [] else: assert False return dtree # ---------------------------------------------------------------------------------------- def calculate_lb_bounds(seq_len, tau, n_tau_lengths=10, n_generations=None, n_offspring=2, only_metrics=None, btypes=None, debug=False): # NOTE the min is just tau, but I don't feel like deleting this fcn just to keep clear what the min means info = {m : {} for m in lb_metrics} n_generations = set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=debug) for metric in [m for m in lb_metrics if only_metrics is None or m in only_metrics]: for bound in [b for b in ['min', 'max'] if btypes is None or b in btypes]: if metric == 'lbr' and bound == 'min': # lbr min is always zero (leaves) info[metric][bound] = {metric : 0., 'vals' : None} continue if debug: print ' %s %s for seq len %d' % (utils.color('red', bound), utils.color('yellow', metric), seq_len) start = time.time() dtree = get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=debug) label_nodes(dtree) lbvals = calculate_lb_values(dtree, tau, only_calc_metric=metric, dont_normalize=True, debug=debug) bfcn = __builtins__[bound] # min() or max() info[metric][bound] = {metric : bfcn(lbvals[metric].values()), 'vals' : lbvals} if debug: bname, bval = bfcn(lbvals[metric].items(), key=operator.itemgetter(1)) print ' %s of %d %s values (%.1fs): %s %.4f' % (bound, len(lbvals[metric]), metric, time.time() - start, bname, bval) return info # ---------------------------------------------------------------------------------------- def get_n_ancestors_to_affy_change(node, dtree, line, affinity_changes=None, min_affinity_change=1e-6, n_max_steps=15, also_return_branch_len=False, debug=False): # find number of steps/ancestors to the nearest ancestor with lower affinity than <node>'s # - also finds the corresponding distance, which is to the lower end of the branch containing the corresponding affinity-increasing mutation # - this is chosen so that <n_steps> and <branch_len> are both 0 for the node at the bottom of a branch on which affinity increases, and are *not* the distance *to* the lower-affinity node # - because it's so common for affinity to get worse from ancestor to descendent, it's important to remember that here we are looking for the first ancestor with lower affinity than the node in question, which is *different* to looking for the first ancestor that has lower affinity than one of its immediate descendents (which we could also plot, but it probably wouldn't be significantly different to the metric performance, since for the metric performance we only really care about the left side of the plot, but this only affects the right side) # - <min_affinity_change> is just to eliminate floating point precision issues (especially since we're deriving affinity by inverting kd) (note that at least for now, and with default settings, the affinity changes should all be pretty similar, and not small) this_affinity = utils.per_seq_val(line, 'affinities', node.taxon.label) if debug: print ' %12s %12s %8s %9.4f' % (node.taxon.label, '', '', this_affinity) ancestor_node = node chosen_ancestor_affinity = None n_steps, branch_len = 0, 0. 
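# worked example (made-up affinities) for the loop below: if <node> has affinity 1.2, its parent 1.2, and its grandparent 1.0, the first iteration finds no drop (n_steps -> 1, branch_len picks up the edge above <node>), the second iteration sees the grandparent's lower affinity and breaks, so we return n_steps = 1, i.e. both counters measure to the lower end of the affinity-increasing branch, as described in the comments above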
while n_steps < n_max_steps: # note that if we can't find an ancestor with worse affinity, we don't plot the node if ancestor_node is dtree.seed_node: break ancestor_distance = ancestor_node.edge_length # distance from current <ancestor_node> to its parent (who in the next line becomes <ancestor_node>) ancestor_node = ancestor_node.parent_node # move one more step up the tree ancestor_uid = ancestor_node.taxon.label if ancestor_uid not in line['unique_ids']: print ' %s ancestor %s of %s not in true line' % (utils.color('yellow', 'warning'), ancestor_uid, node.taxon.label) break ancestor_affinity = utils.per_seq_val(line, 'affinities', ancestor_uid) if this_affinity - ancestor_affinity > min_affinity_change: # if we found an ancestor with lower affinity, we're done chosen_ancestor_affinity = ancestor_affinity if affinity_changes is not None: affinity_changes.append(this_affinity - ancestor_affinity) break if debug: print ' %12s %12s %8.4f %9.4f%s' % ('', ancestor_uid, branch_len, ancestor_affinity, utils.color('green', ' x') if ancestor_node is dtree.seed_node else '') n_steps += 1 branch_len += ancestor_distance if chosen_ancestor_affinity is None: # couldn't find ancestor with lower affinity return (None, None) if also_return_branch_len else None if debug: print ' %12s %12s %8.4f %9.4f %s%-9.4f' % ('', ancestor_uid, branch_len, chosen_ancestor_affinity, utils.color('red', '+'), this_affinity - chosen_ancestor_affinity) if also_return_branch_len: # kind of hackey, but we only want the branch length for plotting atm, and actually we aren't even making those plots by default any more return n_steps, branch_len else: return n_steps # ---------------------------------------------------------------------------------------- lonr_files = { # this is kind of ugly, but it's the cleanest way I can think of to have both this code and the R code know what they're called 'phy.outfname' : 'phy_out.txt', 'phy.treefname' : 'phy_tree.nwk', 'outseqs.fname' : 'outseqs.fasta', 'edgefname' : 'edges.tab', 'names.fname' : 'names.tab', 'lonrfname' : 'lonr.csv', } # ---------------------------------------------------------------------------------------- def build_lonr_tree(edgefos, debug=False): # NOTE have to build the tree from the edge file, since the lonr code seems to add nodes that aren't in the newick file (which is just from phylip). all_nodes = set([e['from'] for e in edgefos] + [e['to'] for e in edgefos]) effective_root_nodes = set([e['from'] for e in edgefos]) - set([e['to'] for e in edgefos]) # "effective" because it can be in an unrooted tree. Not sure if there's always exactly one node that has no inbound edges though if len(effective_root_nodes) != 1: raise Exception('too many effective root nodes: %s' % effective_root_nodes) root_label = list(effective_root_nodes)[0] # should be '1' for dnapars if debug: print ' chose \'%s\' as root node' % root_label tns = dendropy.TaxonNamespace(all_nodes) root_node = dendropy.Node(taxon=tns.get_taxon(root_label)) # NOTE this sets node.label and node.taxon.label to the same thing, which may or may not be what we want # label=root_label, (if you start setting the node labels again, you also have to translate them below) dtree = dendropy.Tree(taxon_namespace=tns, seed_node=root_node, is_rooted=True) remaining_nodes = copy.deepcopy(all_nodes) - set([root_label]) # a.t.m. I'm not actually using <all_nodes> after this, but I still want to keep them separate in case I start using it weight_or_distance_key = 'distance' # maybe should I be using the 'weight' column? 
I think they're just proportional though so I guess it shouldn't matter (same thing in the line below) # root_edgefos = [efo for efo in edgefos if efo['from'] == root_label] for efo in root_edgefos: dtree.seed_node.new_child(taxon=tns.get_taxon(efo['to']), edge_length=efo[weight_or_distance_key]) # label=efo['to'], (if you start setting the node labels again, you also have to translate them below) remaining_nodes.remove(efo['to']) while len(remaining_nodes) > 0: n_removed = 0 # I think I don't need this any more (it only happened before I remembered to remove the root node), but it doesn't seem like it'll hurt) for lnode in dtree.leaf_node_iter(): children = [efo for efo in edgefos if efo['from'] == lnode.taxon.label] if debug > 1 and len(children) > 0: print ' adding children to %s:' % lnode.taxon.label for chfo in children: lnode.new_child(taxon=tns.get_taxon(chfo['to']), edge_length=chfo[weight_or_distance_key]) # label=chfo['to'], (if you start setting the node labels again, you also have to translate them below) remaining_nodes.remove(chfo['to']) n_removed += 1 if debug > 1: print ' %s' % chfo['to'] if debug > 1: print ' remaining: %d' % len(remaining_nodes) if len(remaining_nodes) > 0 and n_removed == 0: # if there's zero remaining, we're just about to break anyway if debug > 1: print ' didn\'t remove any, so breaking: %s' % remaining_nodes break return dtree # ---------------------------------------------------------------------------------------- def parse_lonr(outdir, input_seqfos, naive_seq_name, reco_info=None, debug=False): def get_node_type_from_name(name, debug=False): # internal nodes in simulated trees should be labeled like 'mrca-<stuff>' (has to correspond to what bcr-phylo-benchmark did) if 'mrca' in name: return 'internal' elif 'leaf' in name: return 'leaf' else: if debug: print ' not sure of node type for \'%s\'' % name return None # get lonr names (lonr replaces them with shorter versions, I think because of phylip) lonr_names, input_names = {}, {} with open(outdir + '/' + lonr_files['names.fname']) as namefile: # headers: "head head2" reader = csv.DictReader(namefile, delimiter='\t') for line in reader: if line['head'][0] != 'L' and line['head'] != naive_seq_name: # internal node dummy_int = int(line['head']) # check that it's just a (string of a) number assert line['head2'] == '-' continue input_names[line['head']] = line['head2'] # head2 is our names lonr_names[line['head2']] = line['head'] def final_name(lonr_name): return input_names.get(lonr_name, lonr_name) # read edge info (i.e., implicitly, the tree that lonr.r used) edgefos = [] # headers: "from to weight distance" with open(outdir + '/' + lonr_files['edgefname']) as edgefile: reader = csv.DictReader(edgefile, delimiter='\t') for line in reader: line['distance'] = int(line['distance']) line['weight'] = float(line['weight']) edgefos.append(line) dtree = build_lonr_tree(edgefos, debug=debug) # switch leaves to input names for node in dtree.leaf_node_iter(): node.taxon.label = input_names[node.taxon.label] assert node.label is None # (if you start setting the node labels again, you also have to translate them here) # node.label = node.taxon.label # (if you start setting the node labels again, you also have to translate them here) if debug: print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250)) nodefos = {node.taxon.label : {} for node in dtree.postorder_node_iter()} # info for each node (internal and leaf), destined for output # read the sequences for both leaves and inferred (internal) ancestors 
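# ----------------------------------------------------------------------------------------
# NOTE purely illustrative sketch: a stripped-down, self-contained version of the edge-file-to-dendropy-tree
#  construction that build_lonr_tree() does above -- pick the one node with no inbound edge as the seed
#  node, then keep attaching children to nodes that are already in the tree until the edge list is used
#  up. The fcn name tree_from_edges() and the toy edge list in the usage comment are invented for the example.
def tree_from_edges(edgefos):
    import dendropy
    all_labels = set([e['from'] for e in edgefos] + [e['to'] for e in edgefos])
    root_label = list(set(e['from'] for e in edgefos) - set(e['to'] for e in edgefos))[0]  # assumes exactly one node with no inbound edge
    tns = dendropy.TaxonNamespace(all_labels)
    dtree = dendropy.Tree(taxon_namespace=tns, seed_node=dendropy.Node(taxon=tns.get_taxon(root_label)), is_rooted=True)
    added, remaining = set([root_label]), list(edgefos)
    while len(remaining) > 0:
        next_edges = [e for e in remaining if e['from'] in added]
        assert len(next_edges) > 0  # would mean the edge list isn't connected
        for efo in next_edges:
            parent_node = dtree.find_node_with_taxon_label(efo['from'])
            parent_node.new_child(taxon=tns.get_taxon(efo['to']), edge_length=efo['distance'])
            added.add(efo['to'])
            remaining.remove(efo)
    return dtree

# toy usage (three edges rooted on '1'):
# ttree = tree_from_edges([{'from' : '1', 'to' : '2', 'distance' : 1.}, {'from' : '1', 'to' : '3', 'distance' : 2.}, {'from' : '2', 'to' : '4', 'distance' : 1.}])
# print(ttree.as_string(schema='newick'))
# ----------------------------------------------------------------------------------------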
seqfos = {final_name(sfo['name']) : sfo['seq'] for sfo in utils.read_fastx(outdir + '/' + lonr_files['outseqs.fname'])} input_seqfo_dict = {sfo['name'] : sfo['seq'] for sfo in input_seqfos} # just to make sure lonr didn't modify the input sequences for node in dtree.postorder_node_iter(): label = node.taxon.label if label not in seqfos: raise Exception('unexpected sequence name %s' % label) if node.is_leaf() or label == naive_seq_name: if label not in input_seqfo_dict: raise Exception('leaf node \'%s\' not found in input seqs' % label) if seqfos[label] != input_seqfo_dict[label]: print 'input: %s' % input_seqfo_dict[label] print ' lonr: %s' % utils.color_mutants(input_seqfo_dict[label], seqfos[label], align=True) raise Exception('lonr leaf sequence doesn\'t match input sequence (see above)') nodefos[label]['seq'] = seqfos[label] # read actual lonr info lonrfos = [] if debug: print ' pos mutation lonr syn./a.b.d. parent child' with open(outdir + '/' + lonr_files['lonrfname']) as lonrfile: # heads: "mutation,LONR,mutation.type,position,father,son,flag" reader = csv.DictReader(lonrfile) for line in reader: assert len(line['mutation']) == 2 assert line['mutation.type'] in ('S', 'R') assert line['flag'] in ('TRUE', 'FALSE') mutation = line['mutation'].upper() # dnapars has it upper case already, but neighbor has it lower case parent_name = final_name(line['father']) child_name = final_name(line['son']) parent_seq = nodefos[parent_name]['seq'] pos = int(line['position']) - 1 # switch from one- to zero-indexing child_seq = nodefos[child_name]['seq'] if parent_seq[pos] != mutation[0] or child_seq[pos] != mutation[1]: print 'parent: %s' % parent_seq print ' child: %s' % utils.color_mutants(parent_seq, child_seq, align=True) raise Exception('mutation info (%s at %d) doesn\'t match sequences (see above)' % (mutation, pos)) lonrfos.append({ 'mutation' : mutation, 'lonr' : float(line['LONR']), 'synonymous' : line['mutation.type'] == 'S', 'position' : pos, 'parent' : parent_name, 'child' : child_name, 'affected_by_descendents' : line['flag'] == 'TRUE', }) if debug: lfo = lonrfos[-1] print ' %3d %2s %5.2f %s / %s %4s %-20s' % (lfo['position'], lfo['mutation'], lfo['lonr'], 'x' if lfo['synonymous'] else ' ', 'x' if lfo['affected_by_descendents'] else ' ', lfo['parent'], lfo['child']) # check for duplicate nodes (not sure why lonr.r kicks these, but I should probably collapse them at some point) # in simulation, we sample internal nodes, but then lonr.r's tree construction forces these to be leaves, but then frequently they're immediately adjacent to internal nodes in lonr.r's tree... 
so we try to collapse them duplicate_groups = utils.group_seqs_by_value(nodefos.keys(), keyfunc=lambda q: nodefos[q]['seq']) duplicate_groups = [g for g in duplicate_groups if len(g) > 1] if len(duplicate_groups) > 0: n_max = 15 dbg_str = ', '.join([' '.join(g) for g in duplicate_groups[:n_max]]) # only print the first 15 of 'em, if there's more if len(duplicate_groups) > n_max: dbg_str += utils.color('blue', ' [...]') print ' collapsing %d groups of nodes with duplicate sequences (probably just internal nodes that were renamed by lonr.r): %s' % (len(duplicate_groups), dbg_str) for dgroup in duplicate_groups: non_phylip_names = [n for n in dgroup if get_node_type_from_name(n) is not None] if len(non_phylip_names) == 0: # and phylip internal node names are of form str(<integer>), so just choose the first alphabetically, because whatever name_to_use = sorted(dgroup)[0] elif len(non_phylip_names) == 1: name_to_use = non_phylip_names[0] else: raise Exception('wtf %s (should\'ve been either one or zero non-phylip names)' % non_phylip_names) names_to_remove = [n for n in dgroup if n != name_to_use] for rname in names_to_remove: # only info in here a.t.m. is the sequence del nodefos[rname] # NOTE not collapsing nodes in tree to match <nodefos> (see comment on next line) # collapse_nodes(dtree, name_to_use, rname, allow_failure=True, debug=True) # holy fuckballs this is not worth the effort (it doesn't really work because the tree is too screwed up) [just gave up and added the duplicate info to the return dict] for lfo in lonrfos: for key in ('parent', 'child'): if lfo[key] in names_to_remove: lfo[key] = name_to_use return {'tree' : dtree.as_string(schema='newick'), 'nodes' : nodefos, 'values' : lonrfos} # ---------------------------------------------------------------------------------------- def run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, lonr_code_file=None, phylip_treefile=None, phylip_seqfile=None, seed=1, debug=False): if lonr_code_file is None: lonr_code_file = os.path.dirname(os.path.realpath(__file__)).replace('/python', '/bin/lonr.r') if not os.path.exists(lonr_code_file): raise Exception('lonr code file %s d.n.e.' % lonr_code_file) if tree_method not in ('dnapars', 'neighbor'): raise Exception('unexpected lonr tree method %s' % tree_method) # # installation stuff # rcmds = [ # 'source("https://bioconductor.org/biocLite.R")', # 'biocLite("Biostrings")', # 'install.packages("seqinr", repos="http://cran.rstudio.com/")', # ] # utils.run_r(rcmds, workdir) input_seqfile = workdir + '/input-seqs.fa' with open(input_seqfile, 'w') as iseqfile: for sfo in input_seqfos: iseqfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq'])) existing_phylip_output_str = '' if phylip_treefile is not None: # using existing phylip output, e.g. 
from cft tree = get_dendro_tree(treefname=phylip_treefile) edgefos = [] for node in tree.preorder_node_iter(): for edge in node.child_edge_iter(): edgefos.append({'from' : node.taxon.label, 'to' : edge.head_node.taxon.label, 'weight' : edge.length}) existing_edgefname = workdir + '/edges.csv' existing_node_seqfname = workdir + '/infered-node-seqs.fa' with open(existing_edgefname, 'w') as edgefile: writer = csv.DictWriter(edgefile, ('from', 'to', 'weight')) writer.writeheader() for line in edgefos: writer.writerow(line) with open(existing_node_seqfname, 'w') as node_seqfile: writer = csv.DictWriter(node_seqfile, ('head', 'seq')) writer.writeheader() for sfo in utils.read_fastx(phylip_seqfile): writer.writerow({'head' : sfo['name'], 'seq' : sfo['seq']}) existing_phylip_output_str = ', existing.edgefile="%s", existing.node.seqfile="%s"' % (existing_edgefname, existing_node_seqfname) rcmds = [ 'source("%s")' % lonr_code_file, 'set.seed(%d)' % seed, 'G.phy.outfname = "%s"' % lonr_files['phy.outfname'], # this is a pretty shitty way to do this, but the underlying problem is that there's too many files, but I don't want to parse them all into one or two files in R, so I need to pass all of 'em to the calling python script 'G.phy.treefname = "%s"' % lonr_files['phy.treefname'], 'G.outseqs.fname = "%s"' % lonr_files['outseqs.fname'], 'G.edgefname = "%s"' % lonr_files['edgefname'], 'G.names.fname = "%s"' % lonr_files['names.fname'], 'G.lonrfname = "%s"' % lonr_files['lonrfname'], 'compute.LONR(method="%s", infile="%s", workdir="%s/", outgroup="%s"%s)' % (tree_method, input_seqfile, workdir, naive_seq_name, existing_phylip_output_str), ] outstr, errstr = utils.run_r(rcmds, workdir, extra_str=' ', return_out_err=True, debug=debug) if debug: print utils.pad_lines(outstr) print utils.pad_lines(errstr) os.remove(input_seqfile) if phylip_treefile is not None: os.remove(existing_edgefname) os.remove(existing_node_seqfname) # ---------------------------------------------------------------------------------------- def calculate_liberman_lonr(input_seqfos=None, line=None, reco_info=None, phylip_treefile=None, phylip_seqfile=None, tree_method=None, naive_seq_name='X-naive-X', seed=1, debug=False): # NOTE see issues/notes in bin/lonr.r if phylip_treefile is not None or phylip_seqfile is not None: raise Exception('never got this (passing phylip output files to lonr.r) to work -- lonr.r kept barfing, although if you were running exactly the same phylip commands as lonr.r does, it would probably work.') assert input_seqfos is None or line is None if input_seqfos is None: input_seqfos = [{'name' : line['unique_ids'][iseq], 'seq' : line['seqs'][iseq]} for iseq in range(len(line['unique_ids']))] input_seqfos.insert(0, {'name' : naive_seq_name, 'seq' : line['naive_seq']}) if tree_method is None: tree_method = 'dnapars' if len(input_seqfos) < 500 else 'neighbor' workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work')) os.makedirs(workdir) if debug: print ' %s' % utils.color('green', 'lonr:') run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, phylip_treefile=phylip_treefile, phylip_seqfile=phylip_seqfile, seed=seed, debug=debug) lonr_info = parse_lonr(workdir, input_seqfos, naive_seq_name, reco_info=reco_info, debug=debug) for fn in lonr_files.values(): os.remove(workdir + '/' + fn) os.rmdir(workdir) return lonr_info # ---------------------------------------------------------------------------------------- def get_tree_metric_lines(annotations, cpath, reco_info, 
use_true_clusters, min_overlap_fraction=0.5, only_use_best_partition=False, only_plot_uids_with_affinity_info=False, glfo=None, debug=False): # collect inferred and true events inf_lines_to_use, true_lines_to_use = None, None if use_true_clusters: # use clusters from the true partition, rather than inferred one assert reco_info is not None true_partition = utils.get_partition_from_reco_info(reco_info) print ' using %d true clusters to calculate inferred selection metrics (sizes: %s)' % (len(true_partition), ' '.join(str(l) for l in sorted([len(c) for c in true_partition], reverse=True))) if debug: print ' choosing N N N frac (N chosen)' print ' from true & chosen = in common in common (w/out duplicates)' inf_lines_to_use, true_lines_to_use = [], [] chosen_ustrs = set() # now that we're using the fraction instead of the raw total, we mostly shouldn't get multiple true clusters corresponding to the same inferred cluster, but maybe it'll still happen occasionally for cluster in true_partition: true_lines_to_use.append(utils.synthesize_multi_seq_line_from_reco_info(cluster, reco_info)) # note: duplicates (a tiny bit of) code in utils.print_true_events() n_max_in_common, max_frac_in_common, ustr_to_use = None, None, None # look for the inferred cluster that has the most uids in common with this true cluster for ustr in set(annotations) - chosen_ustrs: # order will be different in reco info and inferred clusters n_in_common = len(set(utils.uids_and_dups(annotations[ustr])) & set(cluster)) # can't just look for the actual cluster since we collapse duplicates, but bcr-phylo doesn't (but maybe I should throw them out when parsing bcr-phylo output) frac_in_common = n_in_common**2 / float(len(utils.uids_and_dups(annotations[ustr])) * len(cluster)) # and have to use frac instead of total to guard against inferred clusters that include several true clusters (reminder: these inferred clusters may have been run with --n-final-clusters 1 or something similar) if max_frac_in_common is None or frac_in_common > max_frac_in_common: ustr_to_use = ustr n_max_in_common = n_in_common max_frac_in_common = frac_in_common if max_frac_in_common is None: raise Exception('cluster \'%s\' not found in inferred annotations (probably because use_true_clusters was set)' % ':'.join(cluster)) if max_frac_in_common < min_overlap_fraction: raise Exception('overlap fraction %.3f too small: for true cluster (size %d), highest was for inferred cluster with size %d (%d including duplicates). Maybe need to set --simultaneous-true-clonal-seqs (if you did set --simultaneous-true-clonal-seqs, you probably need to set --no-indels, i.e. a true cluster got split apart because of incorrect indel calls).' 
% (max_frac_in_common, len(cluster), len(annotations[ustr_to_use]['unique_ids']), len(utils.uids_and_dups(annotations[ustr_to_use])))) if debug: print ' %4d %4d %4d %4d %4.2f (%d)' % (len(set(annotations) - chosen_ustrs), len(cluster), len(utils.uids_and_dups(annotations[ustr_to_use])), n_max_in_common, max_frac_in_common, len(annotations[ustr_to_use]['unique_ids'])) if max_frac_in_common < 1: print ' note: couldn\'t find an inferred cluster that corresponded exactly to the true cluster (best was %d & %d = %d (frac %.2f), where the inferred includes %d duplicates)' % (len(utils.uids_and_dups(annotations[ustr_to_use])), len(cluster), n_max_in_common, max_frac_in_common, utils.n_dups(annotations[ustr_to_use])) if ustr_to_use in chosen_ustrs: raise Exception('chose the same inferred cluster to correspond to two different true clusters') chosen_ustrs.add(ustr_to_use) inf_lines_to_use.append(annotations[ustr_to_use]) else: # use clusters from the inferred partition (whether from <cpath> or <annotations>), and synthesize clusters exactly matching these using single true annotations from <reco_info> (to repeat: these are *not* true clusters) inf_lines_to_use = annotations.values() # we used to restrict it to clusters in the best partition, but I'm switching since I think whenever there are extra ones in <annotations> we always actually want their tree metrics (at the moment there will only be extra ones if either --calculate-alternative-annotations or --write-additional-cluster-annotations are set, but in the future it could also be the default) if only_use_best_partition: assert cpath is not None and cpath.i_best is not None inf_lines_to_use = [l for l in inf_lines_to_use if l['unique_ids'] in cpath.partitions[cpath.i_best]] if only_plot_uids_with_affinity_info: assert False # should work fine as is, but needs to be checked and integrated with things tmplines = [] for line in inf_lines_to_use: iseqs_to_keep = [i for i, a in enumerate(line['affinities']) if a is not None] if len(iseqs_to_keep) == 0: continue print ' keeping %d/%d' % (len(iseqs_to_keep), len(line['unique_ids'])) new_line = copy.deepcopy(line) # *really* don't want to modify the annotations from partitiondriver utils.restrict_to_iseqs(new_line, iseqs_to_keep, glfo) tmplines.append(new_line) inf_lines_to_use = tmplines if reco_info is not None: for line in inf_lines_to_use: true_line = utils.synthesize_multi_seq_line_from_reco_info(line['unique_ids'], reco_info) true_lines_to_use.append(true_line) return inf_lines_to_use, true_lines_to_use # ---------------------------------------------------------------------------------------- def plot_tree_metrics(base_plotdir, inf_lines_to_use, true_lines_to_use, ete_path=None, workdir=None, include_relative_affy_plots=False, only_csv=False, queries_to_include=None, debug=False): import plotting import lbplotting start = time.time() print ' plotting to %s' % base_plotdir # inferred plots if true_lines_to_use is None: # at least for now I'm turning off inferred plots when we have true lines, the only reason we want it (I think) is to compare the effect of true vs inferred tree, which I'm not doing now, and it's slow af has_affinities = any('affinities' in l for l in inf_lines_to_use) # we'd expect that either all or none of the families have affinity info, but oh well this makes it more general inf_plotdir = base_plotdir + '/inferred-tree-metrics' utils.prep_dir(inf_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lb_metrics.keys()) fnames = [] if has_affinities: 
lbplotting.plot_lb_vs_affinity(inf_plotdir, inf_lines_to_use, 'aa-lbi', only_csv=only_csv, fnames=fnames, is_true_line=False, debug=debug) if not only_csv: lbplotting.plot_lb_distributions('aa-lbi', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8) if has_affinities: lbplotting.plot_lb_vs_affinity(inf_plotdir, inf_lines_to_use, 'cons-dist-aa', only_csv=only_csv, fnames=fnames, is_true_line=False, debug=debug) if not only_csv: # all the various scatter plots are really slow lbplotting.plot_lb_distributions('cons-dist-aa', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8) lbplotting.make_lb_scatter_plots('cons-dist-aa', inf_plotdir, 'aa-lbi', inf_lines_to_use, fnames=fnames, is_true_line=False, colorvar='affinity' if has_affinities else 'edge-dist', add_jitter=False, iclust_fnames=None if has_affinities else 8, queries_to_include=queries_to_include) # it's important to have nuc-lbi vs aa-lbi so you can see if they're super correlated (which means we didn't have any of the internal nodes): lbplotting.make_lb_scatter_plots('aa-lbi', inf_plotdir, 'lbi', inf_lines_to_use, fnames=fnames, is_true_line=False, add_jitter=False, iclust_fnames=None if has_affinities else 8, queries_to_include=queries_to_include, add_stats='correlation') lbplotting.plot_lb_distributions('lbr', inf_plotdir, inf_lines_to_use, fnames=fnames, only_overall=False, iclust_fnames=None if has_affinities else 8) if ete_path is not None: lbplotting.plot_lb_trees(['aa-lbi', 'lbr', 'cons-dist-aa'], inf_plotdir, inf_lines_to_use, ete_path, workdir, is_true_line=False, queries_to_include=queries_to_include) subdirs = [d for d in os.listdir(inf_plotdir) if os.path.isdir(inf_plotdir + '/' + d)] plotting.make_html(inf_plotdir, fnames=fnames, new_table_each_row=True, htmlfname=inf_plotdir + '/overview.html', extra_links=[(subd, '%s/%s/' % (inf_plotdir, subd)) for subd in subdirs]) # true plots if true_lines_to_use is not None: if 'affinities' not in true_lines_to_use[0] or all(affy is None for affy in true_lines_to_use[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody # print ' %s no affinity information in this simulation, so can\'t plot lb/affinity stuff' % utils.color('yellow', 'note') print ' selection metric plotting time (no true plots)): %.1f sec' % (time.time() - start) return true_plotdir = base_plotdir + '/true-tree-metrics' utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lb_metrics.keys()) fnames = [] for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']): lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, 'aa-lbi', is_true_line=True, affy_key=affy_key, only_csv=only_csv, fnames=fnames, debug=debug) lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, 'cons-dist-aa', is_true_line=True, affy_key=affy_key, only_csv=only_csv, fnames=fnames, debug=debug) if not only_csv: lbplotting.make_lb_scatter_plots('cons-dist-aa', true_plotdir, 'aa-lbi', true_lines_to_use, fnames=fnames, is_true_line=True, colorvar='affinity', only_overall=True, add_jitter=False) lbplotting.make_lb_scatter_plots('aa-lbi', true_plotdir, 'lbi', true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True, add_jitter=False, add_stats='correlation') lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir + '/lbr', true_lines_to_use, 'lbr', 
is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug) if not only_csv: # mtmp = 'lbi' # lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='%s-ptile'%mtmp, colorvar='edge-dist', add_jitter=True) # lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='%s-ptile'%mtmp, colorvar='edge-dist', only_overall=False, choose_among_families=True) # lbplotting.make_lb_scatter_plots('shm', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, colorvar='edge-dist', only_overall=True, add_jitter=False) # lbplotting.make_lb_scatter_plots('affinity-ptile', true_plotdir, mtmp, true_lines_to_use, fnames=fnames, is_true_line=True, yvar='cons-dist-nuc-ptile', colorvar='edge-dist', add_jitter=True) for lb_metric in lb_metrics: lbplotting.make_lb_affinity_joyplots(true_plotdir + '/joyplots', true_lines_to_use, lb_metric, fnames=fnames) # lbplotting.plot_lb_distributions('lbi', true_plotdir, true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True) # lbplotting.plot_lb_distributions('lbr', true_plotdir, true_lines_to_use, fnames=fnames, is_true_line=True, only_overall=True) if ete_path is not None: lbplotting.plot_lb_trees(['aa-lbi', 'lbr', 'cons-dist-aa'], true_plotdir, true_lines_to_use, ete_path, workdir, is_true_line=True) # for lb_metric in lb_metrics: # lbplotting.plot_true_vs_inferred_lb(true_plotdir + '/' + lb_metric, true_lines_to_use, inf_lines_to_use, lb_metric, fnames=fnames) # lbplotting.plot_cons_seq_accuracy(true_plotdir, true_lines_to_use, fnames=fnames) subdirs = [d for d in os.listdir(true_plotdir) if os.path.isdir(true_plotdir + '/' + d)] plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(subd, '%s/%s/' % (true_plotdir, subd)) for subd in subdirs]) print ' selection metric plotting time: %.1f sec' % (time.time() - start) # ---------------------------------------------------------------------------------------- def get_tree_for_line(line, treefname=None, cpath=None, annotations=None, use_true_clusters=False, ignore_existing_internal_node_labels=False, debug=False): # figure out how we want to get the inferred tree if treefname is not None: dtree = get_dendro_tree(treefname=treefname, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, debug=debug) origin = 'treefname' if len(set([n.taxon.label for n in dtree.preorder_node_iter()]) & set(line['unique_ids'])) == 0: # if no nodes in common between line and tree in file (e.g. you passed in the wrong file or didn't set --cluster-indices) dtree = None origin = 'no-uids' elif False: # use_liberman_lonr_tree: # NOTE see issues/notes in bin/lonr.r lonr_info = calculate_liberman_lonr(line=line, reco_info=reco_info, debug=debug) dtree = get_dendro_tree(treestr=lonr_info['tree']) # line['tree-info']['lonr'] = lonr_info origin = 'lonr' elif cpath is not None and cpath.i_best is not None and not use_true_clusters and line['unique_ids'] in cpath.partitions[cpath.i_best]: # if <use_true_clusters> is set, then the clusters in <inf_lines_to_use> won't correspond to the history in <cpath>, so this won't work NOTE now that I've added the direct check if the unique ids are in the best partition, i can probably remove the use_true_clusters check, but I don't want to mess with it a.t.m. 
assert annotations is not None i_only_cluster = cpath.partitions[cpath.i_best].index(line['unique_ids']) cpath.make_trees(annotations=annotations, i_only_cluster=i_only_cluster, get_fasttrees=True, debug=False) dtree = cpath.trees[i_only_cluster] # as we go through the loop, the <cpath> is presumably filling all of these in origin = 'cpath' else: seqfos = [{'name' : uid, 'seq' : seq} for uid, seq in zip(line['unique_ids'], line['seqs'])] dtree = get_fasttree_tree(seqfos, naive_seq=line['naive_seq'], debug=debug) origin = 'fasttree' return {'tree' : dtree, 'origin' : origin} # ---------------------------------------------------------------------------------------- def check_lb_values(line, lbvals): for metric in [m for m in lbvals if m in lb_metrics]: missing = set(line['unique_ids']) - set(lbvals[metric]) if len(missing) > 0: # we expect to get extra ones in the tree, for inferred ancestral nodes for which we don't have sequences, but missing ones probabliy indicate something's up # raise Exception('uids in annotation not the same as lb info keys\n missing: %s\n extra: %s' % (' '.join(set(line['unique_ids']) - set(lbvals[metric])), ' '.join(set(lbvals[metric]) - set(line['unique_ids'])))) extra = set(lbvals[metric]) - set(line['unique_ids']) common = set(line['unique_ids']) & set(lbvals[metric]) print ' %s uids in annotation not the same as lb info keys for \'%s\': %d missing %d extra (%d in common)' % (utils.color('red', 'error'), metric, len(missing), len(extra), len(common)) if len(missing) + len(extra) < 35: print ' missing: %s\n extra: %s\n common: %s' % (' '.join(missing), ' '.join(extra), ' '.join(common)) # NOTE this is not tested, but might be worth using in the future # # ---------------------------------------------------------------------------------------- # def get_trees_for_annotations(annotations, cpath=None, workdir=None, min_cluster_size=default_min_selection_metric_cluster_size, cluster_indices=None, debug=False): # NOTE this duplicates some code in the following function (but I want them separate since I don't really care about this fcn much) # print 'getting trees' # inf_lines_to_use = annotations.values() # n_before = len(inf_lines_to_use) # inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True) # n_after = len(inf_lines_to_use) # after removing the small ones # tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))} # print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use)) # print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size) # if cluster_indices is not None: # if min(cluster_indices) < 0 or max(cluster_indices) >= len(inf_lines_to_use): # raise Exception('invalid cluster indices %s for partition with %d clusters' % (cluster_indices, len(inf_lines_to_use))) # print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in cluster_indices), utils.plural(len(cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in cluster_indices)) # n_already_there = 0 # for iclust, line in enumerate(inf_lines_to_use): # if cluster_indices is not None and iclust not in cluster_indices: # continue # if debug: # print ' %s sequence cluster' % 
utils.color('green', str(len(line['unique_ids']))) # if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them) # if debug: # print ' %s overwriting tree that was already in <line>' % utils.color('yellow', 'warning') # n_already_there += 1 # treefo = get_tree_for_line(line, cpath=cpath, annotations=annotations, debug=debug) # if treefo is None: # continue # tree_origin_counts[treefo['origin']]['count'] += 1 # line['tree-info'] = {} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string # line['tree-info']['tree'] = treefo['tree'].as_string(schema='newick') # print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0) # if n_already_there > 0: # print ' %s overwriting %d / %d that already had trees' % (utils.color('yellow', 'warning'), n_already_there, n_after) # ---------------------------------------------------------------------------------------- def get_aa_lb_metrics(line, nuc_dtree, lb_tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize_lbi=False, extra_str=None, iclust=None, debug=False): # and add them to <line> utils.add_seqs_aa(line) aa_dtree = get_aa_tree(nuc_dtree, line, extra_str=extra_str, debug=debug) aa_lb_info = calculate_lb_values(aa_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, only_calc_metric=only_calc_metric, annotation=line, dont_normalize=dont_normalize_lbi, extra_str=extra_str, iclust=iclust, debug=debug) if 'tree-info' not in line: line['tree-info'] = {'lb' : {}} line['tree-info']['lb']['aa-tree'] = aa_dtree.as_string(schema='newick') for nuc_metric in [k for k in aa_lb_info if k != 'tree']: line['tree-info']['lb']['aa-'+nuc_metric] = aa_lb_info[nuc_metric] # ---------------------------------------------------------------------------------------- def calculate_tree_metrics(annotations, lb_tau, lbr_tau_factor=None, cpath=None, treefname=None, reco_info=None, use_true_clusters=False, base_plotdir=None, ete_path=None, workdir=None, dont_normalize_lbi=False, only_csv=False, min_cluster_size=default_min_selection_metric_cluster_size, dtr_path=None, train_dtr=False, dtr_cfg=None, add_aa_consensus_distance=False, add_aa_lb_metrics=False, true_lines_to_use=None, include_relative_affy_plots=False, cluster_indices=None, outfname=None, only_use_best_partition=False, glfo=None, queries_to_include=None, ignore_existing_internal_node_labels=False, debug=False): print 'getting selection metrics' if reco_info is not None: if not use_true_clusters: print ' note: getting selection metrics on simulation without setting <use_true_clusters> (i.e. probably without setting --simultaneous-true-clonal-seqs)' for tmpline in reco_info.values(): assert len(tmpline['unique_ids']) == 1 # at least for the moment, we're splitting apart true multi-seq lines when reading in seqfileopener.py if dtr_path is not None: assert not dont_normalize_lbi # it's trained on normalized lbi, so results are garbage if you don't normalize dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models = init_dtr(train_dtr, dtr_path, cfg_fname=dtr_cfg) if true_lines_to_use is not None: # i.e. 
being called by bin/dtr-run.py assert reco_info is None inf_lines_to_use = None else: # called from python/partitiondriver.py inf_lines_to_use, true_lines_to_use = get_tree_metric_lines(annotations, cpath, reco_info, use_true_clusters, only_use_best_partition=only_use_best_partition, glfo=glfo) # NOTE these continue to be modified (by removing clusters we don't want) further down, and then they get passed to the plotting functions # get tree and calculate metrics for inferred lines if inf_lines_to_use is not None: n_before = len(inf_lines_to_use) inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True) n_after = len(inf_lines_to_use) # after removing the small ones tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))} print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use)) print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size) if cluster_indices is not None: if min(cluster_indices) < 0 or max(cluster_indices) >= len(inf_lines_to_use): raise Exception('invalid cluster indices %s for partition with %d clusters' % (cluster_indices, len(inf_lines_to_use))) print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in cluster_indices), utils.plural(len(cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in cluster_indices)) n_already_there, n_skipped_uid = 0, 0 final_inf_lines = [] for iclust, line in enumerate(inf_lines_to_use): if cluster_indices is not None and iclust not in cluster_indices: continue if debug: print ' %s sequence cluster' % utils.color('green', str(len(line['unique_ids']))) treefo = get_tree_for_line(line, treefname=treefname, cpath=cpath, annotations=annotations, use_true_clusters=use_true_clusters, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, debug=debug) if treefo['tree'] is None and treefo['origin'] == 'no-uids': n_skipped_uid += 1 continue tree_origin_counts[treefo['origin']]['count'] += 1 if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them) if debug: print ' %s overwriting selection metric info that was already in <line>' % utils.color('yellow', 'warning') n_already_there += 1 line['tree-info'] = {} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string line['tree-info']['lb'] = calculate_lb_values(treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=line, dont_normalize=dont_normalize_lbi, extra_str='inf tree', iclust=iclust, debug=debug) check_lb_values(line, line['tree-info']['lb']) # would be nice to remove this eventually, but I keep runnining into instances where dendropy is silently removing nodes if add_aa_consensus_distance: add_cdists_to_lbfo(line, line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # this adds the values both directly to the <line>, and to <line['tree-info']['lb']>, but the former won't end up in the output file unless the corresponding keys are specified as extra annotation columns (this distinction/duplication is worth having, although it's not ideal) if 
add_aa_lb_metrics: get_aa_lb_metrics(line, treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=dont_normalize_lbi, extra_str='(AA inf tree, iclust %d)'%iclust, iclust=iclust, debug=debug) if dtr_path is not None and not train_dtr: # don't want to train on data calc_dtr(False, line, line['tree-info']['lb'], treefo['tree'], None, pmml_models, dtr_cfgvals) # adds predicted dtr values to lbfo (hardcoded False and None are to make sure we don't train on data) final_inf_lines.append(line) print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0) if n_skipped_uid > 0: print ' skipped %d/%d clusters that had no uids in common with tree in %s' % (n_skipped_uid, n_after, treefname) if n_already_there > 0: print ' %s replaced tree info in %d / %d that already had it' % (utils.color('yellow', 'warning'), n_already_there, n_after) inf_lines_to_use = final_inf_lines # replace it with a new list that only has the clusters we really want # calculate lb values for true lines/trees if true_lines_to_use is not None: # note that if <base_plotdir> *isn't* set, we don't actually do anything with the true lb values n_true_before = len(true_lines_to_use) true_lines_to_use = sorted([l for l in true_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True) n_true_after = len(true_lines_to_use) print ' also doing %d true cluster%s with size%s: %s' % (n_true_after, utils.plural(n_true_after), utils.plural(n_true_after), ' '.join(str(len(l['unique_ids'])) for l in true_lines_to_use)) print ' skipping %d smaller than %d' % (n_true_before - n_true_after, min_cluster_size) final_true_lines = [] for iclust, true_line in enumerate(true_lines_to_use): if cluster_indices is not None and iclust not in cluster_indices: continue true_dtree = get_dendro_tree(treestr=true_line['tree']) true_lb_info = calculate_lb_values(true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=true_line, dont_normalize=dont_normalize_lbi, extra_str='true tree', iclust=iclust, debug=debug) true_line['tree-info'] = {'lb' : true_lb_info} check_lb_values(true_line, true_line['tree-info']['lb']) # would be nice to remove this eventually, but I keep runnining into instances where dendropy is silently removing nodes if add_aa_lb_metrics: get_aa_lb_metrics(true_line, true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=dont_normalize_lbi, extra_str='(AA true tree, iclust %d)'%iclust, iclust=iclust, debug=debug) if add_aa_consensus_distance: add_cdists_to_lbfo(true_line, true_line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # see comment in previous call above if dtr_path is not None: calc_dtr(train_dtr, true_line, true_lb_info, true_dtree, trainfo, pmml_models, dtr_cfgvals) # either adds training values to trainfo, or adds predicted dtr values to lbfo final_true_lines.append(true_line) true_lines_to_use = final_true_lines # replace it with a new list that only has the clusters we really want if dtr_path is not None: # it would be nice to eventually merge these two blocks, i.e. 
use the same code to plot dtr and lbi/lbr if train_dtr: print ' training decision trees into %s' % dtr_path if dtr_cfgvals['n_train_per_family'] is not None: print ' n_train_per_family: using only %d from each family for among-families dtr' % dtr_cfgvals['n_train_per_family'] for cg in cgroups: for tvar in dtr_targets[cg]: train_dtr_model(trainfo[cg][tvar], dtr_path, dtr_cfgvals, cg, tvar) elif base_plotdir is not None: assert true_lines_to_use is not None plstart = time.time() assert ete_path is None or workdir is not None # need the workdir to make the ete trees import plotting import lbplotting # if 'affinities' not in annotations[0] or all(affy is None for affy in annotations[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody # return print ' plotting to %s' % base_plotdir true_plotdir = base_plotdir + '/true-tree-metrics' lbmlist = sorted(m for m in dtr_metrics if m not in missing_models) # sorted() is just so the order in the html file matches that in the lb metric one utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lbmlist) fnames = [] for lbm in lbmlist: if 'delta-affinity' in lbm: lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+lbm, true_lines_to_use, lbm, is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug) else: for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']): lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, lbm, is_true_line=True, only_csv=only_csv, fnames=fnames, affy_key=affy_key) if not only_csv: plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(subd, '%s/%s/' % (true_plotdir, subd)) for subd in lbmlist]) print ' dtr plotting time %.1fs' % (time.time() - plstart) elif base_plotdir is not None: assert ete_path is None or workdir is not None # need the workdir to make the ete trees plot_tree_metrics(base_plotdir, inf_lines_to_use, true_lines_to_use, ete_path=ete_path, workdir=workdir, include_relative_affy_plots=include_relative_affy_plots, only_csv=only_csv, queries_to_include=queries_to_include, debug=debug) if outfname is not None: print ' writing selection metrics to %s' % outfname utils.prep_dir(None, fname=outfname, allow_other_files=True) def dumpfo(tl): dumpfo = {'unique_ids' : l['unique_ids']} dumpfo.update(l['tree-info']) return dumpfo with open(outfname, 'w') as tfile: json.dump([dumpfo(l) for l in inf_lines_to_use if 'tree-info' in l], tfile) # ---------------------------------------------------------------------------------------- def init_dtr(train_dtr, dtr_path, cfg_fname=None): # ---------------------------------------------------------------------------------------- def read_cfg(): if cfg_fname is None: # just use the defaults dtr_cfgvals = {} else: # read cfg values from a file with open(cfg_fname) as yfile: dtr_cfgvals = yaml.load(yfile, Loader=Loader) if 'vars' in dtr_cfgvals: # format is slightly different in the file (in the file we don't require the explicit split between per-seq and per-cluster variables) allowed_vars = set(v for cg in cgroups for pc in dtr_vars[cg] for v in dtr_vars[cg][pc]) cfg_vars = set(v for cg in cgroups for v in dtr_cfgvals['vars'][cg]) bad_vars = cfg_vars - allowed_vars if len(bad_vars) > 0: raise Exception('unexpected dtr var%s (%s) in cfg file %s' % (utils.plural(len(bad_vars)), ', '.join(bad_vars), cfg_fname)) for cg in cgroups: dtr_cfgvals['vars'][cg] = {pc : [v for v in dtr_vars[cg][pc] if v in 
dtr_cfgvals['vars'][cg]] for pc in pchoices} # loop over the allowed vars here so the order is always the same for tk in set(default_dtr_options) - set(dtr_cfgvals): # set any missing ones to the defaults if tk == 'vars': dtr_cfgvals[tk] = dtr_vars elif tk == 'n_jobs': dtr_cfgvals[tk] = utils.auto_n_procs() # isn't working when I put it up top, not sure why else: dtr_cfgvals[tk] = default_dtr_options[tk] return dtr_cfgvals # ---------------------------------------------------------------------------------------- def read_model(cg, tvar): if 'pypmml' not in sys.modules: import pypmml picklefname, pmmlfname = dtrfname(dtr_path, cg, tvar), dtrfname(dtr_path, cg, tvar, suffix='pmml') if os.path.exists(picklefname): # pickle file (i.e. with entire model class written to disk, but *must* be read with the same version of sklearn that was used to write it) [these should always be there, since on old ones they were all we had, and on new ones we write both pickle and pmml] if os.path.exists(pmmlfname): # pmml file (i.e. just with the info to make predictions, but can be read with other software versions) pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname) else: # if the pmml file isn't there, this must be old files, so we read the pickle, convert to pmml, then read that new pmml file if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp) import joblib with open(picklefname) as dfile: skmodels[cg][tvar] = sys.modules['joblib'].load(dfile) write_pmml(pmmlfname, skmodels[cg][tvar], get_dtr_varnames(cg, dtr_cfgvals['vars']), tvar) pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname) else: if cg == 'among-families' and tvar == 'delta-affinity': # this is the only one that should be missing, since we added it last missing_models.append('-'.join([cg, tvar, metric_method])) # this is fucking dumb, but I need it later when I have the full name, not cg and tvar print ' %s %s doesn\'t exist, skipping (%s)' % (cg, tvar, dtrfname(dtr_path, cg, tvar)) return raise Exception('model file doesn\'t exist: %s' % picklefname) # ---------------------------------------------------------------------------------------- dtr_cfgvals = read_cfg() skmodels = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups} pmml_models = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups} missing_models = [] trainfo = None if train_dtr: trainfo = {cg : {tv : {'in' : [], 'out' : []} for tv in dtr_targets[cg]} for cg in cgroups} # , 'weights' : []} else: rstart = time.time() for cg in cgroups: for tvar in dtr_targets[cg]: read_model(cg, tvar) print ' read decision trees from %s (%.1fs)' % (dtr_path, time.time() - rstart) return dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models # ---------------------------------------------------------------------------------------- def calc_dtr(train_dtr, line, lbfo, dtree, trainfo, pmml_models, dtr_cfgvals, skmodels=None): # either add training values for <line>, or predict on it # ---------------------------------------------------------------------------------------- def add_dtr_training_vals(cg, tvar, dtr_invals): # transfer dtr input values to tfo['in'], and add output (affinity stuff) values to tfo['out'] # trainfo[XXX]['weights'] += line['affinities'] def get_delta_affinity_vals(): tmpvals = {s : [] for s in tfo} for iseq, uid in 
enumerate(line['unique_ids']): n_steps = get_n_ancestors_to_affy_change(dtree.find_node_with_taxon_label(uid), dtree, line) if n_steps is None: # can't train on None-type values continue tmpvals['in'].append(dtr_invals[cg][iseq]) tmpvals['out'].append(-n_steps) return tmpvals tfo = trainfo[cg][tvar] if cg == 'within-families': if tvar == 'affinity': tfo['in'] += dtr_invals[cg] max_affy = max(line['affinities']) tfo['out'] += [a / max_affy for a in line['affinities']] elif tvar == 'delta-affinity': tmpvals = get_delta_affinity_vals() tfo['in'] += tmpvals['in'] tfo['out'] += tmpvals['out'] else: assert False elif cg == 'among-families': if dtr_cfgvals['n_train_per_family'] is None: assert tvar == 'affinity' # eh why bother doing the other one tfo['in'] += dtr_invals[cg] tfo['out'] += line['affinities'] else: if tvar == 'affinity': i_to_keep = numpy.random.choice(range(len(line['unique_ids'])), size=dtr_cfgvals['n_train_per_family'], replace=False) tfo['in'] += [dtr_invals[cg][i] for i in i_to_keep] tfo['out'] += [line['affinities'][i] for i in i_to_keep] elif tvar == 'delta-affinity': tmpvals = get_delta_affinity_vals() if len(tmpvals['in']) == 0: # no affinity increases return i_to_keep = numpy.random.choice(range(len(tmpvals['in'])), size=dtr_cfgvals['n_train_per_family'], replace=False) tfo['in'] += [tmpvals['in'][i] for i in i_to_keep] tfo['out'] += [tmpvals['out'][i] for i in i_to_keep] else: assert False else: assert False # ---------------------------------------------------------------------------------------- utils.add_naive_seq_aa(line) utils.add_seqs_aa(line) for mtmp in ['cons-dist-nuc', 'cons-dist-aa']: add_cdists_to_lbfo(line, lbfo, mtmp) dtr_invals = {cg : get_dtr_vals(cg, dtr_cfgvals['vars'], line, lbfo, dtree) for cg in cgroups} # all dtr input variable values, before we fiddle with them for the different dtrs if train_dtr: # train and write new model for cg in cgroups: for tvar in dtr_targets[cg]: add_dtr_training_vals(cg, tvar, dtr_invals) else: # read existing model for cg in cgroups: for tvar in dtr_targets[cg]: if pmml_models[cg][tvar] is None: # only way this can happen atm is old dirs that don't have among-families delta-affinity continue outfo = {} for iseq, uid in enumerate(line['unique_ids']): pmml_invals = {var : val for var, val in zip(get_dtr_varnames(cg, dtr_cfgvals['vars']), dtr_invals[cg][iseq])} # convert from format for sklearn to format for pmml outfo[uid] = pmml_models[cg][tvar].predict(pmml_invals)['predicted_%s'%tvar] # if skmodels[cg][tvar] is not None: # leaving this here cause maybe we'll want to fall back to it or something if pmml ends up having problems # sk_val = skmodels[cg][tvar].predict([dtr_invals[cg][iseq]]) # assert utils.is_normed(sk_val / outfo[uid]) lbfo['-'.join([cg, tvar, 'dtr'])] = outfo # NOTE it would be nice to automate this '-'.join() conversion, it happens in a few places already # ---------------------------------------------------------------------------------------- # differences to calculate_tree_metrics(): this fcn # 1) can run a bunch of metrics that the other can't # 2) mosty focuses on running one metric at a time (as opposed to running all the ones that we typically want on data) # 3) doesn't plot as many things # 4) only runs on simulation (as opposed to making two sets of things, for simulation and data) def calculate_individual_tree_metrics(metric_method, annotations, base_plotdir=None, ete_path=None, workdir=None, lb_tau=None, lbr_tau_factor=None, only_csv=False, min_cluster_size=None, 
include_relative_affy_plots=False, dont_normalize_lbi=False, debug=False): # ---------------------------------------------------------------------------------------- def get_combo_lbfo(varlist, iclust, line, is_aa_lb=False): if 'shm-aa' in varlist and 'seqs_aa' not in line: utils.add_naive_seq_aa(line) utils.add_seqs_aa(line) lbfo = {} for mtmp in [m for m in varlist if 'cons-dist-' in m]: add_cdists_to_lbfo(line, lbfo, mtmp) dtree = get_dendro_tree(treestr=line['tree']) lbvars = set(varlist) & set(['lbi', 'lbr']) # although if is_aa_lb is set, we're really calculating aa-lbi/aa-lbr tmp_tau, tmp_factor = lb_tau, lbr_tau_factor # weird/terrible hack (necessary to allow the calculation fcn to enforce that either a) we're calculating both metrics, so we probably want the factor applied or b) we're only calculating one, and we're not normalizing (i.e. we're probably calculating the bounds) if len(lbvars) == 2: only_calc_metric = None elif len(lbvars) == 1: only_calc_metric = list(lbvars)[0] if only_calc_metric == 'lbr': tmp_tau *= lbr_tau_factor tmp_factor = None else: raise Exception('unexpected combination of variables %s' % varlist) if is_aa_lb: get_aa_lb_metrics(line, dtree, tmp_tau, lbr_tau_factor=tmp_factor, only_calc_metric=only_calc_metric, dont_normalize_lbi=dont_normalize_lbi, extra_str='true tree', iclust=iclust) lbfo.update(line['tree-info']['lb']) else: tmp_lb_info = calculate_lb_values(dtree, tmp_tau, only_calc_metric=only_calc_metric, lbr_tau_factor=tmp_factor, annotation=line, dont_normalize=dont_normalize_lbi, extra_str='true tree', iclust=iclust) for lbm in [m for m in lb_metrics if m in varlist]: # this skips the tree, which I guess isn't a big deal lbfo[lbm] = {u : tmp_lb_info[lbm][u] for u in line['unique_ids']} # remove the ones that aren't in <line> (since we don't have sequences for them, so also no consensus distance) return dtree, lbfo # ---------------------------------------------------------------------------------------- if min_cluster_size is None: min_cluster_size = default_min_selection_metric_cluster_size n_before = len(annotations) annotations = sorted([l for l in annotations if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True) n_after = len(annotations) print ' getting non-lb metric %s for %d true cluster%s with size%s: %s' % (metric_method, n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in annotations)) print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size) pstart = time.time() for iclust, line in enumerate(annotations): assert 'tree-info' not in line # could handle it, but don't feel like thinking about it a.t.m. if metric_method == 'shm': metric_info = {u : -utils.per_seq_val(line, 'n_mutations', u) for u in line['unique_ids']} line['tree-info'] = {'lb' : {metric_method : metric_info}} elif metric_method == 'fay-wu-h': # NOTE this isn't actually tree info, but I"m comparing it to things calculated with a tree, so putting it in the same place at least for now fwh = -utils.fay_wu_h(line) line['tree-info'] = {'lb' : {metric_method : {u : fwh for i, u in enumerate(line['unique_ids'])}}} # kind of weird to set it individually for each sequence when they all have the same value (i.e. 
it's a per-family metric), but I don't want to do actual per-family comparisons any more, and this way we can at least look at it elif metric_method in ['cons-dist-nuc', 'cons-dist-aa']: lbfo = {} add_cdists_to_lbfo(line, lbfo, metric_method) line['tree-info'] = {'lb' : lbfo} elif metric_method == 'delta-lbi': dtree, lbfo = get_combo_lbfo(['lbi'], iclust, line) delta_lbfo = {} for uid in line['unique_ids']: node = dtree.find_node_with_taxon_label(uid) if node is dtree.seed_node: continue # maybe I should add it as something? not sure delta_lbfo[uid] = lbfo['lbi'][uid] - lbfo['lbi'][node.parent_node.taxon.label] # I think the parent should always be in here, since I think we should calculate lbi for every node in the tree line['tree-info'] = {'lb' : {metric_method : delta_lbfo}} elif 'aa-lb' in metric_method: # aa versions of lbi and lbr _, _ = get_combo_lbfo([metric_method.lstrip('aa-')], iclust, line, is_aa_lb=True) elif metric_method == 'cons-lbi': # now uses aa-lbi as a tiebreaker for cons-dist-aa, but used to be old z-score style combination of (nuc-)lbi and cons-dist def tiefcn(uid): cdist, aalbi = lbfo['cons-dist-aa'][uid], lbfo['aa-lbi'][uid] return cdist + aalbi / max_aa_lbi _, lbfo = get_combo_lbfo(['cons-dist-aa', 'lbi'], iclust, line, is_aa_lb=True) max_aa_lbi = max(lbfo['aa-lbi'].values()) line['tree-info'] = {'lb' : {metric_method : {u : tiefcn(u) for u in line['unique_ids']}}} else: assert False print ' tree quantity calculation/prediction time: %.1fs' % (time.time() - pstart) if base_plotdir is not None: plstart = time.time() assert ete_path is None or workdir is not None # need the workdir to make the ete trees import plotting import lbplotting if 'affinities' not in annotations[0] or all(affy is None for affy in annotations[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody return true_plotdir = base_plotdir + '/true-tree-metrics' utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=[metric_method]) fnames = [] if metric_method in ['delta-lbi', 'aa-lbr']: lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+metric_method, annotations, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug) else: for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']): lbplotting.plot_lb_vs_affinity(true_plotdir, annotations, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, affy_key=affy_key) if not only_csv: plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(metric_method, '%s/%s/' % (true_plotdir, metric_method)),]) print ' non-lb metric plotting time %.1fs' % (time.time() - plstart) # ---------------------------------------------------------------------------------------- def run_laplacian_spectra(treestr, workdir=None, plotdir=None, plotname=None, title=None, debug=False): # - https://www.ncbi.nlm.nih.gov/pubmed/26658901/ # - instructions here: https://besjournals.onlinelibrary.wiley.com/doi/full/10.1111/2041-210X.12526 # I think this is what ended up working (thought probably not in docker): # apt-get install libgmp-dev libmpfr-dev # > install.packages("RPANDA",dependencies=TRUE) # ok but then I needed to modify the code, so downloaded the source from cran, and swapped out for the spectR.R that eric sent, then installed with: # R CMD INSTALL -l packages/RPANDA/lib packages/RPANDA/ # NOTE needs to happen whenever you modify the R source # condensation of docs from the above 
paper: # - > res<-spectR(Phyllostomidae) # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap) # - > plot_spectR(res) # make plots for eigenvalue spectrum # - if eigengap (largest gap between sorted eigenvalues) is e.g. between 3 and 4, then the tree can be separated into three regions, and you use the BIC stuff to find those regions # - > res<-BICompare(Phyllostomidae,3) # - > plot_BICompare(Phyllostomidae,res) # - > res<-JSDtree(Phyllostomidae_genera) # pairwise jensen-shannon distances between the 25 phylogenies # - > JSDtree_cluster(res) # plots heatmap and hierarchical cluster if debug: print utils.pad_lines(get_ascii_tree(treestr=treestr)) print treestr if workdir is None: workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work')) eigenfname = '%s/eigenvalues.txt' % workdir os.makedirs(workdir) cmdlines = [ 'library(ape, quiet=TRUE)', # 'library(RPANDA, quiet=TRUE)', # old way, before I had to modify the source code because the CRAN version removes all eigenvalues <1 (for method="standard" -- with method="normal" it's <0, which is probably better, but it also seems to smoosh all the eigenvalues to be almost exactly 1) 'library("RPANDA", lib.loc="%s/packages/RPANDA/lib", quiet=TRUE)' % os.path.dirname(os.path.realpath(__file__)).replace('/python', ''), 'tree <- read.tree(text = "%s")' % treestr, # 'print(tree)', 'specvals <- spectR(tree, method=c("standard"))', # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap) # 'print(specvals)', 'capture.output(specvals$eigenvalues, file="%s")' % eigenfname, ] outstr, errstr = utils.run_r(cmdlines, workdir, return_out_err=True) # if it crashes, call it without return_out_err, so it prints stuff as it goes errstr = '\n'.join([l.strip() for l in errstr.split('\n') if 'This is vegan' not in l]) for oestr in (outstr, errstr): if oestr.strip() == '': continue print utils.pad_lines(outstr) eigenvalues = [] with open(eigenfname) as efile: for line in efile: for tstr in line.split(): if '[' in tstr: if int(tstr.strip('[]')) != len(eigenvalues) + 1: raise Exception('couldn\'t process line:\n%s' % line) else: eigenvalues.append(float(tstr)) os.remove(eigenfname) os.rmdir(workdir) if plotdir is not None: import plotting plotting.plot_laplacian_spectra(plotdir, plotname, eigenvalues, title) # ---------------------------------------------------------------------------------------- def combine_selection_metrics(lp_infos, min_cluster_size=default_min_selection_metric_cluster_size, plotdir=None, ig_or_tr='ig', args=None, is_simu=False): # don't really like passing <args> like this, but it's the easiest cfg convention atm # ---------------------------------------------------------------------------------------- def getpids(line): all_ids = [] for ip, pids in enumerate(line['paired-uids']): if pids is None or len(pids) == 0: continue elif len(pids) == 1: # assert pids[0] not in all_ids # this is kind of slow, and maybe it's ok to comment it? 
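# ----------------------------------------------------------------------------------------
# NOTE purely illustrative sketch: the heavy/light pairing code around here works by collecting each
#  heavy-chain annotation's 'paired-uids' and then looking for the single light-chain cluster that shares
#  ids with them (with a warning if there isn't exactly one, see find_cluster_pairs() below). This is a
#  minimal version of that matching step on plain lists of ids; the fcn name match_by_overlap() and the
#  toy clusters are invented for the example.
def match_by_overlap(h_pair_ids, l_clusters):
    matches = [c for c in l_clusters if len(set(h_pair_ids) & set(c)) > 0]
    return matches[0] if len(matches) == 1 else None  # None if the match is missing or ambiguous

# toy usage (expects ['l1', 'l2', 'l3']):
# print(match_by_overlap(['l1', 'l2'], [['l1', 'l2', 'l3'], ['l4', 'l5']]))
# ----------------------------------------------------------------------------------------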
all_ids.append(pids[0]) else: raise Exception('too many paired ids (%d) for %s: %s' % (len(pids), line['unique_ids'][ip], ' '.join(pids))) return all_ids # ---------------------------------------------------------------------------------------- def find_cluster_pairs(lpair): # the annotation lists should just be in the same order, but after adding back in all the unpaired sequences to each chain they could be a bit wonky lp_antn_pairs = [] lpk = tuple(lpair) if None in lp_infos[lpk].values(): return lp_antn_pairs h_part, l_part = [sorted(lp_infos[lpk]['cpaths'][l].best(), key=len, reverse=True) for l in lpair] h_atn_dict, l_atn_dict = [utils.get_annotation_dict(lp_infos[lpk]['antn_lists'][l]) for l in lpair] n_no_info = 0 for h_clust in h_part: h_atn = h_atn_dict[':'.join(h_clust)] if 'tree-info' not in h_atn: # skip (presumably) the smaller ones n_no_info += 1 continue if 'paired-uids' not in h_atn: # seems to just be single-seq clusters, so i don't care continue l_clusts = [c for c in l_part if len(set(getpids(h_atn)) & set(c)) > 0] if len(l_clusts) != 1: print ' %s couldn\'t find a unique light cluster (found %d, looked in %d) for heavy cluster with size %d and %d paired ids (heavy: %s pids: %s)' % (utils.color('yellow', 'warning'), len(l_clusts), len(l_part), len(h_atn), len(getpids(h_atn)), ':'.join(h_clust), ':'.join(getpids(h_atn))) continue assert len(l_clusts) == 1 l_atn = l_atn_dict[':'.join(l_clusts[0])] h_atn['loci'] = [lpair[0] for _ in h_atn['unique_ids']] # this kind of sucks, but it seems like the best option a.t.m. (see note in event.py) l_atn['loci'] = [lpair[1] for _ in l_atn['unique_ids']] lp_antn_pairs.append((h_atn, l_atn)) if n_no_info > 0: print ' no tree info in %d annotations (probably smaller than min tree metric cluster size)' % n_no_info return lp_antn_pairs # ---------------------------------------------------------------------------------------- def gsval(mfo, tch, vname): cln, iseq = mfo[tch], mfo[tch+'_iseq'] if vname in cln: assert vname in utils.linekeys['per_seq'] return cln[vname][iseq] elif vname == 'cell-types': return None elif vname == 'aa-cfrac': return lb_cons_dist(cln, iseq, aa=True, frac=True) elif vname == 'shm-aa': return utils.shm_aa(cln, iseq=iseq) elif vname == 'aa-cdist': return -smvals(cln, 'cons-dist-aa', iseq=iseq) elif vname in selection_metrics: return smvals(cln, vname, iseq=iseq) elif vname == 'multipy': # multiplicity return utils.get_multiplicity(cln, iseq=iseq) else: raise Exception('unsupported sort var \'%s\'' % vname) # ---------------------------------------------------------------------------------------- def sumv(mfo, kstr): return sum(gsval(mfo, c, kstr) for c in 'hl') # ---------------------------------------------------------------------------------------- def sum_nuc_shm_pct(mpfo): total_len = sum(len(gsval(mpfo, c, 'seqs')) - gsval(mpfo, c, 'seqs').count(utils.ambig_base) for c in 'hl') return 100 * sumv(mpfo, 'n_mutations') / float(total_len) # ---------------------------------------------------------------------------------------- def read_cfgfo(): allowed_keys = set(['n-families', 'n-per-family', 'include-unobs-cons-seqs', 'vars', 'cell-types', 'max-ambig-positions', 'min-umis', 'min-median-nuc-shm-%', 'min-hdist-to-already-chosen']) # allowed_vars = [] if debug: print ' ab choice cfg:' outstr, _ = utils.simplerun('cat %s'%args.ab_choice_cfg, return_out_err=True) print utils.pad_lines(outstr) with open(args.ab_choice_cfg) as cfile: cfgfo = yaml.load(cfile, Loader=Loader) if len(set(cfgfo) - allowed_keys) > 0: raise 
Exception('unexpected key[s] in ab choice cfg: %s (choose from: %s)' % (' '.join(set(cfgfo) - allowed_keys), ' '.join(allowed_keys))) return cfgfo # ---------------------------------------------------------------------------------------- def add_unobs_cseqs(metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=False): # ---------------------------------------------------------------------------------------- def use_iseqs(tch, mtmp): # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq hsil = mtmp[tch]['has_shm_indels'] tstr = '(%d / %d = %.2f)' % (hsil.count(True), len(hsil), hsil.count(True) / float(len(hsil))) if hsil.count(True) / float(len(hsil)) > 0.5: print ' %s more than half %s of %s seqs have indels, so using *input* cons seq (note that if there\'s more than one indel, this may well be wrong, since you probably only want indels that are in a majority of the family [which is probably not all of them])' % (utils.color('yellow', 'warning'), tstr, tch) return True else: if any(hsil): # if none of them have indels, don't print anything print ' less than half %s of %s seqs have indels, so not using input seqs for cons seq' % (tstr, tch) return False # ---------------------------------------------------------------------------------------- def getcseqs(tch, use_input_seqs, aa=False): if use_input_seqs: return utils.cons_seq_of_line(mtmp[tch], aa=aa, use_input_seqs=True) else: return mtmp[tch]['consensus_seq'+('_aa' if aa else '')] # ---------------------------------------------------------------------------------------- mtmp = metric_pairs[0] uis = {c : use_iseqs(c, mtmp) for c in 'hl'} # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq cseqs = {c : getcseqs(c, uis[c], aa=True) for c in 'hl'} # aa cons seqs def nambig(c): return utils.n_variable_ambig_aa(mtmp[c], cseqs[c], getcseqs(c, uis[c], aa=False)) if 'max-ambig-positions' in cfgfo and sum(nambig(c) for c in 'hl') > cfgfo['max-ambig-positions']: print ' cons seq: too many ambiguous bases in h+l (%d > %d)' % (sum(nambig(c) for c in 'hl'), cfgfo['max-ambig-positions']) return consfo = {c : mtmp[c] for c in 'hl'} consfo.update({'iclust' : iclust, 'consensus' : True}) consfo.update({c+'_use_input_seqs' : uis[c] for c in 'hl'}) consfo.update({c+'_cseq_aa' : cseqs[c] for c in 'hl'}) consfo.update({c+'_cseq_nuc' : getcseqs(c, uis[c], aa=False) for c in 'hl'}) chosen_mfos.append(consfo) all_chosen_seqs.add(tuple(cseqs[c] for c in 'hl')) if tdbg: print ' %s: added cons seq%s' % (utils.color('green', 'x'), (' (using %s input seq[s] becuase of indels)'%' '.join(c for c in 'hl' if consfo[c+'_use_input_seqs'])) if any(consfo[c+'_use_input_seqs'] for c in 'hl') else '') # ---------------------------------------------------------------------------------------- def local_hdist_aa(s1, s2, defval=None, frac=False): # ick, this is ugly, but I think makes sense for now if len(s1) == len(s2): hfcn = utils.hamming_fraction if frac else utils.hamming_distance return hfcn(s1, s2, amino_acid=True) elif defval is not None: return defval else: return max([len(s1), len(s2)]) # NOTE it's kind of weird and arbitrary to return the max seq len if they're different lengths, but if they're different lengths we don't care anyway cause we're just looking for very similar sequences # ---------------------------------------------------------------------------------------- def choose_abs(metric_pairs, iclust, tdbg=False): # 
---------------------------------------------------------------------------------------- def get_n_choose(tcfg, key): if key not in tcfg: return None if isinstance(tcfg[key], int): # take the same number from each family return tcfg[key] else: # specify a different number for each family assert len(tcfg[key]) == cfgfo['n-families'] return tcfg[key][iclust] # ---------------------------------------------------------------------------------------- def in_chosen_seqs(all_chosen_seqs, mfo): mfseqs = tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl') return mfseqs in all_chosen_seqs # ---------------------------------------------------------------------------------------- def too_close_to_chosen_seqs(all_chosen_seqs, mfo, hdist, ttdbg=False): if len(all_chosen_seqs) == 0: return False mfseqs = tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl') if ttdbg: h_min, l_min = [min(local_hdist_aa(acseqs[i], mseq) for acseqs in all_chosen_seqs) for i, mseq in enumerate(mfseqs)] print ' %d %d %s' % (h_min, l_min, utils.color('red', 'x') if sum([h_min, l_min]) < hdist else '') return any(sum(local_hdist_aa(cseq, mseq) for mseq, cseq in zip(mfseqs, acseqs)) < hdist for acseqs in all_chosen_seqs) # ---------------------------------------------------------------------------------------- # run through a bunch of options for skipping seqs/families if iclust >= cfgfo['n-families']: return [] if tdbg: print ' iclust %d: choosing abs from joint cluster with size %d (marked with %s)' % (iclust, len(metric_pairs), utils.color('green', 'x')) for ctk, ntk in [('cell-types', 'cell-types'), ('min-umis', 'umis')]: if len(metric_pairs) > 0 and ctk in cfgfo and ntk not in metric_pairs[0]['h']: print ' %s \'%s\' in cfgfo but \'%s\' info not in annotation' % (utils.color('yellow', 'warning'), ctk, ntk) if 'cell-types' in cfgfo and len(metric_pairs) > 0 and 'cell-types' in metric_pairs[0]['h']: def keepfcn(m): return all(gsval(m, c, 'cell-types') in cfgfo['cell-types'] for c in 'hl') # kind of dumb to check both, they should be the same, but whatever it'll crash in the debug printing below if they're different n_before = len(metric_pairs) metric_pairs = [m for m in metric_pairs if keepfcn(m)] if tdbg and n_before - len(metric_pairs) > 0: print ' skipped %d with cell type not among %s' % (n_before - len(metric_pairs), cfgfo['cell-types']) if 'min-umis' in cfgfo and len(metric_pairs) > 0 and 'umis' in metric_pairs[0]['h']: def keepfcn(m): return sum(gsval(m, c, 'umis') for c in 'hl') > cfgfo['min-umis'] n_before = len(metric_pairs) metric_pairs = [m for m in metric_pairs if keepfcn(m)] if tdbg and n_before - len(metric_pairs) > 0: print ' skipped %d with umis less than %d' % (n_before - len(metric_pairs), cfgfo['min-umis']) if 'min-median-nuc-shm-%' in cfgfo and len(metric_pairs) > 0: median_shm = numpy.median([sum_nuc_shm_pct(m) for m in metric_pairs]) skip_family = median_shm < cfgfo['min-median-nuc-shm-%'] if tdbg: print ' %s family: median h+l nuc shm %.2f%% %s than %.2f%%' % (utils.color('yellow', 'skipping entire') if skip_family else 'keeping', median_shm, 'less' if skip_family else 'greater', cfgfo['min-median-nuc-shm-%']) if skip_family: return [] if 'max-ambig-positions' in cfgfo: # max number of ambiguous amino acid positions summed over h+l def keepfcn(m): def nambig(c): return utils.n_variable_ambig_aa(m[c], gsval(m, c, 'input_seqs_aa'), gsval(m, c, 'input_seqs')) return sum(nambig(c) for c in 'hl') <= cfgfo['max-ambig-positions'] n_before = len(metric_pairs) metric_pairs = [m for m in metric_pairs if keepfcn(m)] 
if tdbg and n_before - len(metric_pairs): print ' skipped %d with too many ambiguous bases (>%d)' % (n_before - len(metric_pairs), cfgfo['max-ambig-positions']) if len(metric_pairs) == 0: return [] chosen_mfos = [] # includes unobs cons seqs plus seqs chosen from all sortvars all_chosen_seqs = set() # just for keeping track of the seqs we've already chosen # maybe add the unobserved cons seq if 'include-unobs-cons-seqs' in cfgfo and cfgfo['include-unobs-cons-seqs']: add_unobs_cseqs(metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=tdbg) # well, doesn't necessarily add it, but at least checks to see if we should # actually choose them, sorted by the various specified vars for sortvar, vcfg in cfgfo['vars'].items(): assert vcfg['sort'] in ['low', 'high'] if [get_n_choose(cfo, k) for cfo, k in [(vcfg, 'n'), (cfgfo, 'n-per-family')]].count(None) != 1: raise Exception('specify exactly one of \'n-per-family\' and/or \'vars\': \'n\'') n_already_chosen, n_same_seqs, n_too_close, n_newly_chosen = 0, 0, 0, 0 sorted_mfos = metric_pairs sorted_mfos = sorted(sorted_mfos, key=lambda m: sum(mtpys[c][gsval(m, c, 'input_seqs_aa')] for c in 'hl'), reverse=True) sorted_mfos = sorted(sorted_mfos, key=lambda m: sum(gsval(m, c, sortvar) for c in 'hl'), reverse=vcfg['sort']=='high') for mfo in sorted_mfos: if mfo in chosen_mfos: n_already_chosen += 1 continue if in_chosen_seqs(all_chosen_seqs, mfo): n_same_seqs += 1 continue if 'min-hdist-to-already-chosen' in cfgfo and too_close_to_chosen_seqs(all_chosen_seqs, mfo, cfgfo['min-hdist-to-already-chosen']): n_too_close += 1 continue if any(gsval(mfo, c, 'has_shm_indels') for c in 'hl'): print ' %s choosing ab with shm indel: the consensus sequence may or may not reflect the indels (see above). uids: %s %s' % (utils.color('yellow', 'warning'), gsval(mfo, 'h', 'unique_ids'), gsval(mfo, 'l', 'unique_ids')) chosen_mfos.append(mfo) all_chosen_seqs.add(tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl')) n_newly_chosen += 1 # number chosen from this sortvar # this takes the top <n> by <sortvar> (not including any unobs cons seq) if get_n_choose(vcfg, 'n') is not None and n_newly_chosen >= get_n_choose(vcfg, 'n'): # number to choose for this var in this family break # whereas this makes sure we have N from the family over all sort vars (including any unobs cons seq), while still sorting by <sortvar>. It probably does *not* make sense to specify both versions if get_n_choose(cfgfo, 'n-per-family') is not None and len(chosen_mfos) >= get_n_choose(cfgfo, 'n-per-family'): # number to choose for all vars in this family (it's kind of weird/confusing to have this inside the sortvar loop, but i think it actually makes sense) break if tdbg: print ' %s: chose %d%s%s%s' % (sortvar, n_newly_chosen, '' if n_already_chosen==0 else ' (%d were in common with a previous var)'%n_already_chosen, '' if n_same_seqs==0 else ' (%d had seqs identical to previously-chosen ones)'%n_same_seqs, '' if n_too_close==0 else ' (%d had seqs too close to previously-chosen ones)'%n_too_close) if tdbg: print ' chose %d total' % len(chosen_mfos) return chosen_mfos # ---------------------------------------------------------------------------------------- def add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs): def waschosen(m): return 'chosen' if all(gsval(m, c, 'unique_ids') in iclust_chosen_ids for c in 'hl') else 'nope' def ustr(m): rstr = '' if waschosen(m) == 'chosen': # if this is commented, i think i can simplify this fcn a lot? 
UPDATE need the extra text for cases where lots of dots are on top of each other rstr = 'x' if args.queries_to_include is not None and all(gsval(m, c, 'unique_ids') in args.queries_to_include for c in 'hl'): common_chars = ''.join(c for c, d in zip(gsval(m, 'h', 'unique_ids'), gsval(m, 'l', 'unique_ids')) if c==d) common_chars = common_chars.rstrip('-ig') if len(common_chars) > 0: rstr += ' ' + common_chars else: rstr += ' ' + ' '.join(gsval(m, c, 'unique__ids') for c in 'hl') return None if rstr == '' else rstr non_cons_mfos = [m for m in iclust_mfos if 'consensus' not in m] iclust_chosen_ids = [gsval(m, c, 'unique_ids') for m in non_cons_mfos for c in 'hl'] iclust_plotvals['uids'] = [ustr(m) for m in metric_pairs] iclust_plotvals['chosen'] = [waschosen(m) for m in metric_pairs] # ---------------------------------------------------------------------------------------- def write_chosen_file(all_chosen_mfos, hash_len=8): # ---------------------------------------------------------------------------------------- def getofo(mfo): ofo = collections.OrderedDict([('iclust', mfo['iclust'])]) if 'consensus' in mfo: def consid(mfo, c): return '%s-cons-%s' % (utils.uidhashstr(mfo[c]['consensus_seq_aa'])[:hash_len], mfo[c]['loci'][0]) ofo.update([(c+'_id', consid(mfo, c)) for c in 'hl']) else: ofo.update([(c+'_id', gsval(mfo, c, 'unique_ids')) for c in 'hl']) for kn in ['aa-cfrac', 'shm-aa', 'aa-cdist']: ofo.update([('sum_'+kn, sum(gsval(mfo, c, kn) for c in 'hl'))]) ofo.update([(c+'_family_size', len(mfo[c]['unique_ids'])) for c in 'hl']) ofo.update([(c+'_'+r+'_gene' , mfo[c][r+'_gene']) for r in utils.regions for c in 'hl']) if 'consensus' in mfo: for tch in 'hl': ofo[tch+'_seq_aa'] = mfo[tch+'_cseq_aa'] ofo[tch+'_seq_nuc'] = mfo[tch+'_cseq_nuc'] ofo[tch+'_has_shm_indels'] = mfo[tch+'_use_input_seqs'] else: for ok, lk in [('has_shm_indels', None), ('cell_type', 'cell-types'), ('aa-cfrac', None), ('aa-cdist', None), ('shm-aa', None), ('seq_nuc', 'input_seqs'), ('seq_aa', 'input_seqs_aa')]: ofo.update([(c+'_'+ok, gsval(mfo, c, utils.non_none([lk, ok]))) for c in 'hl']) if 'consensus' not in mfo: # check that the aa seqs are actually translations of the nuc seqs (for unobs cons seqs, we expect them to differ) NOTE i don't know if this is really worthwhile long term, but it makes me feel warm and fuzzy atm that it's here for tch in 'hl': if utils.ltranslate(ofo[tch+'_seq_nuc']) != ofo[tch+'_seq_aa']: print ' %s aa seq not translation of nuc seq for %s %s:' % (utils.color('yellow', 'warning'), tch, ofo[tch+'_id']) utils.color_mutants(utils.ltranslate(ofo[tch+'_seq_nuc']), ofo[tch+'_seq_aa'], amino_acid=True, print_result=True, extra_str=' ') return ofo # ---------------------------------------------------------------------------------------- if debug: print ' writing %d chosen abs to %s' % (len(all_chosen_mfos), args.chosen_ab_fname) with open(args.chosen_ab_fname, 'w') as cfile: outfos, fieldnames = [], None for mfo in all_chosen_mfos: outfos.append(getofo(mfo)) if fieldnames is None or len(outfos[-1].keys()) > len(fieldnames): fieldnames = outfos[-1].keys() if len(all_chosen_mfos) > 0: writer = csv.DictWriter(cfile, fieldnames) writer.writeheader() for ofo in outfos: writer.writerow(ofo) # ---------------------------------------------------------------------------------------- def print_dbg(metric_pairs): # ---------------------------------------------------------------------------------------- def init_xtras(): xtra_heads = [('cell-types', ['cell', 'type']), ('umis', ['umis', 'h+l']), ('c_genes', 
['c_gene', ''])] xheads, xtrafo, xlens = [[], []], [], {} for xn, xh in xtra_heads: if all(xn not in mpfo[c] for mpfo in metric_pairs for c in 'hl'): continue xtrafo.append(xn) ctlens = [len(str(gsval(m, c, xn))) for m in metric_pairs for c in 'hl'] xlens[xn] = max([len(h) for h in xh] + ctlens) + 1 xheads = [x + [utils.wfmt(s, xlens[xn])] for x, s in zip(xheads, xh)] return xtrafo, xheads, xlens # ---------------------------------------------------------------------------------------- def get_xstr(mpfo, xlens): xstr = [] # don't try to condense these into a block, they're too different if 'cell-types' in xtrafo: ctval = utils.get_single_entry(list(set(gsval(mpfo, c, 'cell-types') for c in 'hl'))) xstr += [utils.wfmt(utils.non_none([ctval, '?']), xlens['cell-types'])] if 'umis' in xtrafo: uvals = [gsval(mpfo, c, 'umis') for c in 'hl'] xstr += [utils.wfmt('?' if None in uvals else sum(uvals), xlens['umis'])] if 'c_genes' in xtrafo: cg = gsval(mpfo, 'h', 'c_genes') xstr += [utils.wfmt('?' if cg in [None, 'None'] else cg.replace('IGH', ''), xlens['c_genes'])] return xstr # ---------------------------------------------------------------------------------------- def get_didstr(dids): if len(set(dids)) == 1: # make sure they're from the same droplet didstr = dids[0] if args.queries_to_include is not None and hid in args.queries_to_include and lid in args.queries_to_include: didstr = utils.color('red', didstr, width=20) else: print ' %s paired seqs %s %s have different droplet ids (i.e. they were probably mis-paired) %s' % (utils.color('red', 'error'), hid, lid, dids) didstr = 'see error' return didstr # ---------------------------------------------------------------------------------------- def getcdist(cons_mfo, mpfo, tch, frac=False): # can't just use gsval() for cases where we used the "input" (indel'd) cons seq (although note that there's probably some other places where the orginal/indel-reversed version is used) defval = gsval(mpfo, tch, 'aa-c'+('frac' if frac else 'dist')) if cons_mfo is None: return defval return local_hdist_aa(gsval(mpfo, tch, 'input_seqs_aa'), cons_mfo[c+'_cseq_aa'], defval=defval, frac=frac) # ---------------------------------------------------------------------------------------- xtrafo, xheads, xlens = init_xtras() lstr = '%s %s' % (utils.locstr(h_atn['loci'][0]), utils.locstr(l_atn['loci'][0])) h_cshm, l_cshm = [lb_cons_seq_shm(l, aa=True) for l in [h_atn, l_atn]] cdstr = '%2d %2d' % (h_cshm, l_cshm) sstr = ' %3d %3d %3d' % (len(metric_pairs), len(h_atn['unique_ids']), len(l_atn['unique_ids'])) gstrs = ['%s %s' % (utils.color_gene(h_atn[r+'_gene']), utils.color_gene(l_atn[r+'_gene']) if r!='d' else '') for r in utils.regions] gstr_len = max(utils.len_excluding_colors(s) for s in gstrs) # don't really need this as long as it's the last column gstrs = ['%s%s' % (g, ' '*(gstr_len - utils.len_excluding_colors(g))) for g in gstrs] h_cseq, l_cseq = [l['consensus_seq_aa'] for l in (h_atn, l_atn)] cons_mfo = None if any('consensus' in m for m in iclust_mfos): cons_mfo = utils.get_single_entry([m for m in iclust_mfos if 'consensus' in m]) h_cseq, l_cseq = [cons_mfo[c+'_cseq_aa'] if cons_mfo[c+'_use_input_seqs'] else cs for c, cs in zip('hl', (h_cseq, l_cseq))] h_cseq_str, l_cseq_str = [utils.color_mutants(cs, cs, amino_acid=True) for cs in (h_cseq, l_cseq)] h_nseq, l_nseq = [utils.color_mutants(cs, l['naive_seq_aa'], amino_acid=True, align_if_necessary=True) for l, cs in zip((h_atn, l_atn), (h_cseq, l_cseq))] print (' aa-cfrac (%%) aa-cdist droplet contig indels%s N %%shm N 
aa mutations sizes %s %s %s') % (' '.join(xheads[0]), utils.wfmt('genes cons:', gstr_len), h_cseq_str, l_cseq_str) print (' sum h l h l h l h l %s h l nuc cons. obs. both h l %s %s %s') % (' '.join(xheads[1]), utils.wfmt('naive:', gstr_len), h_nseq, l_nseq) sorted_mfos = sorted(metric_pairs, key=lambda m: sum(mtpys[c][gsval(m, c, 'input_seqs_aa')] for c in 'hl'), reverse=True) for imp, mpfo in enumerate(sorted(sorted_mfos, key=lambda x: sum(getcdist(cons_mfo, x, c, frac=True) for c in 'hl'))): hid, lid = [gsval(mpfo, c, 'unique_ids') for c in 'hl'] dids, cids = zip(*[utils.get_droplet_id(u, return_contigs=True) for u in (hid, lid)]) indelstr = ' '.join(utils.color('red', 'y') if utils.per_seq_val(l, 'has_shm_indels', u) else ' ' for c, u, l in zip('hl', [hid, lid], [h_atn, l_atn])) h_seq, l_seq = [utils.color_mutants(cs, utils.per_seq_val(l, 'input_seqs_aa', u), amino_acid=True, align_if_necessary=True) for u, l, cs in zip((hid, lid), (h_atn, l_atn), (h_cseq, l_cseq))] h_cfrac, l_cfrac = [getcdist(cons_mfo, mpfo, c, frac=True) for c in 'hl'] h_cdist, l_cdist = [getcdist(cons_mfo, mpfo, c) for c in 'hl'] print ' %s %4.1f %4.1f %4.1f %4d%4d %s %20s %s %s %s' % (lstr if imp==0 else ' '*utils.len_excluding_colors(lstr), 100*sum([h_cfrac, l_cfrac]), 100*h_cfrac, 100*l_cfrac, h_cdist, l_cdist, utils.color('green', 'x') if mpfo in iclust_mfos else ' ', get_didstr(dids), cids[0], cids[1], indelstr), print ' %s %3d %3d %4.1f %s %2d %2d %2d %s %s %s %s' % (' '.join(get_xstr(mpfo, xlens)), mtpys['h'][gsval(mpfo, 'h', 'input_seqs_aa')], mtpys['l'][gsval(mpfo, 'l', 'input_seqs_aa')], sum_nuc_shm_pct(mpfo), cdstr if imp==0 else ' '*len(cdstr), sumv(mpfo, 'shm-aa'), gsval(mpfo, 'h', 'shm-aa'), gsval(mpfo, 'l', 'shm-aa'), sstr if imp==0 else ' '*utils.len_excluding_colors(sstr), gstrs[imp] if imp<len(gstrs) else ' '*gstr_len, h_seq, l_seq) for gs in gstrs[imp+1:]: # if the cluster was smaller than gstrs, need to print the extra gstrs (this shouldn't really ever happen unless i make gstrs much longer)) print '%81s%s' % ('', gs) # this width will sometimes be wrong print '' # ---------------------------------------------------------------------------------------- def makeplots(metric_pairs, h_atn): import plotting import lbplotting if is_simu: # make performance plots for sum of h+l aa-cdist mm = 'sum-cons-dist-aa' h_atn['tree-info']['lb'][mm] = {} # NOTE it's kind of hackey to only add it to the heavy annotation, but i'm not doing anything with it after plotting right here, anyway for mfo in metric_pairs: h_atn['tree-info']['lb'][mm][gsval(mfo, 'h', 'unique_ids')] = -sum(gsval(mfo, c, 'aa-cdist') for c in 'hl') fnames = [] lbplotting.plot_lb_vs_affinity(plotdir, [h_atn], mm, is_true_line=is_simu, fnames=fnames) plotting.make_html(plotdir, fnames=fnames, extra_links=[(mm, '%s/%s/' % (plotdir, mm)),]) iclust_plotvals = {c+'_aa-cfrac' : [gsval(m, c, 'aa-cfrac') for m in metric_pairs] for c in 'hl'} if any(vl.count(0)==len(vl) for vl in iclust_plotvals.values()): # doesn't plot anything useful, and gives a pyplot warning to std err which is annoying return add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs) # add uids for the chosen ones mstr = legtexts['cons-frac-aa'] lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-%d'%iclust, plotdir, iclust_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation') # NOTE this iclust will in general *not* correspond to the one in partition plots # for k in iclust_plotvals: # if k not in 
all_plotvals: all_plotvals[k] = [] # just for 'uids' # all_plotvals[k] += iclust_plotvals[k] # ---------------------------------------------------------------------------------------- def get_mtpys(metric_pairs): # NOTE this is the sum of utils.get_multiplicity() over identical sequences mtpys = {} for c in 'hl': seqlist = [gsval(m, c, 'input_seqs_aa') for m in metric_pairs for _ in range(gsval(m, c, 'multipy'))] mtpys[c] = {s : seqlist.count(s) for s in set(seqlist)} return mtpys # ---------------------------------------------------------------------------------------- debug = not is_simu or args.debug all_chosen_mfos = [] cfgfo = read_cfgfo() antn_pairs = [] for lpair in [lpk for lpk in utils.locus_pairs[ig_or_tr] if tuple(lpk) in lp_infos]: antn_pairs += find_cluster_pairs(lpair) # all_plotvals = {k : [] for k in ('h_aa-cfrac', 'l_aa-cfrac')} n_too_small = 0 if debug: print ' %d h/l pairs: %s' % (len(antn_pairs), ', '.join(' '.join(str(len(l['unique_ids'])) for l in p) for p in antn_pairs)) for iclust, (h_atn, l_atn) in enumerate(sorted(antn_pairs, key=lambda x: sum(len(l['unique_ids']) for l in x), reverse=True)): for ltmp in (h_atn, l_atn): utils.add_seqs_aa(ltmp) add_cons_seqs(ltmp, aa=True) # this also adds the nuc one if it isn't there metric_pairs = [] for hid, pids in zip(h_atn['unique_ids'], h_atn['paired-uids']): if pids is None or len(pids) == 0: # should only have the latter now (set with .get() call in rewrite_input_metafo()) continue lid = pids[0] if lid not in l_atn['unique_ids']: print ' paired light id %s missing' % lid continue if any(len(l['unique_ids']) < min_cluster_size for l in (h_atn, l_atn)): n_too_small += 1 continue mpfo = {'iclust' : iclust} for tch, uid, ltmp in zip(('h', 'l'), (hid, lid), (h_atn, l_atn)): mpfo[tch] = ltmp mpfo[tch+'_iseq'] = ltmp['unique_ids'].index(uid) metric_pairs.append(mpfo) if len(metric_pairs) == 0: continue mtpys = get_mtpys(metric_pairs) iclust_mfos = choose_abs(metric_pairs, iclust, tdbg=debug) if len(iclust_mfos) > 0: all_chosen_mfos += iclust_mfos if debug: print_dbg(metric_pairs) if n_too_small > 0: print ' skipped %d clusters smaller than %d' % (n_too_small, min_cluster_size) if plotdir is not None: makeplots(metric_pairs, h_atn) if args.chosen_ab_fname is not None: write_chosen_file(all_chosen_mfos) # if plotdir is not None: # eh, maybe there isn't a big reason for an overall one # lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-all', plotdir, all_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation')
gpl-3.0
-1,847,442,446,347,377,700
71.570919
556
0.596853
false
3.465434
false
false
false
DavidCain/mitoc-trips
ws/migrations/0020_typo_corrections.py
1
1360
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [('ws', '0019_2020_ws_application')]

    operations = [
        migrations.AlterField(
            model_name='climbingleaderapplication',
            name='familiarity_spotting',
            field=models.CharField(
                choices=[
                    ('none', 'not at all'),
                    ('some', 'some exposure'),
                    ('comfortable', 'comfortable'),
                    ('very comfortable', 'very comfortable'),
                ],
                max_length=16,
                verbose_name='Familiarity with spotting boulder problems',
            ),
        ),
        migrations.AlterField(
            model_name='winterschoolleaderapplication',
            name='winter_experience',
            field=models.TextField(
                blank=True,
                help_text='Details of previous winter outdoors experience. '
                          'Include the type of trip (x-country skiing, above treeline, snowshoeing, ice climbing, etc), '
                          'approximate dates and locations, numbers of participants, notable trail and weather conditions. '
                          'Please also give details of whether you participated, led, or co-led these trips.',
                max_length=5000,
            ),
        ),
    ]
gpl-3.0
5,556,380,076,097,705,000
37.857143
114
0.546324
false
4.62585
false
false
false
tomfa/flashcard-json-maker
simplequiz_interpreter.py
1
3968
#coding: utf-8 ''' This script reads a Q-A txt-file and generates JSON output. USAGE: Save your Q-A file FORMAT: Chapter 1 - cakes. The whole line is a part of the chapter title. Q: Question that we wonder about? A: Answer telling us what we want to know? Q: Empty lines are ignored. If the question goes across multiple lines, that's perfectly fine. We just add the new line to what we added last. A: That goes for answers as well. Chapters do however need to be on a single line. ''' class Question: """Et spørsmål""" def __init__(self, number, question): self.question = question # String self.explaination = "Ingen forklaring gitt" self.chapter = "" self.qnum = number def append_line_to_question(self, more_question): self.question += " " + more_question def add_explaination(self, explaination): self.explaination = explaination def append_explaination(self, explaination): self.explaination += " " + explaination def setChapter(self, chapter): self.chapter = chapter def readQuiz(path): f = open(path, 'r') lines = f.read().split('\n') questions = [] readingQ = False # Are we currently reading a question? readingA = False # Are we currently reading an answer? currentchapter = "" chapters = [] qnum = -1 for line in lines: line = line.strip().replace('"', '\\"') if is_ignorable(line): continue if line_defines_chapter(line): # omit 'Chapter ' currentchapter = line[8:] if currentchapter not in chapters: chapters.append(currentchapter) continue if line_defines_question(line): qnum += 1 readingQ = True readingA = False # line[3:] is to skip the 'q: '. Not pretty questions.append(Question(qnum, line[3:])) questions[len(questions)-1].setChapter(currentchapter) elif line_defines_answer(line): readingA = True readingQ = False # line[3:] is to skip the 'a: '. Not pretty questions[len(questions)-1].add_explaination(line[3:]) # If the line doesn't start with anything interesting, we append to elif (readingA): questions[len(questions)-1].append_explaination(line) elif (readingQ): questions[len(questions)-1].append_line_to_question(line) return questions, chapters def is_ignorable(line): ''' returns true if line can safely be ignored parameter: * line: string ''' return len(line) < 1 def line_defines_question(line): return line.lower().startswith('q: ') def line_defines_answer(line): return line.lower().startswith('a: ') def line_defines_chapter(line): return line.lower().startswith('chapter ') if __name__ == "__main__": import sys exams = {} args = sys.argv[1:] if not args: print("Usage: Run as 'python flash_interpreter.py myfile.txt'") sys.exit() questions, chapters = readQuiz(args[0]) f = open('data.js', 'w') f.write('var chapters = [') for i in range(len(chapters)): f.write('"' + chapters[i] + '"') if (i + 1) < len(chapters): f.write(', ') f.write(']\n\n') f.write('var questions = [\n') for q in questions: f.write('{\n') f.write(' "id":"' + str(q.qnum) + '",\n') f.write(' "chapter":"' + str(q.chapter) + '",\n') f.write(' "question":"' + q.question + '",\n') f.write(' "answer":"' + q.explaination + '"\n') f.write('},\n\n') f.write('];') print "We're fine. totes fine." print "Output saved as data.js"
mit
7,440,680,661,345,437,000
27.161765
75
0.554463
false
3.806142
false
false
false
dana-i2cat/felix
modules/resource/utilities/rspecs/serm/request_parser.py
1
4179
from rspecs.parser_base import ParserBase from rspecs.commons_se import SELink from rspecs.commons_tn import Node, Interface import core logger = core.log.getLogger("utility-rspec") class SERMv3RequestParser(ParserBase): def __init__(self, from_file=None, from_string=None): super(SERMv3RequestParser, self).__init__(from_file, from_string) self.__sv = self.rspec.nsmap.get('sharedvlan') self.__felix = self.rspec.nsmap.get('felix') self.__proto = self.rspec.nsmap.get('protogeni') def check_se_node_resource(self, node): # according to the proposed URNs structure, a SE-node MUST have # "serm" as resource-name (client_id) and authority # (component_manager_id) fields # At least we verify the autority field here! if node.attrib.get("component_manager_id", None) is not None and \ node.attrib.get("client_id", None) is not None: if "serm" in node.attrib.get("component_manager_id", "") or \ "serm" in node.attrib.get("client_id", ""): return True return False def check_se_link_resource(self, link, c_manager): # according to the proposed URNs structure, a TN-link MUST have # "serm" as resource-name (client_id) and authority # (component_manager_name) fields # At least we verify the autority field here! if not c_manager.attrib.get("name"): return False if "serm" in c_manager.attrib.get("name"): return True return False def update_protogeni_cm_uuid(self, tag, obj): cmuuid = tag.attrib.get("{%s}component_manager_uuid" % (self.__proto)) if cmuuid is not None: obj.add_component_manager_uuid(cmuuid) def get_nodes(self, rspec): nodes = [] for n in rspec.findall(".//{%s}node" % (self.none)): if not self.check_se_node_resource(n): logger.info("Skipping this node, not a SE-res: %s", (n,)) continue node = Node(n.attrib.get("client_id"), n.attrib.get("component_manager_id"), n.attrib.get("exclusive")) self.update_protogeni_cm_uuid(n, node) for i in n.iterfind("{%s}interface" % (self.none)): interface = Interface(i.attrib.get("client_id")) for sv in i.iterfind("{%s}link_shared_vlan" % (self.__sv)): interface.add_vlan(sv.attrib.get("vlantag"), sv.attrib.get("name")) node.add_interface(interface.serialize()) nodes.append(node.serialize()) return nodes def nodes(self): return self.get_nodes(self.rspec) def get_links(self, rspec): links_ = [] for l in rspec.findall(".//{%s}link" % (self.none)): manager_ = l.find("{%s}component_manager" % (self.none)) if manager_ is None: self.raise_exception("Component-Mgr tag not found in link!") if not self.check_se_link_resource(l, manager_): logger.info("Skipping this link, not a SE-res: %s", (l,)) continue type_ = l.find("{%s}link_type" % (self.none)) if type_ is None: self.raise_exception("Link-Type tag not found in link!") l_ = SELink(l.attrib.get("client_id"), type_.attrib.get("name"), manager_.attrib.get("name")) self.update_protogeni_cm_uuid(l, l_) # FIXME: VLAN seems not properly added to interface [l_.add_interface_ref(i.attrib.get("client_id"), i.attrib.get("{%s}vlan" % (self.__felix))) for i in l.iterfind("{%s}interface_ref" % (self.none))] [l_.add_property(p.attrib.get("source_id"), p.attrib.get("dest_id"), p.attrib.get("capacity")) for p in l.iterfind("{%s}property" % (self.none))] links_.append(l_.serialize()) return links_ def links(self): return self.get_links(self.rspec)
apache-2.0
6,311,328,318,198,974,000
38.8
78
0.556832
false
3.704787
false
false
false
katajakasa/utuputki2
alembic/versions/4690204e5a62_initial.py
1
5560
"""Initial Revision ID: 4690204e5a62 Revises: Create Date: 2015-10-28 18:43:54.656000 """ # revision identifiers, used by Alembic. revision = '4690204e5a62' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('event', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=32), nullable=True), sa.Column('visible', sa.Boolean(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('source', sa.Column('id', sa.Integer(), nullable=False), sa.Column('hash', sa.String(length=64), nullable=True), sa.Column('file_name', sa.String(length=256), nullable=True), sa.Column('file_ext', sa.String(length=4), nullable=True), sa.Column('mime_type', sa.String(length=32), nullable=True), sa.Column('size_bytes', sa.Integer(), nullable=True), sa.Column('media_type', sa.Integer(), nullable=True), sa.Column('youtube_hash', sa.String(length=32), nullable=True), sa.Column('other_url', sa.String(length=512), nullable=True), sa.Column('length_seconds', sa.Integer(), nullable=True), sa.Column('created_at', sa.DateTime(timezone=True), nullable=True), sa.Column('title', sa.String(length=100), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('status', sa.Integer(), nullable=True), sa.Column('message', sa.String(length=64), nullable=True), sa.Column('video_codec', sa.String(length=16), nullable=True), sa.Column('video_bitrate', sa.Integer(), nullable=True), sa.Column('video_w', sa.Integer(), nullable=True), sa.Column('video_h', sa.Integer(), nullable=True), sa.Column('audio_codec', sa.String(length=16), nullable=True), sa.Column('audio_bitrate', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('user', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=32), nullable=True), sa.Column('password', sa.String(length=255), nullable=True), sa.Column('nickname', sa.String(length=32), nullable=True), sa.Column('email', sa.String(length=128), nullable=True), sa.Column('level', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('username') ) op.create_table('setting', sa.Column('id', sa.Integer(), nullable=False), sa.Column('user', sa.Integer(), nullable=True), sa.Column('key', sa.String(length=32), nullable=True), sa.Column('value', sa.String(length=32), nullable=True), sa.Column('type', sa.Integer(), nullable=True), sa.Column('max', sa.Integer(), nullable=True), sa.Column('min', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['user'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('session', sa.Column('key', sa.String(length=32), nullable=False), sa.Column('user', sa.Integer(), nullable=True), sa.Column('start', sa.DateTime(timezone=True), nullable=True), sa.ForeignKeyConstraint(['user'], ['user.id'], ), sa.PrimaryKeyConstraint('key') ) op.create_table('sourcequeue', sa.Column('id', sa.Integer(), nullable=False), sa.Column('user', sa.Integer(), nullable=True), sa.Column('created_at', sa.DateTime(timezone=True), nullable=True), sa.ForeignKeyConstraint(['user'], ['user.id'], ), sa.PrimaryKeyConstraint('id'), ) op.create_table('media', sa.Column('id', sa.Integer(), nullable=False), sa.Column('source', sa.Integer(), nullable=True), sa.Column('user', sa.Integer(), nullable=True), sa.Column('queue', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['queue'], ['sourcequeue.id'], ), 
sa.ForeignKeyConstraint(['source'], ['source.id'], ), sa.ForeignKeyConstraint(['user'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('player', sa.Column('id', sa.Integer(), nullable=False), sa.Column('token', sa.String(length=16), nullable=True), sa.Column('event', sa.Integer(), nullable=True), sa.Column('name', sa.String(length=32), nullable=True), sa.Column('last', sa.Integer(), nullable=True), sa.Column('status', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['event'], ['event.id'], ), sa.ForeignKeyConstraint(['last'], ['media.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_player_token'), 'player', ['token'], unique=True) op.create_table('skip', sa.Column('id', sa.Integer(), nullable=False), sa.Column('user', sa.Integer(), nullable=True), sa.Column('media', sa.Integer(), nullable=True), sa.Column('player', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['media'], ['media.id'], ), sa.ForeignKeyConstraint(['player'], ['player.id'], ), sa.ForeignKeyConstraint(['user'], ['user.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('user', 'media', 'player', name='_user_media_player_uc') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('user') op.drop_table('sourcequeue') op.drop_table('source') op.drop_table('skip') op.drop_table('setting') op.drop_table('session') op.drop_index(op.f('ix_player_token'), table_name='player') op.drop_table('player') op.drop_table('media') op.drop_table('event') ### end Alembic commands ###
mit
-8,873,998,268,785,954,000
40.185185
80
0.648741
false
3.442724
false
false
false
optiflows/nyuki
nyuki/workflow/tasks/trigger_workflow.py
1
7024
import json import asyncio import logging from enum import Enum from aiohttp import ClientSession from tukio.task import register from tukio.task.holder import TaskHolder from tukio.workflow import WorkflowExecState, Workflow from .utils import runtime from .utils.uri import URI log = logging.getLogger(__name__) class WorkflowStatus(Enum): PENDING = 'pending' RUNNING = 'running' TIMEOUT = 'timeout' DONE = 'done' @register('trigger_workflow', 'execute') class TriggerWorkflowTask(TaskHolder): __slots__ = ( 'template', 'blocking', 'task', '_engine', 'data', 'status', 'triggered_id', 'async_future', ) SCHEMA = { 'type': 'object', 'required': ['template'], 'additionalProperties': False, 'properties': { 'template': { 'type': 'object', 'required': ['service', 'id'], 'additionalProperties': False, 'properties': { 'service': {'type': 'string', 'minLength': 1}, 'id': {'type': 'string', 'minLength': 1}, 'draft': {'type': 'boolean', 'default': False}, }, }, 'blocking': {'type': 'boolean', 'default': True}, }, } def __init__(self, config): super().__init__(config) self.template = self.config['template'] self.blocking = self.config.get('blocking', True) self.task = None self._engine = 'http://{}/{}/api/v1/workflow'.format( runtime.config.get('http_host', 'localhost'), self.template['service'], ) # Reporting self.status = WorkflowStatus.PENDING.value self.data = None self.triggered_id = None self.async_future = None def report(self): return { 'exec_id': self.triggered_id, 'status': self.status, } async def async_exec(self, topic, data): log.debug( "Received data for async trigger_workflow in '%s': %s", topic, data, ) if not self.async_future.done(): self.async_future.set_result(data) await runtime.bus.unsubscribe(topic) async def execute(self, event): """ Entrypoint execution method. 
""" self.data = event.data self.task = asyncio.Task.current_task() is_draft = self.template.get('draft', False) # Send the HTTP request log.info('Triggering template %s%s on service %s', self.template['id'], ' (draft)' if is_draft else '', self.template['service']) # Setup headers (set requester and exec-track to avoid workflow loops) workflow = runtime.workflows[Workflow.current_workflow().uid] parent = workflow.exec.get('requester') track = list(workflow.exec.get('track', [])) if parent: track.append(parent) headers = { 'Content-Type': 'application/json', 'Referer': URI.instance(workflow.instance), 'X-Surycat-Exec-Track': ','.join(track) } # Handle blocking trigger_workflow using mqtt if self.blocking: topic = '{}/async/{}'.format(runtime.bus.name, self.uid[:8]) headers['X-Surycat-Async-Topic'] = topic headers['X-Surycat-Async-Events'] = ','.join([ WorkflowExecState.END.value, WorkflowExecState.ERROR.value, ]) self.async_future = asyncio.Future() await runtime.bus.subscribe(topic, self.async_exec) def _unsub(f): asyncio.ensure_future(runtime.bus.unsubscribe(topic)) self.task.add_done_callback(_unsub) async with ClientSession() as session: # Compute data to send to sub-workflows url = '{}/vars/{}{}'.format( self._engine, self.template['id'], '/draft' if is_draft else '', ) async with session.get(url) as response: if response.status != 200: raise RuntimeError("Can't load template info") wf_vars = await response.json() lightened_data = { key: self.data[key] for key in wf_vars if key in self.data } params = { 'url': '{}/instances'.format(self._engine), 'headers': headers, 'data': json.dumps({ 'id': self.template['id'], 'draft': is_draft, 'inputs': lightened_data, }) } async with session.put(**params) as response: if response.status != 200: msg = "Can't process workflow template {} on {}".format( self.template, self.nyuki_api ) if response.status % 400 < 100: reason = await response.json() msg = "{}, reason: {}".format(msg, reason['error']) raise RuntimeError(msg) resp_body = await response.json() self.triggered_id = resp_body['id'] wf_id = '@'.join([self.triggered_id[:8], self.template['service']]) self.status = WorkflowStatus.RUNNING.value log.info('Successfully started %s', wf_id) self.task.dispatch_progress(self.report()) # Block until task completed if self.blocking: log.info('Waiting for workflow %s to complete', wf_id) await self.async_future self.status = WorkflowStatus.DONE.value log.info('Workflow %s is done', wf_id) self.task.dispatch_progress({'status': self.status}) return self.data async def _end_triggered_workflow(self): """ Asynchronously cancel the triggered workflow. """ wf_id = '@'.join([self.triggered_id[:8], self.template['service']]) async with ClientSession() as session: url = '{}/instances/{}'.format(self._engine, self.triggered_id) async with session.delete(url) as response: if response.status != 200: log.warning('Failed to cancel workflow %s', wf_id) else: log.info('Workflow %s has been cancelled', wf_id) def teardown(self): """ Called when this task is cancelled or timed out. """ if self.task.timed_out is True: self.status = WorkflowStatus.TIMEOUT.value else: self.status = WorkflowStatus.DONE.value self.task.dispatch_progress({'status': self.status}) if not self.triggered_id: log.debug('No workflow to cancel') return self.data asyncio.ensure_future(self._end_triggered_workflow()) return self.data
apache-2.0
5,176,207,135,158,172,000
33.431373
79
0.530325
false
4.384519
false
false
false
petroniocandido/pyFTS
pyFTS/models/seasonal/msfts.py
1
1921
import numpy as np

from pyFTS.common import FLR
from pyFTS.models.seasonal import sfts


class MultiSeasonalFTS(sfts.SeasonalFTS):
    """
    Multi-Seasonal Fuzzy Time Series
    """

    def __init__(self, name, indexer, **kwargs):
        super(MultiSeasonalFTS, self).__init__("MSFTS")
        self.name = "Multi Seasonal FTS"
        self.shortname = "MSFTS " + name
        self.detail = ""
        self.seasonality = 1
        self.has_seasonality = True
        self.has_point_forecasting = True
        self.is_high_order = False
        self.is_multivariate = True
        self.indexer = indexer
        self.flrgs = {}

    def generate_flrg(self, flrs):
        for flr in flrs:
            if str(flr.index) not in self.flrgs:
                self.flrgs[str(flr.index)] = sfts.SeasonalFLRG(flr.index)
            self.flrgs[str(flr.index)].append_rhs(flr.RHS)

    def train(self, data, **kwargs):
        if kwargs.get('sets', None) is not None:
            self.sets = kwargs.get('sets', None)
        if kwargs.get('parameters', None) is not None:
            self.seasonality = kwargs.get('parameters', None)
        #ndata = self.indexer.set_data(data,self.doTransformations(self.indexer.get_data(data)))
        flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data)
        self.generate_flrg(flrs)

    def forecast(self, data, **kwargs):
        ret = []
        index = self.indexer.get_season_of_data(data)
        ndata = self.indexer.get_data(data)

        for k in np.arange(0, len(index)):
            flrg = self.flrgs[str(index[k])]
            mp = self.getMidpoints(flrg)
            ret.append(sum(mp) / len(mp))

        return ret

    def forecast_ahead(self, data, steps, **kwargs):
        ret = []
        for i in steps:
            flrg = self.flrgs[str(i)]
            mp = self.getMidpoints(flrg)
            ret.append(sum(mp) / len(mp))

        return ret
gpl-3.0
-2,375,264,770,700,917,000
28.106061
96
0.580947
false
3.306368
false
false
false
neep305/swordfish
text_analysis/fileutil.py
1
1183
from konlpy.tag import Hannanum
from collections import Counter
import pandas as pd
import csv
import json


def read_localcsv(path):
    result = pd.read_csv(path, encoding='UTF-8')
    print(result)
    return result


def get_json_data(path):
    # r = requests.get(URL)
    # data = r.text
    RESULTS = {"children": []}
    with open(path) as csvfile:
        reader = csv.DictReader(csvfile)
        for line in reader:
            RESULTS['children'].append({
                "name": line['Name'],
                "symbol": line['Symbol'],
                "price": line['lastsale'],
                "net_change": line['netchange'],
                "percent_change": line['pctchange'],
                "volume": line['share_volume'],
                "value": line['Nasdaq100_points']
            })
    # print(json.dumps(RESULTS))
    return json.dumps(RESULTS)


def get_tags(text, ntags=50, multiplier=10):
    h = Hannanum()
    nouns = h.nouns(text)
    count = Counter(nouns)
    # for word,cnt in count.most_common(ntags):
    #     print(word,cnt)
    return count


def get_csv_data(path, column):
    # localcsv = read_localcsv(path)
    with open(path) as csvfile:
        reader = csv.DictReader(csvfile)
        content = ''
        for line in reader:
            content += ' ' + line[column]
    tags = get_tags(content)
    return tags
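# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original fileutil.py): one way
# the helpers above could be driven to count nouns in a CSV column. The file
# name 'posts.csv' and the column name 'content' are made-up placeholders, and
# this assumes KoNLPy (plus the Java runtime Hannanum needs) is installed.

if __name__ == '__main__':
    # get_csv_data() concatenates the chosen column and runs Hannanum noun
    # extraction over it, returning a collections.Counter of noun frequencies.
    tag_counts = get_csv_data('posts.csv', 'content')

    # Counter.most_common() gives the top-N nouns with their counts.
    for word, count in tag_counts.most_common(10):
        print(word, count)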
mit
-798,070,984,540,722,700
22.196078
45
0.667794
false
2.823389
false
false
false
dtrodrigues/nifi-minifi-cpp
libminifi/test/script-tests/test_scripts/stateful_processor.py
2
1602
#
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


def describe(processor):
    processor.setDescription("Processor used for testing in ExecutePythonProcessorTests.cpp")


state = 0


class WriteCallback(object):
    def process(self, output_stream):
        global state
        new_content = str(state).encode('utf-8')
        output_stream.write(new_content)
        state = state + 1
        return len(new_content)


def onTrigger(context, session):
    global state
    log.info('Vrrm, vrrrm, processor is running, vrrrm!!')
    # flow_file = session.get()
    flow_file = session.create()
    flow_file.setAttribute("filename", str(state))
    log.info('created flow file: %s' % flow_file.getAttribute('filename'))

    if flow_file is not None:
        session.write(flow_file, WriteCallback())
        session.transfer(flow_file, REL_SUCCESS)
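# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original test script): a
# self-contained imitation of the counter behaviour above. FakeFlowFile and
# fake_trigger are made up for illustration; in MiNiFi the session, log and
# REL_SUCCESS objects are injected by the C++ ExecutePythonProcessor host.

class FakeFlowFile(object):
    def __init__(self):
        self.attributes = {}
        self.content = b''

    def setAttribute(self, key, value):
        self.attributes[key] = value


counter = 0


def fake_trigger(flow_file):
    # Mirrors onTrigger()/WriteCallback: name the flow file after the counter,
    # write the counter as its content, then advance the module-level state.
    global counter
    flow_file.setAttribute('filename', str(counter))
    flow_file.content = str(counter).encode('utf-8')
    counter += 1
    return flow_file


if __name__ == '__main__':
    files = [fake_trigger(FakeFlowFile()) for _ in range(3)]
    print([f.attributes['filename'] for f in files])  # ['0', '1', '2']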
apache-2.0
7,008,879,742,411,832,000
33.826087
93
0.714107
false
3.945813
false
false
false
prymitive/upaas-admin
upaas_admin/features/cron.py
1
1583
# -*- coding: utf-8 -*-
"""
    :copyright: Copyright 2014 by Łukasz Mierzwa
    :contact: [email protected]
"""

from __future__ import unicode_literals

import logging

from django.utils.translation import ugettext as _

from upaas.config.base import (Config, ConfigurationError, StringEntry,
                               IntegerEntry)

from upaas_admin.features.base import Feature


log = logging.getLogger(__name__)


class CronEntryConfig(Config):

    schema = {
        "command": StringEntry(required=True),
        "minute": IntegerEntry(min_value=0, max_value=59, default=-1),
        "hour": IntegerEntry(min_value=0, max_value=23, default=-1),
        "day": IntegerEntry(min_value=1, max_value=31, default=-1),
        "month": IntegerEntry(min_value=1, max_value=12, default=-1),
        "weekday": IntegerEntry(min_value=1, max_value=7, default=-1),
    }


class CronFeature(Feature):

    def parse_crons(self):
        crons = []
        for item in self.value:
            try:
                cron = CronEntryConfig(item)
            except ConfigurationError as e:
                log.error(_('Invalid cron configuration in {item}: '
                            '{e}').format(item=item, e=e))
            else:
                crons.append('cron = %d %d %d %d %d %s' % (
                    cron.minute, cron.hour, cron.day, cron.month,
                    cron.weekday, cron.command))
        return crons

    def update_vassal(self, application, options):
        for cron in self.parse_crons():
            options.append(cron)
        return options
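# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original cron.py): the kind of
# value CronFeature.parse_crons() iterates over, and the uWSGI cron line it
# would emit. The shape of Feature.value is an assumption based on the schema
# above; only the 'cron = ...' output format is taken directly from the code.

example_feature_value = [
    # One entry per cron job; unset fields fall back to the -1 defaults,
    # which uWSGI interprets as "any".
    {'command': 'python manage.py clearsessions', 'minute': 30, 'hour': 2},
]

# For the entry above, parse_crons() would append the vassal option line:
#
#     cron = 30 2 -1 -1 -1 python manage.py clearsessions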
gpl-3.0
2,626,893,547,294,586,400
28.296296
79
0.584071
false
3.67907
true
false
false
Zerknechterer/pyload
module/plugins/hoster/GoogledriveCom.py
1
1839
# -*- coding: utf-8 -*-
#
# Test links:
# https://drive.google.com/file/d/0B6RNTe4ygItBQm15RnJiTmMyckU/view?pli=1

import re
import urlparse

from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.utils import html_unescape


class GoogledriveCom(SimpleHoster):
    __name__ = "GoogledriveCom"
    __type__ = "hoster"
    __version__ = "0.12"

    __pattern__ = r'https?://(?:www\.)?(drive|docs)\.google\.com/(file/d/\w+|uc\?.*id=)'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]

    __description__ = """Drive.google.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zapp-brannigan", "[email protected]")]

    DISPOSITION = False  #: Remove in 0.4.10

    NAME_PATTERN = r'(?:<title>|class="uc-name-size".*>)(?P<N>.+?)(?: - Google Drive</title>|</a> \()'
    OFFLINE_PATTERN = r'align="center"><p class="errorMessage"'

    LINK_FREE_PATTERN = r'"([^"]+uc\?.*?)"'

    def setup(self):
        self.multiDL = True
        self.resumeDownload = True
        self.chunkLimit = 1

    def handleFree(self, pyfile):
        for _i in xrange(2):
            m = re.search(self.LINK_FREE_PATTERN, self.html)

            if m is None:
                self.error(_("Free download link not found"))

            else:
                link = html_unescape(m.group(1).decode('unicode-escape'))
                if not urlparse.urlparse(link).scheme:
                    link = urlparse.urljoin("https://docs.google.com/", link)

                direct_link = self.directLink(link, False)
                if not direct_link:
                    self.html = self.load(link, decode=True)
                else:
                    self.link = direct_link
                    break


getInfo = create_getInfo(GoogledriveCom)
gpl-3.0
-4,720,897,857,208,252,000
29.65
105
0.559543
false
3.411874
false
false
false
jgrundstad/viewer
admin.py
1
1761
from django.contrib import admin
from models import Project, Bnid, Sample, Study, Caller, Report, Variant, \
    Genome, Contact, SharedData


class ProjectAdmin(admin.ModelAdmin):
    model = Project
    list_display = ('id', 'name', 'description', 'creation_date')
    filter_horizontal = ('user', )


class BnidAdmin(admin.ModelAdmin):
    model = Bnid


class SampleAdmin(admin.ModelAdmin):
    model = Sample
    list_display = ('id', 'name')


class CallerAdmin(admin.ModelAdmin):
    display = ['name']


class ReportAdmin(admin.ModelAdmin):
    model = Report
    list_display = ('caller', 'report_file', 'upload_date')


class StudyAdmin(admin.ModelAdmin):
    model = Study
    list_display = ('name', 'description')


class GenomeAdmin(admin.ModelAdmin):
    model = Genome
    list_display = ('id', 'name')


class VariantAdmin(admin.ModelAdmin):
    model = Variant
    list_display = ('__str__', 'report', 'gene_name', 'chrom', 'pos', 'ref', 'alt',
                    'normal_ref_count', 'normal_alt_count', 'tumor_ref_count',
                    'tumor_alt_count')


class ContactAdmin(admin.ModelAdmin):
    model = Contact
    list_display = ('full_name', 'email', 'project')


class SharedDataAdmin(admin.ModelAdmin):
    model = SharedData
    list_display = ('uuid', 'field_lookup', 'user', 'creation_date', 'inactive_date')


admin.site.register(Project, ProjectAdmin)
admin.site.register(Sample, SampleAdmin)
admin.site.register(Bnid, BnidAdmin)
admin.site.register(Study, StudyAdmin)
admin.site.register(Caller, CallerAdmin)
admin.site.register(Report, ReportAdmin)
admin.site.register(Genome, GenomeAdmin)
admin.site.register(Variant, VariantAdmin)
admin.site.register(Contact, ContactAdmin)
admin.site.register(SharedData, SharedDataAdmin)
apache-2.0
-6,679,012,992,260,252,000
26.968254
85
0.693924
false
3.487129
false
false
false
yongshengwang/builthue
apps/oozie/src/oozie/models.py
1
80907
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import copy import logging import re import StringIO import time import zipfile from datetime import datetime, timedelta from string import Template from itertools import chain from django.db import models from django.db.models import Q from django.core.urlresolvers import reverse from django.core.validators import RegexValidator from django.contrib.auth.models import User from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.forms.models import inlineformset_factory from django.utils.encoding import force_unicode, smart_str from django.utils.translation import ugettext as _, ugettext_lazy as _t from desktop.log.access import access_warn from desktop.lib import django_mako from desktop.lib.exceptions_renderable import PopupException from desktop.lib.json_utils import JSONEncoderForHTML from desktop.models import Document from hadoop.fs.exceptions import WebHdfsException from hadoop.fs.hadoopfs import Hdfs from liboozie.submittion import Submission from liboozie.submittion import create_directories from oozie.conf import REMOTE_SAMPLE_DIR from oozie.utils import utc_datetime_format from oozie.timezones import TIMEZONES LOG = logging.getLogger(__name__) PATH_MAX = 512 name_validator = RegexValidator(regex='^[a-zA-Z_][\-_a-zA-Z0-9]{1,39}$', message=_('Enter a valid value: combination of 2 - 40 letters and digits starting by a letter')) # To sync in worklow.models.js DEFAULT_SLA = [ {'key': 'enabled', 'value': False}, {'key': 'nominal-time', 'value': ''}, {'key': 'should-start', 'value': ''}, {'key': 'should-end', 'value': ''}, {'key': 'max-duration', 'value': ''}, {'key': 'alert-events', 'value': ''}, {'key': 'alert-contact', 'value': ''}, {'key': 'notification-msg', 'value': ''}, {'key': 'upstream-apps', 'value': ''}, ] class JobManager(models.Manager): def can_read(self, user, job_id): job = Job.objects.select_related().get(pk=job_id).get_full_node() return job.can_read(user) def can_read_or_exception(self, request, job_id, exception_class=PopupException): if job_id is None: return try: job = Job.objects.select_related().get(pk=job_id).get_full_node() if job.can_read(request.user): return job else: message = _("Permission denied. 
%(username)s does not have the permissions required to access job %(id)s") % \ {'username': request.user.username, 'id': job.id} access_warn(request, message) request.error(message) raise exception_class(message) except Job.DoesNotExist: raise exception_class(_('job %(id)s does not exist') % {'id': job_id}) def can_edit_or_exception(self, request, job, exception_class=PopupException): if job.is_editable(request.user): return True else: raise exception_class(_('Not allowed to modified this job')) class Job(models.Model): """ Base class for Oozie Workflows, Coordinators and Bundles. """ owner = models.ForeignKey(User, db_index=True, verbose_name=_t('Owner'), help_text=_t('Person who can modify the job.')) # Deprecated name = models.CharField(max_length=40, blank=False, validators=[name_validator], # Deprecated help_text=_t('Name of the job, which must be unique per user.'), verbose_name=_t('Name')) description = models.CharField(max_length=1024, blank=True, verbose_name=_t('Description'), # Deprecated help_text=_t('The purpose of the job.')) last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Last modified')) schema_version = models.CharField(max_length=128, verbose_name=_t('Schema version'), help_text=_t('The version of the XML schema used to talk to Oozie.')) deployment_dir = models.CharField(max_length=1024, blank=True, verbose_name=_t('HDFS deployment directory'), help_text=_t('The path on the HDFS where all the workflows and ' 'dependencies must be uploaded.')) is_shared = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is shared'), # Deprecated help_text=_t('Enable other users to have access to this job.')) parameters = models.TextField(default='[{"name":"oozie.use.system.libpath","value":"true"}]', verbose_name=_t('Oozie parameters'), help_text=_t('Parameters used at the submission time (e.g. market=US, oozie.use.system.libpath=true).')) is_trashed = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is trashed'), blank=True, # Deprecated help_text=_t('If this job is trashed.')) doc = generic.GenericRelation(Document, related_name='oozie_doc') data = models.TextField(blank=True, default=json.dumps({})) # e.g. 
data=json.dumps({'sla': [python data], ...}) objects = JobManager() unique_together = ('owner', 'name') def delete(self, skip_trash=False, *args, **kwargs): if skip_trash: self.doc.all().delete() return super(Job, self).delete(*args, **kwargs) else: for job in self.doc.all(): job.send_to_trash() return self def restore(self): self.doc.get().restore_from_trash() return self def save(self): super(Job, self).save() if not self.deployment_dir: default_dir = Hdfs.join(REMOTE_SAMPLE_DIR.get(), '_%s_-oozie-%s-%s' % (self.owner.username, self.id, time.time())) self.deployment_dir = default_dir super(Job, self).save() def is_deployed(self, fs): return self.deployment_dir != '' and fs.exists(self.deployment_dir) def __str__(self): res = '%s - %s' % (force_unicode(self.name), self.owner) return force_unicode(res) def get_full_node(self): try: return self.workflow except Workflow.DoesNotExist: pass try: return self.coordinator except Coordinator.DoesNotExist: pass try: return self.bundle except Bundle.DoesNotExist: pass def get_type(self): return self.get_full_node().get_type() def get_absolute_url(self): return self.get_full_node().get_absolute_url() def get_parameters(self): return json.loads(self.parameters) def add_parameter(self, name, value): oozie_parameters = self.get_parameters() oozie_parameters.append({"name": name, "value": value}) self.parameters = json.dumps(oozie_parameters) @property def parameters_escapejs(self): return self._escapejs_parameters_list(self.parameters) def _escapejs_parameters_list(self, parameters): return json.dumps(json.loads(parameters), cls=JSONEncoderForHTML) @property def status(self): # TODO if self.is_shared: return _('shared') else: return _('personal') def find_all_parameters(self): params = self.find_parameters() if hasattr(self, 'sla') and self.sla_enabled: for param in find_json_parameters(self.sla): if param not in params: params[param] = '' for param in self.get_parameters(): params[param['name'].strip()] = param['value'] return [{'name': name, 'value': value} for name, value in params.iteritems()] def can_read(self, user): try: return self.doc.get().can_read(user) except Exception, e: LOG.error('can_read failed because the object has more than one document: %s' % self.doc.all()) raise e def is_editable(self, user): return user.is_superuser or self.owner == user or self.doc.get().can_write(user) @property def data_dict(self): if not self.data: self.data = json.dumps({}) data_python = json.loads(self.data) # Backward compatibility if 'sla' not in data_python: data_python['sla'] = copy.deepcopy(DEFAULT_SLA) if 'credentials' not in data_python: data_python['credentials'] = [] return data_python @property def data_js_escaped(self): return json.dumps(self.data_dict, cls=JSONEncoderForHTML) @property def sla(self): return self.data_dict['sla'] @sla.setter def sla(self, sla): data_ = self.data_dict data_['sla'] = sla self.data = json.dumps(data_) @property def sla_enabled(self): return self.sla[0]['value'] # #1 is enabled class WorkflowManager(models.Manager): SCHEMA_VERSION = { '0.4': 'uri:oozie:workflow:0.4', '0.5': 'uri:oozie:workflow:0.5' } def new_workflow(self, owner): workflow = Workflow(owner=owner, schema_version=WorkflowManager.SCHEMA_VERSION['0.4']) kill = Kill(name='kill', workflow=workflow, node_type=Kill.node_type) end = End(name='end', workflow=workflow, node_type=End.node_type) start = Start(name='start', workflow=workflow, node_type=Start.node_type) to = Link(parent=start, child=end, name='to') related = Link(parent=start, child=end, 
name='related') workflow.start = start workflow.end = end return workflow def initialize(self, workflow, fs=None): Kill.objects.create(name='kill', workflow=workflow, node_type=Kill.node_type) end = End.objects.create(name='end', workflow=workflow, node_type=End.node_type) start = Start.objects.create(name='start', workflow=workflow, node_type=Start.node_type) link = Link(parent=start, child=end, name='to') link.save() Link.objects.create(parent=start, child=end, name='related') workflow.start = start workflow.end = end workflow.save() Document.objects.link(workflow, owner=workflow.owner, name=workflow.name, description=workflow.description) if fs: self.check_workspace(workflow, fs) def check_workspace(self, workflow, fs): create_directories(fs, [REMOTE_SAMPLE_DIR.get()]) create_directories(fs) if workflow.is_shared: perms = 0755 else: perms = 0711 Submission(workflow.owner, workflow, fs, None, {})._create_dir(workflow.deployment_dir, perms=perms) def destroy(self, workflow, fs): Submission(workflow.owner, workflow, fs, None, {}).remove_deployment_dir() try: workflow.coordinator_set.update(workflow=None) # In Django 1.3 could do ON DELETE set NULL except: pass workflow.save() workflow.delete(skip_trash=True) def managed(self): return self.filter(managed=True) def unmanaged(self): return self.filter(managed=False) class Workflow(Job): is_single = models.BooleanField(default=False) start = models.ForeignKey('Start', related_name='start_workflow', blank=True, null=True) end = models.ForeignKey('End', related_name='end_workflow', blank=True, null=True) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('Job configuration properties used by all the actions of the workflow ' '(e.g. 
mapred.job.queue.name=production)')) managed = models.BooleanField(default=True) objects = WorkflowManager() HUE_ID = 'hue-id-w' ICON = '/oozie/static/art/icon_oozie_workflow_48.png' METADATA_FORMAT_VERSION = "0.0.1" def get_type(self): return 'workflow' def get_properties(self): return json.loads(self.job_properties) def clone(self, fs, new_owner=None): source_deployment_dir = self.deployment_dir # Needed nodes = self.node_set.all() links = Link.objects.filter(parent__workflow=self) name = self.name + '-copy' if new_owner is not None: owner = new_owner else: owner = self.owner copy_doc = self.doc.get().copy(name=name, owner=owner) copy = self copy.pk = None copy.id = None copy.name = name copy.deployment_dir = '' copy.owner = owner copy.save() copy.doc.all().delete() copy.doc.add(copy_doc) old_nodes_mapping = {} for node in nodes: prev_id = node.id node = node.get_full_node() node.pk = None node.id = None node.workflow = copy node.save() old_nodes_mapping[prev_id] = node for link in links: link.pk = None link.id = None link.parent = old_nodes_mapping[link.parent.id] link.child = old_nodes_mapping[link.child.id] link.save() copy.start = old_nodes_mapping[self.start.id] copy.end = old_nodes_mapping[self.end.id] copy.save() try: if copy.is_shared: perms = 0755 else: perms = 0711 fs.copy_remote_dir(source_deployment_dir, copy.deployment_dir, owner=copy.owner, dir_mode=perms) except WebHdfsException, e: msg = _('The copy of the deployment directory failed: %s.') % e LOG.error(msg) raise PopupException(msg) # Reload workflow from DB... clears relationship cache copy = Workflow.objects.get(id=copy.id) return copy @property def job_properties_escapejs(self): return self._escapejs_parameters_list(self.job_properties) def has_cycle(self): """ Topological sort for detecting cycles in the directed graph. 
""" queue = set([self.start]) removed_edges = set() while queue: node = queue.pop() edges = set(node.get_children_links()) for edge in edges: removed_edges.add(edge) # Edge has no other incoming edges if not set(edge.child.get_parent_links()) - removed_edges: queue.add(edge.child) graph_edges = set([edge for node in self.node_set.all() for edge in node.get_children_links()]) return len(graph_edges - removed_edges) > 0 # Graph does not have unseen edges def find_parameters(self): params = set() if self.sla_enabled: for param in find_json_parameters(self.sla): params.add(param) for node in self.node_list: if hasattr(node, 'find_parameters'): params.update(node.find_parameters()) return dict([(param, '') for param in list(params)]) @property def actions(self): return Action.objects.filter(workflow=self, node_type__in=Action.types) @property def node_list(self): """Return a flatten node list ordered by the hierarchy of the nodes in the workflow""" def flatten(nodes): flat = [] if type(nodes) == list: for node in nodes: flat.extend(flatten(node)) else: flat.append(nodes) return flat def from_iterable(iterables): # Python 2.6 chain.from_iterable(['ABC', 'DEF']) --> A B C D E F for it in iterables: for element in it: yield element return list(chain(from_iterable([flatten(row) for row in self.get_hierarchy()]))) @classmethod def get_application_path_key(cls): return 'oozie.wf.application.path' @classmethod def get_application_filename(cls): return 'workflow.xml' def get_absolute_url(self): if self.doc.only('extra').get().extra == 'jobsub': return '/jobsub/#edit-design/%s' % self.id else: return reverse('oozie:edit_workflow', kwargs={'workflow': self.id}) + '#editWorkflow' def get_hierarchy(self): node = Start.objects.get(workflow=self) # Uncached version of start. 
kill = Kill.objects.get(workflow=node.workflow) # Special case: manage error email actions separately try: kill_nodes = [Link.objects.filter(child=kill).get(name='ok').parent, kill] except Link.DoesNotExist: kill_nodes = [kill] return self.get_hierarchy_rec(node=node) + [kill_nodes, [End.objects.get(workflow=node.workflow)]] def get_hierarchy_rec(self, node=None): if node is None: node = self.start if node.id is None: return [] node = node.get_full_node() parents = node.get_parents() if isinstance(node, End): return [] # Not returning the end node elif isinstance(node, Decision): children = node.get_children('start') return [[node] + [[self.get_hierarchy_rec(node=child) for child in children], node.get_child_end()]] + self.get_hierarchy_rec(node.get_child_end().get_child('to')) elif isinstance(node, DecisionEnd): return [] elif isinstance(node, Fork): children = node.get_children('start') return [[node] + [[self.get_hierarchy_rec(node=child) for child in children], node.get_child_join()]] + self.get_hierarchy_rec(node.get_child_join().get_child('to')) elif isinstance(node, Join): return [] else: child = Link.objects.filter(parent=node).exclude(name__in=['related', 'kill', 'error'])[0].child return [node] + self.get_hierarchy_rec(child) def gen_status_graph(self, oozie_workflow): from oozie.forms import NodeMetaForm # Circular dependency actions = oozie_workflow.get_working_actions() controls = oozie_workflow.get_control_flow_actions() WorkflowFormSet = inlineformset_factory(Workflow, Node, form=NodeMetaForm, max_num=0, can_order=False, can_delete=False) forms = WorkflowFormSet(instance=self).forms template = 'editor/gen/workflow-graph-status.xml.mako' index = dict([(form.instance.id, form) for form in forms]) actions_index = dict([(action.name, action) for action in actions]) controls_index = dict([(control.name.strip(':'), control) for control in controls]) return django_mako.render_to_string(template, {'nodes': self.get_hierarchy(), 'index': index, 'actions': actions_index, 'controls': controls_index}) @classmethod def gen_status_graph_from_xml(cls, user, oozie_workflow): from oozie.importlib.workflows import import_workflow # Circular dependency try: workflow = Workflow.objects.new_workflow(user) workflow.save() try: import_workflow(workflow, oozie_workflow.definition) graph = workflow.gen_status_graph(oozie_workflow) return graph, workflow.node_list except Exception, e: LOG.warn('Workflow %s could not be converted to a graph: %s' % (oozie_workflow.id, e)) finally: if workflow.pk is not None: workflow.delete(skip_trash=True) return None, [] def to_xml(self, mapping=None): if mapping is None: mapping = {} tmpl = 'editor/gen/workflow.xml.mako' xml = re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, {'workflow': self, 'mapping': mapping})) return force_unicode(xml) def compress(self, mapping=None, fp=StringIO.StringIO()): metadata = { 'version': Workflow.METADATA_FORMAT_VERSION, 'nodes': {}, 'attributes': { 'description': self.description, 'deployment_dir': self.deployment_dir } } for node in self.node_list: if hasattr(node, 'jar_path'): metadata['nodes'][node.name] = { 'attributes': { 'jar_path': node.jar_path } } xml = self.to_xml(mapping=mapping) zfile = zipfile.ZipFile(fp, 'w') zfile.writestr("workflow.xml", smart_str(xml)) zfile.writestr("workflow-metadata.json", smart_str(json.dumps(metadata))) zfile.close() return fp @classmethod def decompress(cls, fp): zfile = zipfile.ZipFile(fp, 'r') metadata_json = zfile.read('workflow-metadata.json') metadata = 
json.loads(metadata_json) workflow_xml = zfile.read('workflow.xml') return workflow_xml, metadata @property def sla_workflow_enabled(self): return self.sla_enabled or any([node.sla_enabled for node in self.node_list if hasattr(node, 'sla_enabled')]) @property def credentials(self): sub_lists = [node.credentials for node in self.node_list if hasattr(node, 'credentials')] return set([item['name'] for l in sub_lists for item in l if item['value']]) class Link(models.Model): # Links to exclude when using get_children_link(), get_parent_links() in the API META_LINKS = ('related',) parent = models.ForeignKey('Node', related_name='child_node') child = models.ForeignKey('Node', related_name='parent_node', verbose_name='') name = models.CharField(max_length=40) comment = models.CharField(max_length=1024, default='', blank=True) def __unicode__(self): return '%s %s %s' % (self.parent, self.child, self.name) class Node(models.Model): """ Base class for the Oozie WorkflowAction or ControlFlow Nodes. http://nightly.cloudera.com/cdh4/cdh/4/oozie-3.1.3-cdh4.0.0-SNAPSHOT/WorkflowFunctionalSpec.html#a3_Workflow_Nodes The Node model is an abstract base class. All concrete actions derive from it. And it provides something for the Action or ControlFlow to reference. See https://docs.djangoproject.com/en/dev/topics/db/models/#multi-table-inheritance """ PARAM_FIELDS = () name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name'), help_text=_t('Name of the action, which must be unique by workflow.')) description = models.CharField(max_length=1024, blank=True, default='', verbose_name=_t('Description'), help_text=_t('The purpose of the action.')) node_type = models.CharField(max_length=64, blank=False, verbose_name=_t('Type'), help_text=_t('The type of action (e.g. MapReduce, Pig...)')) workflow = models.ForeignKey(Workflow) children = models.ManyToManyField('self', related_name='parents', symmetrical=False, through=Link) data = models.TextField(blank=True, default=json.dumps({})) unique_together = ('workflow', 'name') def get_full_node(self): if self.node_type == Mapreduce.node_type: node = self.mapreduce elif self.node_type == Pig.node_type: node = self.pig elif self.node_type == Hive.node_type: node = self.hive elif self.node_type == Sqoop.node_type: node = self.sqoop elif self.node_type == Ssh.node_type: node = self.ssh elif self.node_type == Shell.node_type: node = self.shell elif self.node_type == DistCp.node_type: node = self.distcp elif self.node_type == Fs.node_type: node = self.fs elif self.node_type == Email.node_type: node = self.email elif self.node_type == SubWorkflow.node_type: node = self.subworkflow elif self.node_type == Streaming.node_type: node = self.streaming elif self.node_type == Java.node_type: node = self.java elif self.node_type == Generic.node_type: node = self.generic elif self.node_type == Start.node_type: node = self.start elif self.node_type == End.node_type: node = self.end elif self.node_type == Kill.node_type: node = self.kill elif self.node_type == Fork.node_type: node = self.fork elif self.node_type == Join.node_type: node = self.join elif self.node_type == Decision.node_type: node = self.decision elif self.node_type == DecisionEnd.node_type: node = self.decisionend else: raise Exception(_('Unknown Node type: %s. 
Was it set at its creation?'), (self.node_type,)) return node def find_parameters(self): return find_parameters(self, self.PARAM_FIELDS) def __unicode__(self): if self.name != '': return '%s' % self.name else: return '%s-%s' % (self.node_type, self.id) def to_xml(self, mapping=None): if mapping is None: mapping = {} node = self.get_full_node() data = { 'node': node, 'mapping': mapping } return django_mako.render_to_string(node.get_template_name(), data) # Can't use through relation directly with this Django version? # https://docs.djangoproject.com/en/1.2/topics/db/models/#intermediary-manytomany def get_link(self, name=None): if name is None: return Link.objects.exclude(name__in=Link.META_LINKS).get(parent=self) else: return Link.objects.exclude(name__in=Link.META_LINKS).get(parent=self, name=name) def get_child_link(self, name=None): return self.get_link(name) def get_child(self, name=None): """Includes DecisionEnd nodes""" return self.get_link(name).child.get_full_node() def get_oozie_child(self, name=None): """Resolves DecisionEnd nodes""" child = self.get_link(name).child.get_full_node() if child and child.node_type == DecisionEnd.node_type: child = child.get_oozie_child('to') return child def get_children(self, name=None): if name is not None: return [link.child for link in Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self, name=name)] else: return [link.child for link in Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self)] def get_parent(self, name=None): if name is not None: return self.get_parent_link(name).parent.get_full_node() else: return self.get_parent_link().parent.get_full_node() def get_parents(self): return [link.parent for link in self.get_parent_links()] def get_parent_link(self, name=None): if name is not None: return Link.objects.get(child=self, name=name) else: return Link.objects.get(child=self) def get_parent_links(self): return Link.objects.filter(child=self).exclude(name__in=Link.META_LINKS) def get_children_links(self, name=None): if name is None: return Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self) else: return Link.objects.exclude(name__in=Link.META_LINKS).filter(parent=self, name=name) def get_all_children_links(self): return Link.objects.filter(parent=self) def get_template_name(self): return 'editor/gen/workflow-%s.xml.mako' % self.node_type def is_visible(self): return True def add_node(self, child): raise NotImplementedError(_("%(node_type)s has not implemented the 'add_node' method.") % { 'node_type': self.node_type }) @property def data_dict(self): if not self.data: self.data = json.dumps({}) data_python = json.loads(self.data) # Backward compatibility if 'sla' not in data_python: data_python['sla'] = copy.deepcopy(DEFAULT_SLA) if 'credentials' not in data_python: data_python['credentials'] = [] return data_python @property def sla(self): return self.data_dict['sla'] @sla.setter def sla(self, sla): data_ = self.data_dict data_['sla'] = sla self.data = json.dumps(data_) @property def sla_enabled(self): return self.sla[0]['value'] # #1 is enabled @property def credentials(self): return self.data_dict['credentials'] @credentials.setter def credentials(self, credentials): data_ = self.data_dict data_['credentials'] = credentials self.data = json.dumps(data_) class Action(Node): types = () class Meta: # Cloning does not work anymore if not abstract abstract = True def add_node(self, child): Link.objects.filter(parent=self, name='ok').delete() Link.objects.create(parent=self, child=child, name='ok') if not 
Link.objects.filter(parent=self, name='error').exists(): Link.objects.create(parent=self, child=Kill.objects.get(name='kill', workflow=self.workflow), name='error') # The fields with '[]' as default value are JSON dictionaries # When adding a new action, also update # - Action.types below # - Node.get_full_node() # - forms.py _node_type_TO_FORM_CLS # - workflow.js # - maybe actions_utils.mako class Mapreduce(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'jar_path', 'prepares', 'sla') node_type = 'mapreduce' files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) jar_path = models.CharField(max_length=PATH_MAX, verbose_name=_t('Jar name'), help_text=_t('Name or path to the %(program)s jar file on HDFS. E.g. examples.jar.') % {'program': 'MapReduce'}) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete and then to create before starting the application. ' 'This should be used exclusively for directory cleanup.')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_prepares(self): return json.loads(self.prepares) class Streaming(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'mapper', 'reducer', 'sla') node_type = "streaming" files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. 
mapred.job.queue.name=production)')) mapper = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Mapper'), help_text=_t('The executable/script to be used as mapper.')) reducer = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Reducer'), help_text=_t('The executable/script to be used as reducer.')) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) class Java(Action): PARAM_FIELDS = ('files', 'archives', 'jar_path', 'main_class', 'args', 'java_opts', 'job_properties', 'prepares', 'sla') node_type = "java" files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) jar_path = models.CharField(max_length=PATH_MAX, blank=False, verbose_name=_t('Jar name'), help_text=_t('Name or path to the %(program)s jar file on HDFS. E.g. examples.jar.') % {'program': 'Java'}) main_class = models.CharField(max_length=256, blank=False, verbose_name=_t('Main class'), help_text=_t('Full name of the Java class. E.g. org.apache.hadoop.examples.Grep')) args = models.TextField(blank=True, verbose_name=_t('Arguments'), help_text=_t('Arguments of the main method. The value of each arg element is considered a single argument ' 'and they are passed to the main method in the same order.')) java_opts = models.CharField(max_length=256, blank=True, verbose_name=_t('Java options'), help_text=_t('Command-line parameters used to start the JVM that will execute ' 'the Java application. Using this element is equivalent to using the mapred.child.java.opts ' 'configuration property. E.g. -Dexample-property=hue')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete and then to create before starting the application. ' 'This should be used exclusively for directory cleanup.')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'), help_text=_t('Capture output of the stdout of the %(program)s command execution. The %(program)s ' 'command output must be in Java Properties file format and it must not exceed 2KB. 
' 'From within the workflow definition, the output of an %(program)s action node is accessible ' 'via the String action:output(String node, String key) function') % {'program': node_type.title()}) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_prepares(self): return json.loads(self.prepares) class Pig(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials') node_type = 'pig' script_path = models.CharField(max_length=256, blank=False, verbose_name=_t('Script name'), help_text=_t('Script name or path to the Pig script. E.g. my_script.pig.')) params = models.TextField(default="[]", verbose_name=_t('Parameters'), help_text=_t('The Pig parameters of the script. e.g. "-param", "INPUT=${inputDir}"')) files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete and then to create before starting the application. ' 'This should be used exclusively for directory cleanup.')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_params(self): return json.loads(self.params) def get_prepares(self): return json.loads(self.prepares) class Hive(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials') node_type = 'hive' script_path = models.CharField(max_length=256, blank=False, verbose_name=_t('Script name'), help_text=_t('Script name or path to the %(type)s script. E.g. my_script.sql.') % {'type': node_type.title()}) params = models.TextField(default="[]", verbose_name=_t('Parameters'), help_text=_t('The %(type)s parameters of the script. E.g. N=5, INPUT=${inputDir}') % {'type': node_type.title()}) files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete, then create, before starting the application. 
' 'This should be used exclusively for directory cleanup.')) job_xml = models.CharField(max_length=PATH_MAX, default='hive-config.xml', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hive hive-config.xml file bundled in the workflow deployment directory. Pick a name different than hive-site.xml.')) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_params(self): return json.loads(self.params) def get_prepares(self): return json.loads(self.prepares) class Sqoop(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials') node_type = 'sqoop' script_path = models.TextField(blank=True, verbose_name=_t('Command'), default='', help_text=_t('The full %(type)s command. Either put it here or split it by spaces and insert the parts as multiple parameters below.') % {'type': node_type.title()}) params = models.TextField(default="[]", verbose_name=_t('Parameters'), help_text=_t('If no command is specified, split the command by spaces and insert the %(type)s parameters ' 'here e.g. import, --connect, jdbc:hsqldb:file:db.hsqldb, ...') % {'type': node_type.title()}) files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete then to create before starting the application. ' 'This should be used exclusively for directory cleanup')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_params(self): return json.loads(self.params) def get_prepares(self): return json.loads(self.prepares) class Ssh(Action): PARAM_FIELDS = ('user', 'host', 'command', 'params', 'sla', 'credentials') node_type = 'ssh' user = models.CharField(max_length=64, verbose_name=_t('User'), help_text=_t('User executing the shell command.')) host = models.CharField(max_length=256, verbose_name=_t('Host'), help_text=_t('Where the shell will be executed.')) command = models.CharField(max_length=256, verbose_name=_t('%(type)s command') % {'type': node_type.title()}, help_text=_t('The command that will be executed.')) params = models.TextField(default="[]", verbose_name=_t('Arguments'), help_text=_t('The arguments of the %(type)s command.') % {'type': node_type.title()}) capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'), help_text=_t('Capture output of the stdout of the %(program)s command execution. 
The %(program)s ' 'command output must be in Java properties file format and it must not exceed 2KB. ' 'From within the workflow definition, the output of an %(program)s action node is accessible ' 'via the String action:output(String node, String key) function') % {'program': node_type.title()}) def get_params(self): return json.loads(self.params) class Shell(Action): PARAM_FIELDS = ('files', 'archives', 'job_properties', 'params', 'prepares', 'sla', 'credentials') node_type = 'shell' command = models.CharField(max_length=256, blank=False, verbose_name=_t('%(type)s command') % {'type': node_type.title()}, help_text=_t('The path of the Shell command to execute.')) params = models.TextField(default="[]", verbose_name=_t('Arguments'), help_text=_t('The arguments of Shell command can then be specified using one or more argument element.')) files = models.TextField(default="[]", verbose_name=_t('Files'), help_text=_t('List of names or paths of files to be added to the distributed cache and the task running directory.')) archives = models.TextField(default="[]", verbose_name=_t('Archives'), help_text=_t('List of names or paths of the archives to be added to the distributed cache.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production)')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete then to create before starting the application. ' 'This should be used exclusively for directory cleanup')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) capture_output = models.BooleanField(default=False, verbose_name=_t('Capture output'), help_text=_t('Capture output of the stdout of the %(program)s command execution. The %(program)s ' 'command output must be in Java Properties file format and it must not exceed 2KB. ' 'From within the workflow definition, the output of an %(program)s action node is accessible ' 'via the String action:output(String node, String key) function') % {'program': node_type.title()}) def get_properties(self): return json.loads(self.job_properties) def get_files(self): return json.loads(self.files) def get_archives(self): return json.loads(self.archives) def get_params(self): return json.loads(self.params) def get_prepares(self): return json.loads(self.prepares) class DistCp(Action): PARAM_FIELDS = ('job_properties', 'params', 'prepares', 'sla', 'credentials') node_type = 'distcp' params = models.TextField(default="[]", verbose_name=_t('Arguments'), help_text=_t('The arguments of the %(type)s command. Put options first, then source paths, then destination path.') % {'type': node_type.title()}) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('For the job configuration (e.g. mapred.job.queue.name=production')) prepares = models.TextField(default="[]", verbose_name=_t('Prepares'), help_text=_t('List of absolute paths to delete then to create before starting the application. 
' 'This should be used exclusively for directory cleanup')) job_xml = models.CharField(max_length=PATH_MAX, default='', blank=True, verbose_name=_t('Job XML'), help_text=_t('Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. ' 'Properties specified in the Job Properties element override properties specified in the ' 'files specified in the Job XML element.')) def get_properties(self): return json.loads(self.job_properties) def get_params(self): return json.loads(self.params) def get_prepares(self): return json.loads(self.prepares) class Fs(Action): PARAM_FIELDS = ('deletes', 'mkdirs', 'moves', 'chmods', 'touchzs', 'sla', 'credentials') node_type = 'fs' deletes = models.TextField(default="[]", verbose_name=_t('Delete path'), blank=True, help_text=_t('Delete the specified path, if it is a directory it deletes recursively all its content and ' 'then deletes the directory.')) mkdirs = models.TextField(default="[]", verbose_name=_t('Create directory'), blank=True, help_text=_t('Create the specified directory, it creates all missing directories in the path. ' 'If the directory already exist it does a no-op.')) moves = models.TextField(default="[]", verbose_name=_t('Move file'), blank=True, help_text=_t('Move a file or directory to another path.')) chmods = models.TextField(default="[]", verbose_name=_t('Change permissions'), blank=True, help_text=_t('Change the permissions for the specified path. Permissions can be specified using the Unix Symbolic ' 'representation (e.g. -rwxrw-rw-) or an octal representation (755).')) touchzs = models.TextField(default="[]", verbose_name=_t('Create or touch a file'), blank=True, help_text=_t('Creates a zero length file in the specified path if none exists or touch it.')) def get_deletes(self): return json.loads(self.deletes) def get_mkdirs(self): return json.loads(self.mkdirs) def get_moves(self): return json.loads(self.moves) def get_chmods(self): return json.loads(self.chmods) def get_touchzs(self): return json.loads(self.touchzs) class Email(Action): PARAM_FIELDS = ('to', 'cc', 'subject', 'body', 'sla', 'credentials') node_type = 'email' to = models.TextField(default='', verbose_name=_t('TO addresses'), help_text=_t('Comma-separated values.')) cc = models.TextField(default='', verbose_name=_t('CC addresses (optional)'), blank=True, help_text=_t('Comma-separated values.')) subject = models.TextField(default='', verbose_name=_t('Subject'), help_text=_t('Plain-text.')) body = models.TextField(default='', verbose_name=_t('Body'), help_text=_t('Plain-text.')) class SubWorkflow(Action): PARAM_FIELDS = ('subworkflow', 'propagate_configuration', 'job_properties', 'sla', 'credentials') node_type = 'subworkflow' sub_workflow = models.ForeignKey(Workflow, default=None, db_index=True, blank=True, null=True, verbose_name=_t('Sub-workflow'), help_text=_t('The sub-workflow application to include. 
You must own all the sub-workflows.')) propagate_configuration = models.BooleanField(default=True, verbose_name=_t('Propagate configuration'), blank=True, help_text=_t('If the workflow job configuration should be propagated to the child workflow.')) job_properties = models.TextField(default='[]', verbose_name=_t('Hadoop job properties'), help_text=_t('Can be used to specify the job properties that are required to run the child workflow job.')) def get_properties(self): return json.loads(self.job_properties) class Generic(Action): PARAM_FIELDS = ('xml', 'credentials', 'sla', 'credentials') node_type = 'generic' xml = models.TextField(default='', verbose_name=_t('XML of the custom action'), help_text=_t('This will be inserted verbatim in the action %(action)s. ' 'E.g. all the XML content like %(xml_action)s ' 'will be inserted into the action and produce %(full_action)s') % { 'action': '&lt;action name="email"&gt;...&lt;/action&gt;', 'xml_action': '&lt;email&gt;&lt;cc&gt;[email protected]&lt;/cc&gt;&lt;/email&gt;', 'full_action': '&lt;action name="email"&gt;&lt;email&gt;&lt;cc&gt;[email protected]&lt;/cc&gt;&lt;/email&gt;&lt;ok/&gt;&lt;error/&gt;&lt;/action&gt;'}) Action.types = (Mapreduce.node_type, Streaming.node_type, Java.node_type, Pig.node_type, Hive.node_type, Sqoop.node_type, Ssh.node_type, Shell.node_type, DistCp.node_type, Fs.node_type, Email.node_type, SubWorkflow.node_type, Generic.node_type) class ControlFlow(Node): """ http://incubator.apache.org/oozie/docs/3.2.0-incubating/docs/WorkflowFunctionalSpec.html#a3.1_Control_Flow_Nodes """ class Meta: abstract = True def get_xml(self): return django_mako.render_to_string(self.get_template_name(), {}) def is_visible(self): return True # Could not make this abstract class Start(ControlFlow): node_type = 'start' def add_node(self, child): Link.objects.filter(parent=self).delete() link = Link.objects.create(parent=self, child=child, name='to') class End(ControlFlow): node_type = 'end' def add_node(self, child): raise RuntimeError(_("End should not have any children.")) class Kill(ControlFlow): node_type = 'kill' message = models.CharField(max_length=256, blank=False, default='Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]') def add_node(self, child): raise RuntimeError(_("Kill should not have any children.")) def is_visible(self): return False class Fork(ControlFlow): """ A Fork can be converted into a Decision node. 
""" node_type = 'fork' def is_visible(self): return True def get_child_join(self): return Link.objects.get(parent=self, name='related').child.get_full_node() def convert_to_decision(self): self.remove_join() decision = Decision.objects.create(workflow=self.workflow, node_type=Decision.node_type) decision.save() links = self.get_all_children_links() has_default = False for link in links: if link.name == 'default': has_default = True link.parent = decision # Defaults to end if not has_default: link = Link.objects.create(name="default", parent=decision, child=self.workflow.end) link.save() self.delete() return decision def remove_join(self): join = self.get_child_join() after_join = join.get_child('to') for parent in join.get_parent_actions(): link = parent.get_link('ok') link.child = after_join link.save() # Automatically delete links thought foreign keys join.delete() class Join(ControlFlow): node_type = 'join' def is_visible(self): return True def get_parent_fork(self): return self.get_parent_link('related').parent.get_full_node() def get_parent_actions(self): return [link.parent for link in self.get_parent_links()] class Decision(ControlFlow): """ Essentially a fork where only one of the paths of execution are chosen. Graphically, this is represented the same way as a fork. The DecisionEnd node is not represented in Oozie, only in Hue. """ node_type = 'decision' def get_child_end(self): return Link.objects.get(parent=self, name='related').child.get_full_node() def is_visible(self): return True def update_description(self): self.description = ', '.join(self.get_children_links().values_list('comment', flat=True)) self.save() class DecisionEnd(ControlFlow): """ Defines the end of a join. This node exists purely in the Hue application to provide a smooth transition from Decision to Endself. NOTE: NOT AN OOZIE NODE """ node_type = 'decisionend' def is_visible(self): return False def get_parent_decision(self): return self.get_parent_link('related').parent.get_full_node() def get_parent_actions(self): return [link.parent for link in self.get_parent_links()] def to_xml(self, mapping): return '' FREQUENCY_UNITS = (('minutes', _('Minutes')), ('hours', _('Hours')), ('days', _('Days')), ('months', _('Months'))) FREQUENCY_NUMBERS = [(i, i) for i in xrange(1, 61)] DATASET_FREQUENCY = ['MINUTE', 'HOUR', 'DAY', 'MONTH', 'YEAR'] class Coordinator(Job): frequency_number = models.SmallIntegerField(default=1, choices=FREQUENCY_NUMBERS, verbose_name=_t('Frequency number'), help_text=_t('The number of units of the rate at which ' 'data is periodically created.')) # unused frequency_unit = models.CharField(max_length=20, choices=FREQUENCY_UNITS, default='days', verbose_name=_t('Frequency unit'), help_text=_t('The unit of the rate at which data is periodically created.')) # unused timezone = models.CharField(max_length=24, choices=TIMEZONES, default='America/Los_Angeles', verbose_name=_t('Timezone'), help_text=_t('The timezone of the coordinator. 
Only used for managing the daylight saving time changes when combining several coordinators.')) start = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'), help_text=_t('When to start the first workflow.')) end = models.DateTimeField(default=datetime.today() + timedelta(days=3), verbose_name=_t('End'), help_text=_t('When to start the last workflow.')) workflow = models.ForeignKey(Workflow, null=True, verbose_name=_t('Workflow'), help_text=_t('The workflow to schedule repeatedly.')) timeout = models.SmallIntegerField(null=True, blank=True, verbose_name=_t('Timeout'), help_text=_t('Number of minutes the coordinator action will be in ' 'WAITING or READY status before giving up on its execution.')) concurrency = models.PositiveSmallIntegerField(null=True, blank=True, choices=FREQUENCY_NUMBERS, verbose_name=_t('Concurrency'), help_text=_t('The number of coordinator actions that are allowed to run concurrently (RUNNING status) ' 'before the coordinator engine starts throttling them.')) execution = models.CharField(max_length=10, null=True, blank=True, verbose_name=_t('Execution'), choices=(('FIFO', _t('FIFO (oldest first) default')), ('LIFO', _t('LIFO (newest first)')), ('LAST_ONLY', _t('LAST_ONLY (discards all older materializations)'))), help_text=_t('Execution strategy of its coordinator actions when there is backlog of coordinator ' 'actions in the coordinator engine. The different execution strategies are \'oldest first\', ' '\'newest first\' and \'last one only\'. A backlog normally happens because of delayed ' 'input data, concurrency control or because manual re-runs of coordinator jobs.')) throttle = models.PositiveSmallIntegerField(null=True, blank=True, choices=FREQUENCY_NUMBERS, verbose_name=_t('Throttle'), help_text=_t('The materialization or creation throttle value for its coordinator actions. ' 'Number of maximum coordinator actions that are allowed to be in WAITING state concurrently.')) job_properties = models.TextField(default='[]', verbose_name=_t('Workflow properties'), help_text=_t('Additional properties to transmit to the workflow, e.g. limit=100, and EL functions, e.g. 
username=${coord:user()}')) HUE_ID = 'hue-id-c' ICON = '/oozie/static/art/icon_oozie_coordinator_48.png' METADATA_FORMAT_VERSION = "0.0.1" CRON_MAPPING = { '0,15,30,45 * * * *': _('Every 15 minutes'), '0,30 * * * *': _('Every 30 minutes'), '0 * * * *': _('Every hour'), '0 0 * * *': _('Every day at midnight'), '0 0 * * 0': _('Every week'), '0 0 1 * *': _('Every month'), '0 0 1 1 *': _('Every year'), } def get_type(self): return 'coordinator' def to_xml(self, mapping=None): if mapping is None: mapping = {} tmpl = "editor/gen/coordinator.xml.mako" return re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, {'coord': self, 'mapping': mapping})).encode('utf-8', 'xmlcharrefreplace') def clone(self, new_owner=None): datasets = Dataset.objects.filter(coordinator=self) data_inputs = DataInput.objects.filter(coordinator=self) data_outputs = DataOutput.objects.filter(coordinator=self) name = self.name + '-copy' if new_owner is not None: owner = new_owner else: owner = self.owner copy_doc = self.doc.get().copy(name=name, owner=owner) copy = self copy.pk = None copy.id = None copy.name = name copy.deployment_dir = '' copy.owner = owner copy.save() copy.doc.all().delete() copy.doc.add(copy_doc) old_dataset_mapping = {} for dataset in datasets: prev_id = dataset.id dataset.pk = None dataset.id = None dataset.coordinator = copy dataset.save() old_dataset_mapping[prev_id] = dataset for data_input in data_inputs: data_input.pk = None data_input.id = None data_input.coordinator = copy data_input.dataset = old_dataset_mapping[data_input.dataset.id] data_input.save() for data_output in data_outputs: data_output.pk = None data_output.id = None data_output.coordinator = copy data_output.dataset = old_dataset_mapping[data_output.dataset.id] data_output.save() return copy @classmethod def get_application_path_key(cls): return 'oozie.coord.application.path' @classmethod def get_application_filename(cls): return 'coordinator.xml' def get_properties(self): props = json.loads(self.job_properties) index = [prop['name'] for prop in props] for prop in self.workflow.get_parameters(): if not prop['name'] in index: props.append(prop) index.append(prop['name']) # Remove DataInputs and DataOutputs datainput_names = [_input.name for _input in self.datainput_set.all()] dataoutput_names = [_output.name for _output in self.dataoutput_set.all()] removable_names = datainput_names + dataoutput_names props = filter(lambda prop: prop['name'] not in removable_names, props) return props @property def job_properties_escapejs(self): return self._escapejs_parameters_list(self.job_properties) @property def start_utc(self): return utc_datetime_format(self.start) @property def end_utc(self): return utc_datetime_format(self.end) def get_absolute_url(self): return reverse('oozie:edit_coordinator', kwargs={'coordinator': self.id}) @property def frequency(self): return '${coord:%(unit)s(%(number)d)}' % {'unit': self.frequency_unit, 'number': self.frequency_number} @property def text_frequency(self): return '%(number)d %(unit)s' % {'unit': self.frequency_unit, 'number': self.frequency_number} def find_parameters(self): params = self.workflow.find_parameters() for param in find_parameters(self, ['job_properties']): params[param] = '' if self.sla_enabled: for param in find_json_parameters(self.sla): params.add(param) for dataset in self.dataset_set.all(): for param in find_parameters(dataset, ['uri']): if param not in set(DATASET_FREQUENCY): params[param] = '' for ds in self.datainput_set.all(): params.pop(ds.name, 
None) for ds in self.dataoutput_set.all(): params.pop(ds.name, None) for wf_param in json.loads(self.job_properties): params.pop(wf_param['name'], None) return params def compress(self, mapping=None, fp=StringIO.StringIO()): metadata = { 'version': Coordinator.METADATA_FORMAT_VERSION, 'workflow': self.workflow.name, 'attributes': { 'description': self.description, 'deployment_dir': self.deployment_dir } } xml = self.to_xml(mapping=mapping) zfile = zipfile.ZipFile(fp, 'w') zfile.writestr("coordinator.xml", smart_str(xml)) zfile.writestr("coordinator-metadata.json", smart_str(json.dumps(metadata))) zfile.close() return fp @classmethod def decompress(cls, fp): zfile = zipfile.ZipFile(fp, 'r') metadata_json = zfile.read('coordinator-metadata.json') metadata = json.loads(metadata_json) xml = zfile.read('coordinator.xml') return xml, metadata @property def sla_jsescaped(self): return json.dumps(self.sla, cls=JSONEncoderForHTML) @property def cron_frequency(self): if 'cron_frequency' in self.data_dict: return self.data_dict['cron_frequency'] else: # Backward compatibility freq = '0 0 * * *' if self.frequency_number == 1: if self.frequency_unit == 'MINUTES': freq = '* * * * *' elif self.frequency_unit == 'HOURS': freq = '0 * * * *' elif self.frequency_unit == 'DAYS': freq = '0 0 * * *' elif self.frequency_unit == 'MONTH': freq = '0 0 * * *' return {'frequency': freq, 'isAdvancedCron': False} @property def cron_frequency_human(self): frequency = self.cron_frequency['frequency'] return Coordinator.CRON_MAPPING.get(frequency, frequency) @cron_frequency.setter def cron_frequency(self, cron_frequency): data_ = self.data_dict data_['cron_frequency'] = cron_frequency self.data = json.dumps(data_) class DatasetManager(models.Manager): def can_read_or_exception(self, request, dataset_id): if dataset_id is None: return try: dataset = Dataset.objects.get(pk=dataset_id) if dataset.coordinator.can_read(request.user): return dataset else: message = _("Permission denied. %(username)s does not have the permissions to access dataset %(id)s.") % \ {'username': request.user.username, 'id': dataset.id} access_warn(request, message) request.error(message) raise PopupException(message) except Dataset.DoesNotExist: raise PopupException(_('dataset %(id)s not exist') % {'id': dataset_id}) class Dataset(models.Model): """ http://oozie.apache.org/docs/3.3.0/CoordinatorFunctionalSpec.html#a6.3._Synchronous_Coordinator_Application_Definition """ name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name'), help_text=_t('The name of the dataset.')) description = models.CharField(max_length=1024, blank=True, default='', verbose_name=_t('Description'), help_text=_t('A description of the dataset.')) start = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'), help_text=_t(' The UTC datetime of the initial instance of the dataset. 
The initial instance also provides ' 'the baseline datetime to compute instances of the dataset using multiples of the frequency.')) frequency_number = models.SmallIntegerField(default=1, choices=FREQUENCY_NUMBERS, verbose_name=_t('Frequency number'), help_text=_t('The number of units of the rate at which ' 'data is periodically created.')) frequency_unit = models.CharField(max_length=20, choices=FREQUENCY_UNITS, default='days', verbose_name=_t('Frequency unit'), help_text=_t('The unit of the rate at which data is periodically created.')) uri = models.CharField(max_length=1024, default='/data/${YEAR}${MONTH}${DAY}', verbose_name=_t('URI'), help_text=_t('The URI template that identifies the dataset and can be resolved into concrete URIs to identify a particular ' 'dataset instance. The URI consist of constants (e.g. ${YEAR}/${MONTH}) and ' 'configuration properties (e.g. /home/${USER}/projects/${PROJECT})')) timezone = models.CharField(max_length=24, choices=TIMEZONES, default='America/Los_Angeles', verbose_name=_t('Timezone'), help_text=_t('The timezone of the dataset. Only used for managing the daylight saving time changes when combining several datasets.')) done_flag = models.CharField(max_length=64, blank=True, default='', verbose_name=_t('Done flag'), help_text=_t('The done file for the data set. If the Done flag is not specified, then Oozie ' 'configures Hadoop to create a _SUCCESS file in the output directory. If Done ' 'flag is set to empty, then Coordinator looks for the existence of the directory itself.')) coordinator = models.ForeignKey(Coordinator, verbose_name=_t('Coordinator'), help_text=_t('The coordinator associated with this data.')) instance_choice = models.CharField(max_length=10, default='default', verbose_name=_t('Instance type'), help_text=_t('Customize the date instance(s), e.g. define a range of dates, use EL functions...')) advanced_start_instance = models.CharField(max_length=128, default='0', verbose_name=_t('Start instance'), help_text=_t('Shift the frequency for gettting past/future start date or enter verbatim the Oozie start instance, e.g. 
${coord:current(0)}')) advanced_end_instance = models.CharField(max_length=128, blank=True, default='0', verbose_name=_t('End instance'), help_text=_t('Optional: Shift the frequency for gettting past/future end dates or enter verbatim the Oozie end instance.')) objects = DatasetManager() unique_together = ('coordinator', 'name') def __unicode__(self): return '%s' % (self.name,) @property def start_utc(self): return utc_datetime_format(self.start) @property def frequency(self): return '${coord:%(unit)s(%(number)d)}' % {'unit': self.frequency_unit, 'number': self.frequency_number} @property def text_frequency(self): return '%(number)d %(unit)s' % {'unit': self.frequency_unit, 'number': self.frequency_number} @property def start_instance(self): if not self.is_advanced_start_instance: return int(self.advanced_start_instance) else: return 0 @property def is_advanced_start_instance(self): return not self.is_int(self.advanced_start_instance) def is_int(self, text): try: int(text) return True except ValueError: return False @property def end_instance(self): if not self.is_advanced_end_instance: return int(self.advanced_end_instance) else: return 0 @property def is_advanced_end_instance(self): return not self.is_int(self.advanced_end_instance) class DataInput(models.Model): name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name of an input variable in the workflow.'), help_text=_t('The name of the variable of the workflow to automatically fill up.')) dataset = models.OneToOneField(Dataset, verbose_name=_t('The dataset representing format of the data input.'), help_text=_t('The pattern of the input data we want to process.')) coordinator = models.ForeignKey(Coordinator) unique_together = ('coordinator', 'name') class DataOutput(models.Model): name = models.CharField(max_length=40, validators=[name_validator], verbose_name=_t('Name of an output variable in the workflow'), help_text=_t('The name of the variable of the workflow to automatically filled up.')) dataset = models.OneToOneField(Dataset, verbose_name=_t('The dataset representing the format of the data output.'), help_text=_t('The pattern of the output data we want to generate.')) coordinator = models.ForeignKey(Coordinator) unique_together = ('coordinator', 'name') class BundledCoordinator(models.Model): bundle = models.ForeignKey('Bundle', verbose_name=_t('Bundle'), help_text=_t('The bundle regrouping all the coordinators.')) coordinator = models.ForeignKey(Coordinator, verbose_name=_t('Coordinator'), help_text=_t('The coordinator to batch with other coordinators.')) parameters = models.TextField(default='[{"name":"oozie.use.system.libpath","value":"true"}]', verbose_name=_t('Parameters'), help_text=_t('Constants used at the submission time (e.g. 
market=US, oozie.use.system.libpath=true).')) def get_parameters(self): return json.loads(self.parameters) class Bundle(Job): kick_off_time = models.DateTimeField(default=datetime.today(), verbose_name=_t('Start'), help_text=_t('When to start the first coordinators.')) coordinators = models.ManyToManyField(Coordinator, through='BundledCoordinator') HUE_ID = 'hue-id-b' ICON = '/oozie/static/art/icon_oozie_bundle_48.png' METADATA_FORMAT_VERSION = '0.0.1' def get_type(self): return 'bundle' def to_xml(self, mapping=None): if mapping is None: mapping = {} tmpl = "editor/gen/bundle.xml.mako" return force_unicode( re.sub(re.compile('\s*\n+', re.MULTILINE), '\n', django_mako.render_to_string(tmpl, { 'bundle': self, 'mapping': mapping }))) def clone(self, new_owner=None): bundleds = BundledCoordinator.objects.filter(bundle=self) name = self.name + '-copy' if new_owner is not None: owner = new_owner else: owner = self.owner copy_doc = self.doc.get().copy(name=name, owner=owner) copy = self copy.pk = None copy.id = None copy.name = name copy.deployment_dir = '' copy.owner = owner copy.save() copy.doc.all().delete() copy.doc.add(copy_doc) for bundled in bundleds: bundled.pk = None bundled.id = None bundled.bundle = copy bundled.save() return copy @classmethod def get_application_path_key(cls): return 'oozie.bundle.application.path' @classmethod def get_application_filename(cls): return 'bundle.xml' def get_absolute_url(self): return reverse('oozie:edit_bundle', kwargs={'bundle': self.id}) def find_parameters(self): params = {} for bundled in BundledCoordinator.objects.filter(bundle=self): for param in bundled.coordinator.find_parameters(): params[param] = '' for param in bundled.get_parameters(): params.pop(param['name'], None) return params @property def kick_off_time_utc(self): return utc_datetime_format(self.kick_off_time) def compress(self, mapping=None, fp=StringIO.StringIO()): metadata = { 'version': Bundle.METADATA_FORMAT_VERSION, 'attributes': { 'description': self.description, 'deployment_dir': self.deployment_dir } } xml = self.to_xml(mapping=mapping) zfile = zipfile.ZipFile(fp, 'w') zfile.writestr("bundle.xml", smart_str(xml)) zfile.writestr("bundle-metadata.json", smart_str(json.dumps(metadata))) zfile.close() return fp @classmethod def decompress(cls, fp): zfile = zipfile.ZipFile(fp, 'r') metadata_json = zfile.read('bundle-metadata.json') metadata = json.loads(metadata_json) xml = zfile.read('bundle.xml') return xml, metadata class HistoryManager(models.Manager): def create_from_submission(self, submission): History.objects.create(submitter=submission.user, oozie_job_id=submission.oozie_id, job=submission.job, properties=json.dumps(submission.properties)) class History(models.Model): """ Contains information on submitted workflows/coordinators. 
""" submitter = models.ForeignKey(User, db_index=True) submission_date = models.DateTimeField(auto_now=True, db_index=True) oozie_job_id = models.CharField(max_length=128) job = models.ForeignKey(Job, db_index=True) properties = models.TextField() objects = HistoryManager() @property def properties_dict(self): return json.loads(self.properties) def get_absolute_oozie_url(self): view = 'oozie:list_oozie_workflow' if self.oozie_job_id.endswith('C'): view = 'oozie:list_oozie_coordinator' elif self.oozie_job_id.endswith('B'): view = 'oozie:list_oozie_bundle' return reverse(view, kwargs={'job_id': self.oozie_job_id}) def get_workflow(self): if self.oozie_job_id.endswith('W'): return self.job.get_full_node() def get_coordinator(self): if self.oozie_job_id.endswith('C'): return self.job.get_full_node() @classmethod def get_workflow_from_config(self, conf_dict): try: return Workflow.objects.get(id=conf_dict.get(Workflow.HUE_ID)) except Workflow.DoesNotExist: pass @classmethod def get_coordinator_from_config(self, conf_dict): try: return Coordinator.objects.get(id=conf_dict.get(Coordinator.HUE_ID)) except Coordinator.DoesNotExist: pass @classmethod def cross_reference_submission_history(cls, user, oozie_id): # Try do get the history history = None try: history = History.objects.get(oozie_job_id=oozie_id) if history.job.owner != user: history = None except History.DoesNotExist: pass return history def get_link(oozie_id): link = '' if 'W@' in oozie_id: link = reverse('oozie:list_oozie_workflow_action', kwargs={'action': oozie_id}) elif oozie_id.endswith('W'): link = reverse('oozie:list_oozie_workflow', kwargs={'job_id': oozie_id}) elif oozie_id.endswith('C'): link = reverse('oozie:list_oozie_coordinator', kwargs={'job_id': oozie_id}) return link def find_parameters(instance, fields=None): """Find parameters in the given fields""" if fields is None: fields = [field.name for field in instance._meta.fields] params = [] for field in fields: data = getattr(instance, field) if field == 'sla' and not instance.sla_enabled: continue if isinstance(data, list): params.extend(find_json_parameters(data)) elif isinstance(data, basestring): for match in Template.pattern.finditer(data): name = match.group('braced') if name is not None: params.append(name) return params def find_json_parameters(fields): # To make smarter # Input is list of json dict params = [] for field in fields: for data in field.values(): if isinstance(data, basestring): for match in Template.pattern.finditer(data): name = match.group('braced') if name is not None: params.append(name) return params # See http://wiki.apache.org/hadoop/JobConfFile _STD_PROPERTIES = [ 'mapred.input.dir', 'mapred.output.dir', 'mapred.job.name', 'mapred.job.queue.name', 'mapred.mapper.class', 'mapred.reducer.class', 'mapred.combiner.class', 'mapred.partitioner.class', 'mapred.map.tasks', 'mapred.reduce.tasks', 'mapred.input.format.class', 'mapred.output.format.class', 'mapred.input.key.class', 'mapred.input.value.class', 'mapred.output.key.class', 'mapred.output.value.class', 'mapred.mapoutput.key.class', 'mapred.mapoutput.value.class', 'mapred.combine.buffer.size', 'mapred.min.split.size', 'mapred.speculative.execution', 'mapred.map.tasks.speculative.execution', 'mapred.reduce.tasks.speculative.execution', 'mapred.queue.default.acl-administer-jobs', ] _STD_PROPERTIES_JSON = json.dumps(_STD_PROPERTIES) ACTION_TYPES = { Mapreduce.node_type: Mapreduce, Streaming.node_type: Streaming, Java.node_type: Java, Pig.node_type: Pig, Hive.node_type: Hive, Sqoop.node_type: Sqoop, 
Ssh.node_type: Ssh, Shell.node_type: Shell, DistCp.node_type: DistCp, Fs.node_type: Fs, Email.node_type: Email, SubWorkflow.node_type: SubWorkflow, Generic.node_type: Generic, } CONTROL_TYPES = { Fork.node_type: Fork, Join.node_type: Join, Decision.node_type: Decision, DecisionEnd.node_type: DecisionEnd, Start.node_type: Start, End.node_type: End, } NODE_TYPES = ACTION_TYPES.copy() NODE_TYPES.update(CONTROL_TYPES)
apache-2.0
4,944,804,039,425,903,000
38.640862
183
0.636088
false
3.82539
false
false
false
d/hamster-applet
src/docky_control/2.1/hamster_control.py
1
3518
#!/usr/bin/env python
#
# Copyright (C) 2010 Toms Baugis
#
# Original code from Banshee control,
# Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import atexit
import gobject
import sys, os
from subprocess import Popen

try:
    import gtk
    from dockmanager.dockmanager import DockManagerItem, DockManagerSink, DOCKITEM_IFACE
    from signal import signal, SIGTERM
    from sys import exit
except ImportError, e:
    print e
    exit()

from hamster import client
from hamster.utils import stuff, i18n
i18n.setup_i18n()


class HamsterItem(DockManagerItem):
    def __init__(self, sink, path):
        DockManagerItem.__init__(self, sink, path)

        self.storage = client.Storage()
        self.storage.connect("facts-changed", lambda storage: self.refresh_hamster())
        self.storage.connect("activities-changed", lambda storage: self.refresh_hamster())

        self.id_map = {}  #menu items

        self.update_text()
        self.add_actions()
        gobject.timeout_add_seconds(60, self.refresh_hamster)

    def refresh_hamster(self):
        try:
            self.update_text()
        finally:  # we want to go on no matter what, so in case of any error we find out about it sooner
            return True

    def update_text(self):
        today = self.storage.get_todays_facts()

        if today and today[-1].end_time is None:
            fact = today[-1]
            self.set_tooltip("%s - %s" % (fact.activity, fact.category))
            self.set_badge(stuff.format_duration(fact.delta, human=False))
        else:
            self.set_tooltip(_("No activity"))
            self.reset_badge()

    def menu_pressed(self, menu_id):
        if self.id_map[menu_id] == _("Overview"):
            Popen(["hamster-time-tracker", "overview"])
        elif self.id_map[menu_id] == _("Preferences"):
            Popen(["hamster-time-tracker", "preferences"])

        self.add_actions()  # TODO - figure out why is it that we have to regen all menu items after each click

    def add_actions(self):
        # first clear the menu
        for k in self.id_map.keys():
            self.remove_menu_item(k)

        self.id_map = {}

        # now add buttons
        self.add_menu_item(_("Overview"), "")
        self.add_menu_item(_("Preferences"), "preferences-desktop-personal")


class HamsterSink(DockManagerSink):
    def item_path_found(self, pathtoitem, item):
        if item.Get(DOCKITEM_IFACE, "DesktopFile", dbus_interface="org.freedesktop.DBus.Properties").endswith ("hamster-time-tracker.desktop"):
            self.items[pathtoitem] = HamsterItem(self, pathtoitem)

hamstersink = HamsterSink()

def cleanup():
    hamstersink.dispose()

if __name__ == "__main__":
    mainloop = gobject.MainLoop(is_running=True)

    atexit.register (cleanup)
    signal(SIGTERM, lambda signum, stack_frame: exit(1))

    while mainloop.is_running():
        mainloop.run()
gpl-3.0
982,473,164,084,869,900
29.591304
143
0.666003
false
3.593463
false
false
false
JoshBorke/redline
accounts/urls.py
1
2078
from django.conf.urls.defaults import *

from redline import settings

urlpatterns = patterns('redline.accounts.views',
    url(r'^$', 'accounts_list', name='account_list'),
    url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/$', 'accounts_detail', name='accounts_detail'),
    url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)$', 'accounts_detail_type', name='accounts_detail_type'),
    url(r'^overview/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)/(?P<slug>[\w_-]+)/$', 'accounts_category_detail', name='accounts_category_detail'),
    url(r'^add/$', 'account_add', name='account_add'),
    url(r'^edit/(?P<account_id>\d+)/$', 'account_edit', name='account_edit'),
    # for specific accounts
    url(r'^info/(?P<account_id>\d+)/$', 'account_info', name='account_info'),
    url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)$', 'account_detail', name='account_detail'),
    url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)$', 'account_detail_type', name='account_detail_type'),
    url(r'^info/(?P<account_id>\d+)/(?P<year>\d+)/(?P<month>\d+)/(?P<ttype>\d+)/$(?P<slug>[\w_-]+)/', 'account_category_detail', name='account_category_detail'),
    # for misc
    url(r'^delete/(?P<account_id>\d+)/$', 'account_delete', name='account_delete'),
    url(r'^import/(?P<account_id>\d+)/$', 'account_import', name='account_import'),
    # for account types, not used
    url(r'^account_type$', 'account_type_list', name='account_type_list'),
    url(r'^account_type/add/$', 'account_type_add', name='account_type_add'),
    url(r'^account_type/edit/(?P<account_type_id>\d+)/$', 'account_type_edit', name='account_type_edit'),
    url(r'^account_type/delete/(?P<account_type_id>\d+)/$', 'account_type_delete', name='account_type_delete'),
)

if settings.DEBUG:
    urlpatterns += patterns('',
        (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/home/josh/local/redline/media'}),
        (r'^data-files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/home/josh/local/redline/media'}),
    )
gpl-3.0
-7,997,031,493,255,206,000
66.032258
161
0.60924
false
3.00289
false
false
false
iirob/python-opcua
opcua/common/structures.py
1
11226
""" Support for custom structures in client and server We only support a subset of features but should be enough for custom structures """ import os import importlib import re import logging # The next two imports are for generated code from datetime import datetime import uuid from enum import Enum, IntEnum, EnumMeta from lxml import objectify from opcua.ua.ua_binary import Primitives from opcua import ua logger = logging.getLogger(__name__) def get_default_value(uatype, enums): if uatype == "String": return "None" elif uatype == "Guid": return "uuid.uuid4()" elif uatype in ("ByteString", "CharArray", "Char"): return "b''" elif uatype == "Boolean": return "True" elif uatype == "DateTime": return "datetime.utcnow()" elif uatype in ("Int16", "Int32", "Int64", "UInt16", "UInt32", "UInt64", "Double", "Float", "Byte", "SByte"): return 0 elif uatype in enums: return "ua." + uatype + "(" + enums[uatype] + ")" elif hasattr(ua, uatype) and issubclass(getattr(ua, uatype), Enum): # We have an enum, try to initilize it correctly val = list(getattr(ua, uatype).__members__)[0] return "ua.{}([})".format(uatype, val) else: return "ua.{}()".format(uatype) class EnumType(object): def __init__(self, name): self.name = name self.fields = [] self.typeid = None def get_code(self): code = """ class {0}(IntEnum): ''' {0} EnumInt autogenerated from xml ''' """.format(self.name) for EnumeratedValue in self.fields: name = EnumeratedValue.Name value = EnumeratedValue.Value code += " {} = {}\n".format(name, value) return code class EnumeratedValue(object): def __init__(self, name, value): if name == "None": name = "None_" name = name.replace(" ", "") self.Name = name self.Value = value class Struct(object): def __init__(self, name): self.name = name self.fields = [] self.typeid = None def get_code(self): code = """ class {0}(object): ''' {0} structure autogenerated from xml ''' """.format(self.name) code += " ua_types = [\n" for field in self.fields: prefix = "ListOf" if field.array else "" uatype = prefix + field.uatype if uatype == "ListOfChar": uatype = "String" code += " ('{}', '{}'),\n".format(field.name, uatype) code += " ]" code += """ def __init__(self): """ if not self.fields: code += " pass" for field in self.fields: code += " self.{} = {}\n".format(field.name, field.value) return code class Field(object): def __init__(self, name): self.name = name self.uatype = None self.value = None self.array = False class StructGenerator(object): def __init__(self): self.model = [] def make_model_from_string(self, xml): obj = objectify.fromstring(xml) self._make_model(obj) def make_model_from_file(self, path): obj = objectify.parse(path) root = obj.getroot() self._make_model(root) def _make_model(self, root): enums = {} for child in root.iter("{*}EnumeratedType"): intenum = EnumType(child.get("Name")) for xmlfield in child.iter("{*}EnumeratedValue"): name = xmlfield.get("Name") value = xmlfield.get("Value") enumvalue = EnumeratedValue(name, value) intenum.fields.append(enumvalue) enums[child.get("Name")] = value self.model.append(intenum) for child in root.iter("{*}StructuredType"): struct = Struct(child.get("Name")) array = False for xmlfield in child.iter("{*}Field"): name = xmlfield.get("Name") if name.startswith("NoOf"): array = True continue field = Field(_clean_name(name)) field.uatype = xmlfield.get("TypeName") if ":" in field.uatype: field.uatype = field.uatype.split(":")[1] field.uatype = _clean_name(field.uatype) field.value = get_default_value(field.uatype, enums) if array: field.array = True field.value = [] 
array = False struct.fields.append(field) self.model.append(struct) def save_to_file(self, path, register=False): _file = open(path, "wt") self._make_header(_file) for struct in self.model: _file.write(struct.get_code()) if register: _file.write(self._make_registration()) _file.close() def _make_registration(self): code = "\n\n" for struct in self.model: code += "ua.register_extension_object('{name}', ua.NodeId.from_string('{nodeid}'), {name})\n".format(name=struct.name, nodeid=struct.typeid) return code def get_python_classes(self, env=None): return _generate_python_class(self.model, env=env) def save_and_import(self, path, append_to=None): """ save the new structures to a python file which be used later import the result and return resulting classes in a dict if append_to is a dict, the classes are added to the dict """ self.save_to_file(path) name = os.path.basename(path) name = os.path.splitext(name)[0] mymodule = importlib.import_module(name) if append_to is None: result = {} else: result = append_to for struct in self.model: result[struct.name] = getattr(mymodule, struct.name) return result def _make_header(self, _file): _file.write(""" ''' THIS FILE IS AUTOGENERATED, DO NOT EDIT!!! ''' from datetime import datetime import uuid from opcua import ua """) def set_typeid(self, name, typeid): for struct in self.model: if struct.name == name: struct.typeid = typeid return def load_type_definitions(server, nodes=None): """ Download xml from given variable node defining custom structures. If no node is given, attemps to import variables from all nodes under "0:OPC Binary" the code is generated and imported on the fly. If you know the structures are not going to be modified it might be interresting to copy the generated files and include them in you code """ if nodes is None: nodes = [] for desc in server.nodes.opc_binary.get_children_descriptions(): if desc.BrowseName != ua.QualifiedName("Opc.Ua"): nodes.append(server.get_node(desc.NodeId)) structs_dict = {} generators = [] for node in nodes: xml = node.get_value() xml = xml.decode("utf-8") generator = StructGenerator() generators.append(generator) generator.make_model_from_string(xml) # generate and execute new code on the fly generator.get_python_classes(structs_dict) # same but using a file that is imported. 
This can be usefull for debugging library #name = node.get_browse_name().Name # Make sure structure names do not contain charaters that cannot be used in Python class file names #name = _clean_name(name) #name = "structures_" + node.get_browse_name().Name #generator.save_and_import(name + ".py", append_to=structs_dict) # register classes # every children of our node should represent a class for ndesc in node.get_children_descriptions(): ndesc_node = server.get_node(ndesc.NodeId) ref_desc_list = ndesc_node.get_references(refs=ua.ObjectIds.HasDescription, direction=ua.BrowseDirection.Inverse) if ref_desc_list: #some server put extra things here name = _clean_name(ndesc.BrowseName.Name) if not name in structs_dict: logger.warning("%s is found as child of binary definition node but is not found in xml", name) continue nodeid = ref_desc_list[0].NodeId ua.register_extension_object(name, nodeid, structs_dict[name]) # save the typeid if user want to create static file for type definitnion generator.set_typeid(name, nodeid.to_string()) for key, val in structs_dict.items(): if isinstance(val, EnumMeta) and key is not "IntEnum": setattr(ua, key, val) return generators, structs_dict def _clean_name(name): """ Remove characters that might be present in OPC UA structures but cannot be part of of Python class names """ name = re.sub(r'\W+', '_', name) name = re.sub(r'^[0-9]+', r'_\g<0>', name) return name def _generate_python_class(model, env=None): """ generate Python code and execute in a new environment return a dict of structures {name: class} Rmw: Since the code is generated on the fly, in case of error the stack trace is not available and debugging is very hard... """ if env is None: env = {} # Add the required libraries to dict if "ua" not in env: env['ua'] = ua if "datetime" not in env: env['datetime'] = datetime if "uuid" not in env: env['uuid'] = uuid if "enum" not in env: env['IntEnum'] = IntEnum # generate classes one by one and add them to dict for element in model: code = element.get_code() exec(code, env) return env def load_enums(server, env=None): """ Read enumeration data types on server and generate python Enums in ua scope for them """ model = [] nodes = server.nodes.enum_data_type.get_children() if env is None: env = ua.__dict__ for node in nodes: name = node.get_browse_name().Name try: c = _get_enum_strings(name, node) except ua.UaError as ex: try: c = _get_enum_values(name, node) except ua.UaError as ex: logger.info("Node %s, %s under DataTypes/Enumeration, does not seem to have a child called EnumString or EumValue: %s", name, node, ex) continue if not hasattr(ua, c.name): model.append(c) return _generate_python_class(model, env=env) def _get_enum_values(name, node): def_node = node.get_child("0:EnumValues") val = def_node.get_value() c = EnumType(name) c.fields = [EnumeratedValue(enumval.DisplayName.Text, enumval.Value) for enumval in val] return c def _get_enum_strings(name, node): def_node = node.get_child("0:EnumStrings") val = def_node.get_value() c = EnumType(name) c.fields = [EnumeratedValue(st.Text, idx) for idx, st in enumerate(val)] return c
lgpl-3.0
-3,811,595,053,384,737,300
30.622535
152
0.576786
false
3.880401
false
false
false
albertoriva/bioscripts
simplediff.py
1
5294
#!/usr/bin/env python import sys import csv import math def parseSlice(s): if "-" in s: parts = s.split("-") return slice(int(parts[0]) - 1, int(parts[1])) else: p = int(s) return slice(p-1, p) class SimpleDiff(): filename = None outfile = "/dev/stdout" labels = None colname1 = "avg1" colname2 = "avg2" alpha = 1.0 slice1 = None slice2 = None def process(self, f, out, header=True): nin = 0 nout = 0 na = self.slice1.stop - self.slice1.start nb = self.slice2.stop - self.slice2.start if header: f.readline() c = csv.reader(f, delimiter='\t') for line in c: nin += 1 data1 = line[self.slice1] data2 = line[self.slice2] data1 = [ float(v) for v in data1 ] data2 = [ float(v) for v in data2 ] amin = min(data1) amax = max(data1) bmin = min(data2) bmax = max(data2) if amin > bmax: # A over B r1 = amax - amin r2 = bmax - bmin d = self.alpha * max(r1, r2) if (amin - bmax) > d: avg1 = sum(data1) / na avg2 = sum(data2) / nb if avg1 > 0 and avg2 > 0: out.write("{}\t{}\t{}\t{}\n".format(line[0], avg1, avg2, math.log(avg1/avg2, 2.0))) nout += 1 elif bmin > amax: # B over A r1 = amax - amin r2 = bmax - bmin d = self.alpha * max(r1, r2) if (bmin - amax) > d: avg1 = sum(data1) / na avg2 = sum(data2) / nb if avg1 > 0 and avg2 > 0: out.write("{}\t{}\t{}\t{}\n".format(line[0], avg1, avg2, math.log(avg1/avg2, 2.0))) nout += 1 return (nin, nout) def parseArgs(self, args): prev = "" if "-h" in args or "--help" in args: return self.usage() for a in args: if prev == "-a": self.alpha = float(a) prev = "" elif prev == "-o": self.outfile = a prev = "" elif prev == "-l": self.labels = parseSlice(a) prev = "" elif prev == "-c1": self.colname1 = a prev = "" elif prev == "-c2": self.colname2 = a prev = "" elif a in ["-a", "-o", "-l", "-c1", "-c2"]: prev = a elif self.filename is None: self.filename = a elif self.slice1 is None: self.slice1 = parseSlice(a) elif self.slice2 is None: self.slice2 = parseSlice(a) if (self.filename and self.slice1 and self.slice2): return True else: return self.usage() def usage(self): sys.stdout.write("""Usage: simplediff.py [options] exprfile slice1 slice2 This program performs "simple" differential analysis on gene expression data. `exprfile' should be a file containing gene expression values with genes on the rows and samples in the columns. `slice1' and `slice2' should be expressions of the form P-Q indicating which columns contain the data for the two conditions being compared (e.g., if the first condition is represented by three columns starting at column 5, use 5-7). Options: -a A | Set the alpha parameter to A (see below). Default: {}. -o O | Write output to file O. -c1 C | Set label for average of condition 1 values to C. Default: {}. -c1 C | Set label for average of condition 2 values to C. Default: {}. A gene is considered to be differentially expressed between two groups of samples (A and B) if the two following conditions hold: * The two sets of expression values are totally separated, ie: the minimum expression values for the samples in A is larger than the maximum in B -OR- the minimum expression values for the samples in B is larger than the maximum in A * The distance between the two sets of values (the difference between the maximum of the "lower" one and the minimum of the "upper" one) is larger than the largest of the two ranges of values in A and B, multiplied by the alpha parameter. Example: A = {{10, 12, 16}} B = {{20, 21, 22}} The two sets are separated, because min(B) > max(A). The distance between the two sets is 4 (20-16), range(A) = 6, range(B) = 2. 
If alpha is set to 1.0 (the default) then this gene would NOT be considered significantly different, because the largest range is 6, and 6 * alpha > 4. If alpha was set to 0.5, the gene would be called as different. """.format(self.alpha, self.colname1, self.colname2)) def run(self): with open(self.outfile, "w") as out: with open(self.filename, "r") as f: (nin, nout) = self.process(f, out) sys.stderr.write("{} in, {} out\n".format(nin, nout)) if __name__ == "__main__": SD = SimpleDiff() if SD.parseArgs(sys.argv[1:]): SD.run()
gpl-3.0
6,085,323,893,334,041,000
34.293333
107
0.525123
false
3.696927
false
false
false
bjodah/chempy
benchmarks/benchmarks/equilibria.py
1
1079
import numpy as np

from chempy.tests.ammonical_cupric_solution import get_ammonical_cupric_eqsys


class TimeEqsys:
    def setup(self):
        self.eqsys, self.c0 = get_ammonical_cupric_eqsys()

    def time_roots(self):
        x, new_inits, success = self.eqsys.roots(self.c0, np.logspace(-3, 0, 50), "NH3")
        assert all(success)

    def time_roots_symengine(self):
        from symengine import Lambdify

        x, new_inits, success = self.eqsys.roots(
            self.c0,
            np.logspace(-3, 0, 50),
            "NH3",
            lambdify=Lambdify,
            lambdify_unpack=False,
        )
        assert all(success)

    def time_roots_no_propagate(self):
        x, new_inits, success = self.eqsys.roots(
            self.c0, np.logspace(-3, 0, 50), "NH3", propagate=False
        )
        assert all(success)


if __name__ == "__main__":
    import time

    te = TimeEqsys()
    te.setup()

    # t1 = time.time()
    # te.time_roots_symengine()
    # print(time.time()-t1)

    t1 = time.time()
    te.time_roots()
    print(time.time() - t1)
bsd-2-clause
-5,027,225,692,750,565,000
23.522727
88
0.570899
false
3.074074
false
false
false
hgamboa/novainstrumentation
novainstrumentation/peakdelta.py
1
2443
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 16:20:03 2013

@author: utilizador
"""
import sys
from numpy import NaN, Inf, arange, isscalar, array, asarray

##############################################################################
########################### Peaks Detection ##################################
##############################################################################


def peakdelta(v, delta, x=None):
    """
    Returns two arrays

    function [maxtab, mintab]=peakdelta(v, delta, x)
    %PEAKDET Detect peaks in a vector
    %   [MAXTAB, MINTAB] = peakdelta(V, DELTA) finds the local
    %   maxima and minima ("peaks") in the vector V.
    %   MAXTAB and MINTAB consists of two columns. Column 1
    %   contains indices in V, and column 2 the found values.
    %
    %   With [MAXTAB, MINTAB] = peakdelta(V, DELTA, X) the indices
    %   in MAXTAB and MINTAB are replaced with the corresponding
    %   X-values.
    %
    %   A point is considered a maximum peak if it has the maximal
    %   value, and was preceded (to the left) by a value lower by
    %   DELTA.

    % Eli Billauer, 3.4.05 (Explicitly not copyrighted).
    % This function is released to the public domain; Any use is allowed.
    """
    maxtab = []
    mintab = []

    if x is None:
        x = arange(len(v))

    v = asarray(v)

    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')

    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')

    if delta <= 0:
        sys.exit('Input argument delta must be positive')

    mn, mx = Inf, -Inf
    mnpos, mxpos = NaN, NaN

    lookformax = True

    for i in arange(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]

        if lookformax:
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True

    return array(maxtab), array(mintab)
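
# Illustrative usage sketch (not part of the original module): shows how
# peakdelta might be called on a small signal.  The sample values and the
# delta threshold below are invented for demonstration only.
if __name__ == "__main__":
    signal = [0.0, 1.0, 0.4, 1.5, 0.2, 2.0, 0.5]
    maxtab, mintab = peakdelta(signal, delta=0.5)
    # maxtab holds (index, value) pairs of local maxima,
    # mintab holds (index, value) pairs of local minima.
    print(maxtab)
    print(mintab)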
mit
-1,012,318,517,677,826,700
27.792683
80
0.462137
false
4.018092
false
false
false
702nADOS/sumo
tools/xml/xml2csv.py
1
10954
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @file xml2csv.py @author Jakob Erdmann @author Michael Behrisch @author Laura Bieker @date 2013-12-08 @version $Id: xml2csv.py 22608 2017-01-17 06:28:54Z behrisch $ Convert hierarchical xml files to csv. This only makes sense if the hierarchy has low depth. SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/ Copyright (C) 2013-2017 DLR (http://www.dlr.de/) and contributors This file is part of SUMO. SUMO is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. """ from __future__ import print_function from __future__ import absolute_import import os import sys import socket import collections from optparse import OptionParser import xml.sax try: import lxml.etree import lxml.sax haveLxml = True except ImportError: haveLxml = False import xsd PY3 = sys.version_info > (3,) class NestingHandler(xml.sax.handler.ContentHandler): """A handler which knows the current nesting of tags""" def __init__(self): self.tagstack = [] def startElement(self, name, attrs): self.tagstack.append(name) def endElement(self, name): self.tagstack.pop() def depth(self): # do not count the root element return len(self.tagstack) - 1 class AttrFinder(NestingHandler): def __init__(self, xsdFile, source, split): NestingHandler.__init__(self) self.tagDepths = {} # tag -> depth of appearance self.tagAttrs = collections.defaultdict( collections.OrderedDict) # tag -> set of attrs self.renamedAttrs = {} # (name, attr) -> renamedAttr self.attrs = {} self.depthTags = {} # child of root: depth of appearance -> tag list self.rootDepth = 1 if split else 0 if xsdFile: self.xsdStruc = xsd.XsdStructure(xsdFile) if split: for ele in self.xsdStruc.root.children: self.attrs[ele.name] = [] self.depthTags[ele.name] = [[]] self.recursiveAttrFind(ele, ele, 1) else: self.attrs[self.xsdStruc.root.name] = [] self.depthTags[self.xsdStruc.root.name] = [] self.recursiveAttrFind( self.xsdStruc.root, self.xsdStruc.root, 0) else: self.xsdStruc = None xml.sax.parse(source, self) def addElement(self, root, name, depth): if name not in self.tagDepths: if len(self.depthTags[root]) == depth: self.tagDepths[name] = depth self.depthTags[root].append([]) self.depthTags[root][depth].append(name) return True if name not in self.depthTags[root][depth]: print("Ignoring tag %s at depth %s" % (name, depth), file=sys.stderr) return False def recursiveAttrFind(self, root, currEle, depth): if not self.addElement(root.name, currEle.name, depth): return for a in currEle.attributes: if ":" not in a.name: # no namespace support yet self.tagAttrs[currEle.name][a.name] = a anew = "%s_%s" % (currEle.name, a.name) self.renamedAttrs[(currEle.name, a.name)] = anew attrList = self.attrs[root.name] if anew in attrList: del attrList[attrList.index(anew)] attrList.append(anew) for ele in currEle.children: self.recursiveAttrFind(root, ele, depth + 1) def startElement(self, name, attrs): NestingHandler.startElement(self, name, attrs) if self.depth() >= self.rootDepth: root = self.tagstack[self.rootDepth] if self.depth() == self.rootDepth and root not in self.attrs: self.attrs[root] = [] self.depthTags[root] = [[]] * self.rootDepth if not self.addElement(root, name, self.depth()): return # collect attributes for a in sorted(list(attrs.keys())): if a not in self.tagAttrs[name] and ":" not in a: self.tagAttrs[name][a] = xsd.XmlAttribute(a) if not (name, a) in 
self.renamedAttrs: anew = "%s_%s" % (name, a) self.renamedAttrs[(name, a)] = anew self.attrs[root].append(anew) class CSVWriter(NestingHandler): def __init__(self, attrFinder, options): NestingHandler.__init__(self) self.attrFinder = attrFinder self.options = options self.currentValues = collections.defaultdict(lambda: "") self.haveUnsavedValues = False self.outfiles = {} self.rootDepth = 1 if options.split else 0 for root in sorted(attrFinder.depthTags): if len(attrFinder.depthTags) == 1: if not options.output: options.output = os.path.splitext(options.source)[0] if not options.output.isdigit() and not options.output.endswith(".csv"): options.output += ".csv" self.outfiles[root] = getOutStream(options.output) else: if options.output: outfilename = options.output + "%s.csv" % root else: outfilename = os.path.splitext( options.source)[0] + "%s.csv" % root self.outfiles[root] = open(outfilename, 'w') if (PY3): self.outfiles[root].write(str.encode( options.separator.join(map(self.quote, attrFinder.attrs[root])) + "\n")) else: self.outfiles[root].write( options.separator.join(map(self.quote, attrFinder.attrs[root])) + "\n") def quote(self, s): return "%s%s%s" % (self.options.quotechar, s, self.options.quotechar) # the following two are needed for the lxml saxify to work def startElementNS(self, name, qname, attrs): self.startElement(qname, attrs) def endElementNS(self, name, qname): self.endElement(qname) def startElement(self, name, attrs): NestingHandler.startElement(self, name, attrs) if self.depth() >= self.rootDepth: root = self.tagstack[self.rootDepth] # print("start", name, root, self.depth(), self.attrFinder.depthTags[root][self.depth()]) if name in self.attrFinder.depthTags[root][self.depth()]: for a, v in attrs.items(): if isinstance(a, tuple): a = a[1] # print(a, dict(self.attrFinder.tagAttrs[name])) if a in self.attrFinder.tagAttrs[name]: if self.attrFinder.xsdStruc: enum = self.attrFinder.xsdStruc.getEnumeration( self.attrFinder.tagAttrs[name][a].type) if enum: v = enum.index(v) a2 = self.attrFinder.renamedAttrs.get((name, a), a) self.currentValues[a2] = v self.haveUnsavedValues = True def endElement(self, name): if self.depth() >= self.rootDepth: root = self.tagstack[self.rootDepth] # print("end", name, root, self.depth(), self.attrFinder.depthTags[root][self.depth()], self.haveUnsavedValues) if name in self.attrFinder.depthTags[root][self.depth()]: if self.haveUnsavedValues: if(PY3): self.outfiles[root].write(str.encode(self.options.separator.join( [self.quote(self.currentValues[a]) for a in self.attrFinder.attrs[root]]) + "\n")) else: self.outfiles[root].write(self.options.separator.join( [self.quote(self.currentValues[a]) for a in self.attrFinder.attrs[root]]) + "\n") self.haveUnsavedValues = False for a in self.attrFinder.tagAttrs[name]: a2 = self.attrFinder.renamedAttrs.get((name, a), a) del self.currentValues[a2] NestingHandler.endElement(self, name) def getSocketStream(port, mode='rb'): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("localhost", port)) s.listen(1) conn, addr = s.accept() return conn.makefile(mode) def getOutStream(output): if output.isdigit(): return getSocketStream(int(output), 'wb') return open(output, 'wb') def get_options(): optParser = OptionParser( usage=os.path.basename(sys.argv[0]) + " [<options>] <input_file_or_port>") optParser.add_option("-s", "--separator", default=";", help="separating character for fields") optParser.add_option("-q", "--quotechar", default='', help="quoting character for fields") optParser.add_option("-x", 
"--xsd", help="xsd schema to use") optParser.add_option("-a", "--validation", action="store_true", default=False, help="enable schema validation") optParser.add_option("-p", "--split", action="store_true", default=False, help="split in different files for the first hierarchy level") optParser.add_option("-o", "--output", help="base name for output") options, args = optParser.parse_args() if len(args) != 1: optParser.print_help() sys.exit() if options.validation and not haveLxml: print("lxml not available, skipping validation", file=sys.stderr) options.validation = False if args[0].isdigit(): if not options.xsd: print("a schema is mandatory for stream parsing", file=sys.stderr) sys.exit() options.source = getSocketStream(int(args[0])) else: options.source = args[0] if options.output and options.output.isdigit() and options.split: print( "it is not possible to use splitting together with stream output", file=sys.stderr) sys.exit() return options def main(): options = get_options() # get attributes attrFinder = AttrFinder(options.xsd, options.source, options.split) # write csv handler = CSVWriter(attrFinder, options) if options.validation: schema = lxml.etree.XMLSchema(file=options.xsd) parser = lxml.etree.XMLParser(schema=schema) tree = lxml.etree.parse(options.source, parser) lxml.sax.saxify(tree, handler) else: xml.sax.parse(options.source, handler) if __name__ == "__main__": main()
gpl-3.0
-2,313,956,629,142,083,600
38.545126
122
0.576228
false
3.919141
false
false
false
joostvdg/jenkins-job-builder
jenkins_jobs/cli/subcommand/base.py
1
2294
#!/usr/bin/env python
# Copyright (C) 2015 Wayne Warren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class BaseSubCommand(object):
    """Base class for Jenkins Job Builder subcommands, intended to allow
    subcommands to be loaded as stevedore extensions by third party users.
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def parse_args(self, subparsers, recursive_parser):
        """Define subcommand arguments.

        :param subparsers
          A sub parser object. Implementations of this method should create
          a new subcommand parser by calling
            parser = subparsers.add_parser('command-name', ...)
          This will return a new ArgumentParser object; all other arguments to
          this method will be passed to the argparse.ArgumentParser constructor
          for the returned object.
        """

    @abc.abstractmethod
    def execute(self, config):
        """Execute subcommand behavior.

        :param config
          JJBConfig object containing final configuration from config files,
          command line arguments, and environment variables.
        """

    @staticmethod
    def parse_option_recursive_exclude(parser):
        """Add '--recursive' and '--exclude' arguments to given parser.
        """
        parser.add_argument(
            '-r', '--recursive',
            action='store_true',
            dest='recursive',
            default=False,
            help='''look for yaml files recursively''')

        parser.add_argument(
            '-x', '--exclude',
            dest='exclude',
            action='append',
            default=[],
            help='''paths to exclude when using recursive search, uses standard globbing.''')
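
# Hypothetical subclass sketch (not part of the original file): illustrates how
# a third-party subcommand is expected to plug into BaseSubCommand via
# parse_args()/execute().  The command name 'hello' and its behaviour are
# invented for illustration only.
class HelloSubCommand(BaseSubCommand):

    def parse_args(self, subparsers, recursive_parser):
        # Register this subcommand's own parser and attach the shared
        # --recursive/--exclude options provided by the base class.
        hello = subparsers.add_parser('hello')
        self.parse_option_recursive_exclude(hello)

    def execute(self, config):
        # 'config' is the JJBConfig built from files, CLI args and environment.
        print("hello from a stevedore-loaded subcommand")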
apache-2.0
1,903,094,280,295,874,600
33.238806
79
0.649085
false
4.769231
false
false
false
NNTin/Reply-Dota-2-Reddit
displayreddit/drmatch.py
1
5099
from steamapi.getproplayerlist import proPlayerDictionary from steamapi.getheroes import heroDictionary from steamapi.getgameitems import itemDictionary from converter import timeconverter, playerconverter def displayResult(matchJson, playerSummariesJson): introTemplate = '####&#009;\n#####&#009; ' \ 'Hover to view match ID: {matchid} [DB](http://www.dotabuff.com/matches/{matchid})/' \ '[OD](https://www.opendota.com/matches/{matchid})/' \ '[STRATZ](https://stratz.com/match/{matchid})' \ '\n######&#009;\n\n' \ '[**{teamwinner} wins {winnerkills}-{loserkills} @ {time}**](#lumbdi "{additionalinformation}")\n\n' tableTemplate = 'Lvl | Hero | Player| K/D/A | LH/D | XPM | GPM | HD | HH | TD\n' \ ':--|:--:|:--|:--|:--|:--|:--|:--|:--|:--\n' tableLineTemplate = '{level}|{hero}|{account}|{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n' dividerTemplate = '{level}||↑Radiant↑ ↓Dire↓ |{kda}|{lhd}|{xpm}|{gpm}|{hd}|{hh}|{td}\n' outtro = '\n\n---\n\n' #print(introTemplate + tableTemplate + tableLineTemplate + outtroTemplate) matchID = matchJson['result']["match_id"] if matchJson['result']['radiant_win']: teamwinner = 'Radiant' winnerkills = matchJson['result']["radiant_score"] loserkills = matchJson['result']["dire_score"] else: teamwinner = 'Dire' winnerkills = matchJson['result']["dire_score"] loserkills = matchJson['result']["radiant_score"] time = timeconverter.durationTimeConverter(matchJson['result']["duration"]) #TODO: Provide additional information if match is tournament matchDate = timeconverter.unixTimeConverter(matchJson['result']["start_time"]) firstBloodTime = timeconverter.durationTimeConverter(matchJson['result']["first_blood_time"]) additionalInformation = 'Match ID: %s, match date: %s, first blood time: %s' %(matchID,matchDate,firstBloodTime) intro = introTemplate.format(matchid=matchID, teamwinner=teamwinner, winnerkills=winnerkills, loserkills=loserkills, time=time, additionalinformation=additionalInformation) radiantTable = '' direTable = '' teamStats = [{'level': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'lasthits': 0, 'denies': 0, 'xpm': 0, 'gpm': 0, 'hd': 0, 'hh': 0, 'td': 0 }, {'level': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'lasthits': 0, 'denies': 0, 'xpm': 0, 'gpm': 0, 'hd': 0, 'hh': 0, 'td': 0 }] for player in matchJson['result']['players']: #level, hero, player, kda, lh d, xpm, gpm, hd, hh, td stats = {'level': player['level'], 'kills': player['kills'], 'deaths': player['deaths'], 'assists': player['assists'], 'lasthits': player['last_hits'], 'denies': player['denies'], 'xpm': player["xp_per_min"], 'gpm': player["gold_per_min"], 'hd': player["hero_damage"], 'hh': player["hero_healing"], 'td': player["tower_damage"]} hero = '[](/hero-%s)' %heroDictionary[player['hero_id']] account = playerconverter.playerConverter(player['account_id'], playerSummariesJson) kda = '%s/%s/%s' %(stats['kills'],stats['deaths'], stats['assists']) lhd = '%s/%s' %(player['last_hits'], player['denies']) if player['player_slot'] < 127: #<127 -> Radiant radiantTable += tableLineTemplate.format(level=stats['level'], hero=hero, account=account, kda=kda, lhd=lhd, xpm=stats['xpm'], gpm=stats['gpm'], hd=stats['hd'], hh=stats['hh'], td=stats['td']) for stat in stats: teamStats[0][stat] += stats[stat] else: direTable += tableLineTemplate.format(level=stats['level'], hero=hero, account=account, kda=kda, lhd=lhd, xpm=stats['xpm'], gpm=stats['gpm'], hd=stats['hd'], hh=stats['hh'], td=stats['td']) for stat in stats: teamStats[1][stat] += stats[stat] teamStatsDict = {} for i in range(0, len(teamStats)): 
teamStats[i]['kda'] = '%s/%s/%s' %(teamStats[i]['kills'], teamStats[i]['deaths'], teamStats[i]['assists']) teamStats[i]['lhd'] = '%s/%s' %(teamStats[i]['lasthits'], teamStats[i]['denies']) teamStats[i].pop('kills') teamStats[i].pop('deaths') teamStats[i].pop('assists') teamStats[i].pop('lasthits') teamStats[i].pop('denies') for stat in teamStats[i]: teamStatsDict[stat] = teamStatsDict.get(stat, '') + ' ' + str(teamStats[i][stat]) divider = dividerTemplate.format(level=teamStatsDict['level'], kda=teamStatsDict['kda'], lhd=teamStatsDict['lhd'], xpm=teamStatsDict['xpm'], gpm=teamStatsDict['gpm'], hd=teamStatsDict['hd'], hh=teamStatsDict['hh'], td=teamStatsDict['td']) table = tableTemplate + radiantTable + divider + direTable return intro + table + '\n\n---\n\n'
mit
-7,995,903,472,055,855,000
54.956044
176
0.574543
false
3.257198
false
false
false
UMDWeather/TheDisplayer
plugins/current/weather_underground.py
1
1027
import urllib, json
import datetime as dt
import logging

log = logging.getLogger(__name__)

################################################################################
## REQUIRED parameters:
################################################################################
## data_url - e.g. "http://api.wunderground.com/api/3a5b82718926c103/conditions/q/MD/College_Park.json"
################################################################################

dataFreq = dt.timedelta(minutes=5)
iconpath='http://icons.wxbug.com/i/c/j/##.gif'


def readData():
    '''buffer the data so that we don't do reads too often'''
    ## is it time to get new data?
    ctime = dt.datetime.now()
    if ctime - readData.lastRead > dataFreq:
        log.debug('downloading new weather data')
        readData.lastRead = ctime
        response = urllib.urlopen(data_url)
        readData.data = json.loads(response.read())
    return readData.data

readData.data = None
readData.lastRead = dt.datetime.now() - dt.timedelta(days=3)
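
# Hypothetical usage sketch (not part of the original plugin): the plugin loader
# is expected to inject a module-level 'data_url' (see the REQUIRED parameters
# header above) before readData() is called.  The API key in the URL below is a
# placeholder, not a real credential.
if __name__ == "__main__":
    data_url = "http://api.wunderground.com/api/<your_api_key>/conditions/q/MD/College_Park.json"
    current = readData()
    print(current.keys())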
gpl-2.0
941,463,521,244,889,000
32.129032
103
0.523856
false
4.027451
false
false
false
kkaushik24/python-design-patterns
structural/bridge_pattern.py
1
1404
from abc import ABCMeta, abstractmethod


class DrawingApi:
    def draw_circle(self, x, y, radius):
        pass


class DrawingApi1(DrawingApi):
    def draw_circle(self, x, y, radius):
        print "Api1 ", x, y, radius


class DrawingApi2(DrawingApi):
    def draw_circle(self, x, y, radius):
        print "Api2 ", x, y, radius


class Shape:
    __metaclass__ = ABCMeta

    def __init__(self, drawing_api):
        self.drawing_api = drawing_api

    @abstractmethod
    def draw(self):
        pass

    @abstractmethod
    def resize_by_percentage(pct):
        pass


class CircleShape(Shape):
    def __init__(self, x, y, radius, drawing_api):
        super(CircleShape, self).__init__(drawing_api)
        self.x = x
        self.y = y
        self.radius = radius

    def draw(self):
        self.drawing_api.draw_circle(self.x, self.y, self.radius)

    def resize_by_percentage(self, pct):
        self.radius = self.radius + (self.radius * pct / 100)
        return self.radius


if __name__ == '__main__':
    drawing_api1 = DrawingApi1()
    drawing_api2 = DrawingApi2()
    circle_shape1 = CircleShape(1, 2, 4, drawing_api1)
    circle_shape2 = CircleShape(4, 8, 12, drawing_api2)
    circle_shape1.draw()
    print 'resized circle1 radius', circle_shape1.resize_by_percentage(40)
    circle_shape2.draw()
    print 'resized circle2 radius', circle_shape2.resize_by_percentage(50)
apache-2.0
922,079,697,343,518,000
22.79661
74
0.625356
false
3.334917
false
false
false
adybbroe/atrain_match
python_edit_the_code.py
1
3570
import re, glob, os ROOT_DIR = "/home/a001865/git/rename_atrainmatch/atrain_match/" files = glob.glob(ROOT_DIR + "/*.py") files = files + glob.glob(ROOT_DIR + "/*/*.py") files = files + glob.glob(ROOT_DIR + "/*/*/*.py") var_name_dict ={ "time": "profile_time", "utc_time": "profile_utc_time", #"cloud_top_profile": "layer_top_altitude", #"cloud_top_profile_pressure":"layer_top_pressure", #"cloud_base_profile": "layer_base_altitude", #"number_of_layers_found": "number_layers_found", "elevation": "dem_surface_elevation", #"igbp": "igbp_surface_type", #"nsidc": "nsidc_surface_type", "optical_depth": "feature_optical_depth_532"} for filename in files: if os.path.basename(filename) in "python_edit_the_code.py": continue print "do not edit %s"%(os.path.basename(filename)) print filename all_file="" python_file = open(filename,'r') for line in python_file: #line = line.replace("avhrr", "imager") #line = line.replace("AVHRR", "IMAGER") #line = line.replace("Avhrr", "Imager") #line = line.replace("nnImager", "nnAvhrr") #line = line.replace("nnavhrr", "nnimager") #line = line.replace("NN-IMAGER", "NN-AVHRR") line = line.replace("cloudsat_calipso_imager", "truth_imager") #if "_amsr" not in line: # line = line.replace("amsr_imager", "match_util") line = line.replace("match_match_util", "match_amsr_imager") #line = re.sub(r"\.elevation", '.DEM_surface_elevation',line) #if re.search("alipso\.elevation",line) and 1==2: # line = line.rstrip() # line = re.sub(r"alipso\.elevation", # 'alipso.dem_surface_elevation',line) # line = re.sub(r"alipsoObj\.elevation", # 'alipsoObj.dem_surface_elevation',line) # # line = line + "\n" # #line = re.sub(r"nsidc", 'nsidc_surface_type',line) #line = re.sub(r"igbp", 'igbp_surface_type',line) #line = re.sub(r"number_of_layers_found", 'number_layers_found',line) #line = re.sub(r"cloud_top_profile_pressure", # 'layer_top_pressure',line) #line = re.sub(r"cloud_base_profile", # 'layer_base_altitude',line) #line = re.sub(r"cloud_top_profile", # 'layer_top_altitude',line) #line = re.sub(r"\.optical_depth", # '.feature_optical_depth_532',line) #line = re.sub(r"\"optical_depth", # '"feature_optical_depth_532',line) #line = re.sub(r"\'optical_depth", # '\'feature_optical_depth_532',line) #line = re.sub(r"utc_time", # 'profile_utc_time',line) #line = re.sub(r"time_tai", # 'profile_time_tai',line) line = re.sub(r"feature_optical_depth_532_top_layer5km", 'feature_optical_depth_532_top_layer_5km',line) """Maybe not do this!! line = re.sub(r"alipso\.time", 'alipso.profile_time',line) line = re.sub(r"cal\.time", 'cal.profile_time',line) line = re.sub(r"alipsoObj\.time", 'alipsoObj.profile_time',line) """ all_file += line python_file.close() python_file = open(filename,'w') python_file.write(all_file)
gpl-3.0
4,090,333,391,999,040,500
41
77
0.527451
false
3.198925
false
false
false
jeromecc/doctoctocbot
src/moderation/twitter/user.py
1
2364
from moderation.models import SocialUser from bot.tweepy_api import get_api as get_tweepy_api from tweepy import TweepError from tweepy.models import User as TweepyUser from community.models import Community import logging from community.helpers import get_community_twitter_tweepy_api import time logger = logging.getLogger(__name__) class TwitterUser: def __init__(self, userid=None, socialuser=None): try: if userid and socialuser: if socialuser.user_id != userid: raise ValueError("userid and socialuser.user_id mismatch!") self.id = userid self.socialuser = socialuser elif userid and not socialuser: try: su = SocialUser.objects.get(user_id=userid) self.id=userid self.socialuser=su except SocialUser.DoesNotExist: pass elif not userid and socialuser: self.id = socialuser.user_id self.socialuser = socialuser except ValueError: pass def __str__(self): return f"TwitterUser id: {self.id}" def is_protected(self): try: protected = self.socialuser.profile.json.get("protected") except AttributeError: protected = None return protected def friend(self, community): if not isinstance(community, Community): logger.error( f"Given parameter {community} is not a Community object" ) return api = get_community_twitter_tweepy_api( community = community, backend=True ) try: tweepy_user = api.create_friendship(user_id=self.id) logger.debug(tweepy_user) if isinstance(tweepy_user, TweepyUser): return True except TweepError: return False def decline_follow_request(self, community): api = get_community_twitter_tweepy_api( community = community, backend=True ) resp = api.create_block(user_id=self.id) logger.debug(resp) time.sleep(1) resp = api.destroy_block(user_id=self.id) logger.debug(resp)
mpl-2.0
6,726,107,315,724,089,000
30.118421
79
0.565144
false
4.402235
false
false
false
bugzPDX/airmozilla
airmozilla/manage/autocompeter.py
1
5110
import datetime import json import time import sys from pprint import pprint import requests from django.conf import settings from django.utils import timezone from django.db.models import Count from django.core.exceptions import ImproperlyConfigured from funfactory.urlresolvers import reverse from airmozilla.main.models import Event, EventHitStats def _get_url(): return getattr( settings, 'AUTOCOMPETER_URL', 'https://autocompeter.com/v1' ) def update( verbose=False, all=False, flush_first=False, max_=1000, since=datetime.timedelta(minutes=60), out=sys.stdout, ): if not getattr(settings, 'AUTOCOMPETER_KEY', None): if verbose: # pragma: no cover print >>out, "Unable to submit titles to autocompeter.com" print >>out, "No settings.AUTOCOMPETER_KEY set up" return autocompeter_url = _get_url() if flush_first: assert all, "must be all if you're flushing" t0 = time.time() response = requests.delete( autocompeter_url + '/flush', headers={ 'Auth-Key': settings.AUTOCOMPETER_KEY, }, verify=not settings.DEBUG ) t1 = time.time() if verbose: # pragma: no cover print >>out, response print >>out, "Took", t1 - t0, "seconds to flush" assert response.status_code == 204, response.status_code now = timezone.now() if all: hits_map = dict( EventHitStats.objects.all().values_list('event', 'total_hits') ) values = hits_map.values() if values: median_hits = sorted(values)[len(values) / 2] else: median_hits = 0 events = Event.objects.approved() else: events = ( Event.objects.approved() .filter(modified__gte=now-since)[:max_] ) if events: # there are events, we'll need a hits_map and a median hits_map = dict( EventHitStats.objects.filter(event__in=events) .values_list('event', 'total_hits') ) values = ( EventHitStats.objects.all() .values_list('total_hits', flat=True) ) if values: median_hits = sorted(values)[len(values) / 2] else: median_hits = 0 title_counts = {} # Only bother to set this up if there are events to loop over. # Oftentimes the cronjob will trigger here with no new recently changed # events and then the loop below ('for event in events:') will do nothing. if events: grouped_by_title = ( Event.objects.approved().values('title').annotate(Count('title')) ) for each in grouped_by_title: title_counts[each['title']] = each['title__count'] documents = [] for event in events: url = reverse('main:event', args=(event.slug,)) title = event.title if event.start_time > now: # future events can be important too popularity = median_hits else: hits = hits_map.get(event.id, 0) popularity = hits if event.privacy == Event.PRIVACY_PUBLIC: group = '' else: group = event.privacy if title_counts[title] > 1: title = '%s %s' % (title, event.start_time.strftime('%d %b %Y')) documents.append({ 'title': title, 'url': url, 'popularity': popularity, 'group': group, }) if verbose: # pragma: no cover pprint(documents, stream=out) if not documents: if verbose: # pragma: no cover print >>out, "No documents." 
return t0 = time.time() response = requests.post( autocompeter_url + '/bulk', data=json.dumps({'documents': documents}), headers={ 'Auth-Key': settings.AUTOCOMPETER_KEY, }, verify=not settings.DEBUG ) t1 = time.time() assert response.status_code == 201, response.status_code if verbose: # pragma: no cover print >>out, response print >>out, "Took", t1 - t0, "seconds to bulk submit" def stats(): if not getattr(settings, 'AUTOCOMPETER_KEY', None): raise ImproperlyConfigured("No settings.AUTOCOMPETER_KEY set up") autocompeter_url = _get_url() response = requests.get( autocompeter_url + '/stats', headers={ 'Auth-Key': settings.AUTOCOMPETER_KEY, }, verify=not settings.DEBUG ) assert response.status_code == 200, response.status_code return response.json() def test(term, domain=None): autocompeter_url = _get_url() response = requests.get( autocompeter_url, params={ 'd': domain or settings.AUTOCOMPETER_DOMAIN, 'q': term, }, verify=not settings.DEBUG ) assert response.status_code == 200, response.status_code return response.json()
bsd-3-clause
-4,856,101,207,579,082,000
28.709302
78
0.567515
false
3.995309
false
false
false
ufal/neuralmonkey
neuralmonkey/evaluators/chrf.py
1
3455
from typing import List, Dict

from typeguard import check_argument_types
import numpy as np

from neuralmonkey.evaluators.evaluator import Evaluator

# pylint: disable=invalid-name
NGramDicts = List[Dict[str, int]]
# pylint: enable=invalid-name


class ChrFEvaluator(Evaluator[List[str]]):
    """Compute ChrF score.

    See http://www.statmt.org/wmt15/pdf/WMT49.pdf
    """

    def __init__(self,
                 n: int = 6,
                 beta: float = 1.0,
                 ignored_symbols: List[str] = None,
                 name: str = None) -> None:
        check_argument_types()

        if name is None:
            name = "ChrF-{}".format(beta)
        super().__init__(name)

        self.n = n
        self.beta_2 = beta**2

        self.ignored = []  # type: List[str]
        if ignored_symbols is not None:
            self.ignored = ignored_symbols

    def score_instance(self,
                       hypothesis: List[str],
                       reference: List[str]) -> float:
        hyp_joined = " ".join(hypothesis)
        hyp_chars = [x for x in list(hyp_joined) if x not in self.ignored]
        hyp_ngrams = _get_ngrams(hyp_chars, self.n)

        ref_joined = " ".join(reference)
        ref_chars = [x for x in list(ref_joined) if x not in self.ignored]
        ref_ngrams = _get_ngrams(ref_chars, self.n)

        if not hyp_chars or not ref_chars:
            if "".join(hyp_chars) == "".join(ref_chars):
                return 1.0
            return 0.0

        precision = self.chr_p(hyp_ngrams, ref_ngrams)
        recall = self.chr_r(hyp_ngrams, ref_ngrams)

        if precision == 0.0 and recall == 0.0:
            return 0.0

        return ((1 + self.beta_2) * (precision * recall)
                / ((self.beta_2 * precision) + recall))

    def chr_r(self, hyp_ngrams: NGramDicts, ref_ngrams: NGramDicts) -> float:
        count_all = np.zeros(self.n)
        count_matched = np.zeros(self.n)
        for m in range(1, self.n + 1):
            for ngr in ref_ngrams[m - 1]:
                ref_count = ref_ngrams[m - 1][ngr]
                count_all[m - 1] += ref_count
                if ngr in hyp_ngrams[m - 1]:
                    count_matched[m - 1] += min(
                        ref_count, hyp_ngrams[m - 1][ngr])
        return np.mean(np.divide(
            count_matched, count_all, out=np.ones_like(count_all),
            where=(count_all != 0)))

    def chr_p(self, hyp_ngrams: NGramDicts, ref_ngrams: NGramDicts) -> float:
        count_all = np.zeros(self.n)
        count_matched = np.zeros(self.n)
        for m in range(1, self.n + 1):
            for ngr in hyp_ngrams[m - 1]:
                hyp_count = hyp_ngrams[m - 1][ngr]
                count_all[m - 1] += hyp_count
                if ngr in ref_ngrams[m - 1]:
                    count_matched[m - 1] += min(
                        hyp_count, ref_ngrams[m - 1][ngr])
        return np.mean(np.divide(
            count_matched, count_all, out=np.ones_like(count_all),
            where=(count_all != 0)))


def _get_ngrams(tokens: List[str], n: int) -> NGramDicts:
    ngr_dicts = []
    for m in range(1, n + 1):
        ngr_dict = {}  # type: Dict[str, int]
        for i in range(m, len(tokens) + 1):
            ngr = "".join(tokens[i - m:i])
            ngr_dict[ngr] = ngr_dict.setdefault(ngr, 0) + 1
        ngr_dicts.append(ngr_dict)
    return ngr_dicts


# pylint: disable=invalid-name
ChrF3 = ChrFEvaluator(beta=3)
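
# Worked note (not part of the original module): score_instance() combines the
# character n-gram precision P and recall R with the standard chrF formula
#     chrF_beta = (1 + beta**2) * P * R / (beta**2 * P + R)
# so the predefined ChrF3 (beta=3) weights recall nine times as strongly as
# precision.  The tokens below are an invented toy example.
if __name__ == "__main__":
    hyp = "a cat sat on the mat".split()
    ref = "the cat sat on the mat".split()
    print(ChrF3.score_instance(hyp, ref))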
bsd-3-clause
5,065,427,443,965,041,000
33.207921
77
0.529957
false
3.250235
false
false
false
UMONS-GFA/bdas
doc/sensors/sim_pluvio.py
1
18407
__author__ = 'kaufmanno' import numpy as np from scipy.interpolate import pchip_interpolate, interp1d import matplotlib.pyplot as plt draw_graphs = True #draw_graphs = False load_calibration = True save_calibration = False calibration_file = 'calibration.txt' single_flow = True # a varying flow otherwise a series of flows # if not single_flow : min_flow = 1.0 # l/h max_flow = 10.0 # l/h flow_step = 0.1 # l/h def schmitt_trigger(ts, low, high, threshold): filtered = [] fd = [] is_high = False is_low = False state = np.NaN for i in ts: d = 0 if i < low: is_low = True state = 0 elif i > high: is_high = True state = 1 if is_low and i > threshold: is_low = False state = 1 d = 1 elif is_high and i < threshold: is_high = False state = 0 d = 0 filtered.append(state) fd.append(d) return filtered, fd def comb_to_linapprox(comb): sawtooth = np.zeros_like(comb, 'float64') slope = np.zeros_like(comb, 'float64') i = 0 start_tooth = i while i < len(comb): stop_tooth = i if comb[i] == 0: i += 1 else: sawtooth[start_tooth:stop_tooth+1] = sawtooth[start_tooth:start_tooth+1]*np.ones(stop_tooth - start_tooth + 1) + np.linspace(0.0, 1.0, stop_tooth - start_tooth + 1) slope[start_tooth:stop_tooth+1] = 1.0/(stop_tooth - start_tooth) start_tooth = i i += 1 return sawtooth, slope def get_inflow(t, inflow_mean, inflow_variation, inflow_var_period, inflow_var_phase, inflow_random, random=False): if random: inflow = inflow_mean + inflow_variation*np.sin(2*np.pi*t/inflow_var_period+inflow_var_phase) + np.random.normal(0.0, inflow_random, 1)[0] else: inflow = inflow_mean + inflow_variation*np.sin(2*np.pi*t/inflow_var_period+inflow_var_phase) return inflow if __name__ == '__main__': inflow = [] estimated_inflow = [] if single_flow: flow_range = [min_flow] else: flow_range = np.arange(min_flow, max_flow, flow_step) for tk_inflow_mean in flow_range: # General constants g = 9810 # [mm/s²] eps0 = 8.85E-12 # void electric permittivity epsr_teflon = 2.1 # Tank parameters tk_overflow_height = 3.1 # height above tube in tank [mm] tk_tube_height = 4.05 # height of the tube above the bottom of the tank [mm] tk_tube_diameter = 3.5 # hole diameter [mm] tk_tank_diameter = 80 # tank diameter [mm] # Siphon gauge parameters sg_siphon_height = 70.4 # height between bottom and top of siphon [mm] sg_tube_diameter = 80.0 # siphon gauge tank diameter [mm] sg_siphon_diameter = 6.0 # siphon tube diameter [mm] sg_siphon_length = 300.0 # siphon tube length for outflow [mm] sg_desiphoning_level = 1.5 # water level at which siphon stops to be active when level drops in the gauge [mm] sg_residual_water_height = 39.5 # height of residual water after siphoning [mm] # Sensor parameters ss_length = 150 # length of cylindrical capacitor [mm] ss_always_wet_length = tk_tube_height + sg_residual_water_height # length of cylindrical capacitor that is always wet (at the base of the upper tank and the gauge below the siphon) [mm] ss_inner_radius = 10 # inner radius of the cylinder [mm] ss_outer_radius = 10.4 # outer radius of the cylinder [mm] ss_resistance = 500000 # R2 [ohm] # Data acquisition parameters das_period = 2 # sampling period [s] # Derived tank parameters tk_tank_area = np.pi/4*tk_tank_diameter**2 - np.pi*ss_outer_radius**2 # tank area [mm²] tk_hole_area = np.pi/4*tk_tube_diameter**2 # tank area [mm²] # Derived siphon gauge parameters sg_tube_area = np.pi/4*sg_tube_diameter**2 - np.pi*ss_outer_radius**2 # tank area [mm²] # Tank starting state tk_water_level = 4.05 # level of water in tank above the hole [mm] if single_flow: tk_inflow_mean = 4.0 
# mean volumetric inflow [l/h] tk_inflow_variation = 3.0 # amplitude of the inflow variation [l/h] tk_inflow_var_period = 8100.0 # period of the inflow variation [s] tk_inflow_random = 0.01 # amplitude of random component on inflow [l/h] tk_inflow_var_phase = 0.0 # phase of the inflow variation [rad] else: tk_inflow_variation = 0.0 # amplitude of the inflow variation [l/h] tk_inflow_var_period = 1.0 # period of the inflow variation [s] tk_inflow_random = 0.0 # amplitude of random component on inflow [l/h] tk_inflow_var_phase = 0.0 # phase of the inflow variation [rad] tk_outflow = 0.0 # volumetric outflow [l/h] # Siphon gauge starting state sg_water_level = 1.5 # level of water in the siphon gauge tank above the base of the siphon [mm] sg_outflow = 0.0 # volumetric outflow [l/h] sg_active = 0 # 1 when siphon is active 0 otherwise # Simulation time time_start = 0.0 # simulation starting time time_end = 36000.0 # simulation ending time time_step = .2 # [s] # Initialisation time = time_start tk_inflow = get_inflow(time, tk_inflow_mean, tk_inflow_variation, tk_inflow_var_period, tk_inflow_var_phase, tk_inflow_random, single_flow) t = [time] tk_h = [tk_water_level] tk_o = [tk_outflow] tk_i = [tk_inflow] sg_h = [sg_water_level] sg_o = [sg_outflow] sg_a = [sg_active] sg_total_outflow_volume = 0 ss_capacity = (ss_always_wet_length + sg_water_level + tk_water_level) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius) ss_frequency = 1/(0.693*2*ss_resistance*ss_capacity) ss_counter = [ss_frequency*time_step] # Theoretical siphoning time [h] ts0 = 0.54*(sg_tube_area/100.0)*sg_siphon_length**(4/7)*sg_siphon_height**(3/7)/sg_siphon_diameter**(19/7) print('siphoning time without inflow : %4.1f s' % ts0) # Theoretical siphoning rate [l/h] sr = sg_tube_area*sg_siphon_height*3.6/1000/ts0 print('siphoning rate : %4.2f l/h' % sr) # Theoretical siphoning time with inflow ts = ts0/(1-tk_inflow_mean/sr) print('siphoning time with inflow of %4.2f l/h : %4.1f s' % (tk_inflow_mean, ts)) # sensor low and high frequencies ss_min_capacity = ss_always_wet_length * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius) ss_max_freq = 1/(0.693*2*ss_resistance*ss_min_capacity) ss_max_capacity = (ss_always_wet_length + sg_siphon_height + tk_overflow_height) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius) ss_min_freq = 1/(0.693*2*ss_resistance*ss_max_capacity) print('sensor frequency range [%5.0f Hz - %5.0f Hz]' % (ss_min_freq, ss_max_freq)) # Simulation while time < time_end: time += time_step t.append(time) # tk update tk_net_input = time_step*(tk_inflow-tk_outflow)*1000/3.6 # net water input during time_step [mm³] tk_water_level += tk_net_input/tk_tank_area if tk_water_level > tk_overflow_height: tk_water_level = tk_overflow_height elif tk_water_level < 0.0: tk_water_level = 0.0 tk_outflow = (2*g*tk_water_level)**(1/2)*tk_hole_area*3.6/1000 # [l/h] tk_inflow = get_inflow(time, tk_inflow_mean, tk_inflow_variation, tk_inflow_var_period, tk_inflow_var_phase, tk_inflow_random, single_flow) tk_h.append(tk_water_level) tk_o.append(tk_outflow) tk_i.append(tk_inflow) # sg update sg_net_input = time_step*(tk_outflow-sg_outflow)*1000/3.6 # net water input during time_step [mm³] sg_water_level += sg_net_input/sg_tube_area if sg_water_level > sg_siphon_height: sg_active = 1 elif sg_water_level <= sg_desiphoning_level: sg_active = 0 if sg_active == 1: sg_outflow = np.pi/900*(sg_water_level/(0.000016*sg_siphon_length))**(4/7)*sg_siphon_diameter**(19/7) # 
[l/h] else: sg_outflow = 0.0 sg_total_outflow_volume += (sg_outflow/3600)*time_step # [l] sg_h.append(sg_water_level) sg_o.append(sg_outflow) sg_a.append(sg_active) # ss update ss_counter.append(ss_frequency*time_step) ss_capacity = (ss_always_wet_length + sg_water_level + tk_water_level) * epsr_teflon / 500 * np.pi * eps0 / np.log(ss_outer_radius / ss_inner_radius) ss_frequency = 1/(0.693*2*ss_resistance*ss_capacity) # # Simulation outputs #print('Total outflow of gauge over %4.1f s : %4.3f l' % (time_end-time_start, sg_total_outflow_volume)) if draw_graphs: sim_fig = plt.figure('Tank and siphon gauge') # Tank tk_ax1 = sim_fig.add_subplot(4, 1, 1) tk_ax1.plot(t, tk_h, '-b') tk_ax1.set_ylabel('level in \nupper tank [mm]') tk_ax2 = sim_fig.add_subplot(4, 1, 2, sharex=tk_ax1) tk_ax2.plot(t, tk_o, '-r') tk_ax2.hold('on') tk_ax2.plot(t, tk_i, '-g') tk_ax2.set_ylabel('inflow in \nupper tank and\n outflow to \nsiphon gauge [l/h]') # Siphon tk_ax3 = sim_fig.add_subplot(4, 1, 3, sharex=tk_ax1) tk_ax3.plot(t, sg_h, '-b') tk_ax3.set_ylabel('level in \nsiphon gauge [mm]') tk_ax4 = sim_fig.add_subplot(4, 1, 4, sharex=tk_ax1) tk_ax4.plot(t, sg_o, '-g') tk_ax4.hold('on') tk_ax4.plot(t, sg_a, '-k') tk_ax4.set_xlabel('time [s]') tk_ax4.set_ylabel('outflow of \nsiphon gauge [l/h]') # Data acquisition system output das_fig = plt.figure('DAS acquisition') das_ax1 = das_fig.add_subplot(5, 1, 1, sharex=tk_ax1) das_ax1.plot(t, ss_counter, '-k') das_ax1.set_ylabel('Sensor oscillations [-]') # resample oscillations to compute DAS frequencies das_t = [] das_frequencies = [] for i in range(0, len(ss_counter)-int(das_period / time_step), int(das_period / time_step)): freq = 0 for j in range(0, int(das_period / time_step)): freq += ss_counter[i+j] das_t.append(time_start+(i+j)*time_step) das_frequencies.append(freq/das_period) x, das_siphoning = schmitt_trigger(das_frequencies, 5000, 7000, 9000) das_sawtooth, das_slope = comb_to_linapprox(das_siphoning) das_volume = das_sawtooth*sg_siphon_height*sg_tube_area/1000000 das_flow = das_slope *sg_siphon_height*sg_tube_area/1000000 / (das_period/3600) if draw_graphs: das_ax2 = das_fig.add_subplot(5, 1, 2, sharex=tk_ax1) das_ax2.plot(das_t, das_frequencies, '-r') das_ax2.set_ylabel('DAS Frequencies [Hz]') das_ax3 = das_fig.add_subplot(5, 1, 3, sharex=tk_ax1) das_ax3.plot(das_t, das_siphoning, '-k') das_ax3.set_ylabel('Siphoning [0/1]') das_ax4 = das_fig.add_subplot(5, 1, 4, sharex=tk_ax1) das_ax4.plot(das_t, das_volume, '-r') das_ax4.set_xlabel('time [s]') das_ax4.set_ylabel('Volume [l]') das_ax4.hold('on') das_ax4.plot(t, np.cumsum(tk_o)/3600*time_step, '-g') das_ax5 = das_fig.add_subplot(5, 1, 5, sharex=tk_ax1) das_ax5.plot(das_t, das_flow, '-g') das_ax5.set_xlabel('time [s]') das_ax5.set_ylabel('Flow [l/h]') plt.show() print('Estimated total Volume : %d x %4.3f l = %4.3f l' %(np.sum(das_siphoning), sg_tube_area*sg_siphon_height/1000000, np.sum(das_siphoning)*sg_tube_area*sg_siphon_height/1000000)) print('________________________________________________') inflow.append(tk_inflow_mean) estimated_inflow.append(2*(das_volume[1349]-das_volume[449])) flow_error = [] for i in range(0, len(inflow)): flow_error.append(100*(inflow[i] - estimated_inflow[i])/estimated_inflow[i]) if not single_flow: err_fig = plt.figure('errors') flow_error = [] for i in range(0, len(inflow)): flow_error.append(100*(inflow[i] - estimated_inflow[i])/estimated_inflow[i]) axes = err_fig.add_subplot(2, 1, 1) axes.plot(estimated_inflow, inflow, '-b') axes.set_xlabel('estimated inflow [l/h]') 
axes.set_ylabel('real inflow [l/h]') plt.xlim(0.0, 15.0) plt.ylim(0.0, 15.0) plt.grid(b=True, which='major', color='k', linestyle='-') axes2 = err_fig.add_subplot(2, 1, 2, sharex=axes) axes2.plot(estimated_inflow, flow_error, '-r') axes2.set_xlabel('estimated inflow [l/h]') axes2.set_ylabel('relative error [%]') plt.xlim(0.0, 15.0) plt.ylim(0.0, 50.0) plt.grid(b=True, which='major', color='k', linestyle='-') plt.show() calibration = [] for i in range(len(flow_error)): calibration.append(str('\t'.join(list(map(str,[estimated_inflow[i],flow_error[i], '\n']))))) if save_calibration: with open(calibration_file,'w+') as cal_file: cal_file.writelines(calibration) if load_calibration: with open(calibration_file,'r') as cal_file: rows = [list(map(float, L.strip().split('\t'))) for L in cal_file] cal_estimated_inflow, cal_flow_error = [], [] for i in range(len(rows)): cal_estimated_inflow.append(rows[i][0]) cal_flow_error.append(rows[i][1]) cal_inflow, cal_error = [], [] for i in range(len(cal_estimated_inflow)-1): tmp_inflow = np.linspace(cal_estimated_inflow[i], cal_estimated_inflow[i+1], 10) tmp_error = np.linspace(cal_flow_error[i], cal_flow_error[i+1], 10) for j in range(len(tmp_error)): cal_inflow.append(tmp_inflow[j]) cal_error.append(tmp_error[j]) corr_flow = [] for i in range(len(das_flow)): for j in range(len(cal_error)): if round(das_flow[i], 1) == round(cal_inflow[j], 1): corr = cal_error[j] break else: corr = 0.0 corr_flow.append(das_flow[i]*(1.0 + corr/100)) # corr_fig = plt.figure('Corrections') # das_ax1 = corr_fig.add_subplot(1, 1, 1) # das_ax1.plot(t, tk_i, '-g', label='simulated inflow') # das_ax1.plot(das_t, das_flow, '-b',label='retrieved inflow') # das_ax1.plot(das_t, corr_flow, '-r',label='corrected retrieved inflow') # das_ax1.set_xlabel('time [s]') # das_ax1.set_ylabel('Flow [l/h]') # plt.legend() # plt.show() # alternative flow computation centered_times = [] centered_flow = [] siphoning_time = [das_t[i] for i in range(len(das_t)) if das_siphoning[i] == 1] for i in range(len(siphoning_time)-1): centered_times.append((siphoning_time[i+1]+siphoning_time[i])/2) centered_flow.append(sg_tube_area*sg_siphon_height*3.6/1000/(siphoning_time[i+1]-siphoning_time[i])) # [l/h] corr_centered_flow = [] for i in range(len(centered_flow)): for j in range(len(cal_error)): if round(centered_flow[i], 1) == round(cal_inflow[j], 1): corr = cal_error[j] break else: corr = 0.0 corr_centered_flow.append(centered_flow[i]*(1.0 + corr/100)) interpolate_corr_flow = interp1d(centered_times, corr_centered_flow,kind='cubic') interpolate_flow = interp1d(centered_times, centered_flow,kind='cubic') das_t_interpolation = np.array(das_t)[(np.array(das_t) > centered_times[0]) & (np.array(das_t)<centered_times[-1])] interpolated_flow = interpolate_flow(das_t_interpolation) interpolated_corr_flow = interpolate_corr_flow(das_t_interpolation) pchip_interpolated_flow = pchip_interpolate(centered_times, corr_centered_flow,das_t_interpolation) import matplotlib matplotlib.rcParams.update({'font.size':15}) corr_fig = plt.figure('Poster') # Siphon tk_ax3 = corr_fig.add_subplot(3, 1, 1) tk_ax3.plot(t, sg_h, '-b') tk_ax3.set_ylabel('level in \nsiphon gauge [mm]') tk_ax3.set_axis_bgcolor('0.95') tk_ax3.grid(True) # DAS frequencies das_ax2 = corr_fig.add_subplot(3, 1, 2, sharex=tk_ax3) das_ax2.plot(das_t, das_frequencies, '-r') das_ax2.set_ylabel('DAS Frequencies [Hz]') das_ax2.set_axis_bgcolor('0.95') das_ax2.grid(True) # Retrieved flows das_ax1 = corr_fig.add_subplot(3, 1, 3, sharex=tk_ax3) das_ax1.plot(t, tk_i, '-', 
color='grey', linewidth=3, label='simulated inflow') #das_ax1.plot(das_t, das_flow, '-b',label='retrieved inflow') #das_ax1.plot(das_t, corr_flow, '-r',label='corrected retrieved inflow') das_ax1.plot(das_t_interpolation, interpolated_flow, '-r', linewidth=2, label='retrieved inflow') das_ax1.plot(das_t_interpolation, interpolated_corr_flow, '-k', linewidth=2, label='corrected retrieved inflow') #das_ax1.plot(das_t_interpolation, pchip_interpolated_flow, '-b', label='piecwise cubic interpolated retrieved inflow') #das_ax1.plot(centered_times,centered_flow,'ok') das_ax1.set_xlabel('time [s]') das_ax1.set_ylabel('Flow [l/h]') das_ax1.set_axis_bgcolor('0.95') das_ax1.grid(True) das_ax1.legend(loc='lower right', fontsize=15) tk_ax3.set_xlim((0, 36000)) plt.show() corr_fig.savefig('/home/su530201/Images/Poster_GB2016.png', dpi=(600), bbox_inches='tight')
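The script above relies on its schmitt_trigger helper to turn the sensor frequency trace into discrete siphoning events; a small, self-contained illustration with made-up numbers, using the same low/high/threshold values as the script:

# Hedged illustration of schmitt_trigger() defined at the top of the script.
trace = [9500, 9500, 4000, 4000, 9500, 9500, 4000, 9500]
states, events = schmitt_trigger(trace, 5000, 7000, 9000)
print(events)  # -> [0, 0, 0, 0, 1, 0, 0, 1]: a 1 marks each sample where the
               # signal climbs back above the threshold after dropping below low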
gpl-3.0
6,213,190,602,569,627,000
44.776119
194
0.574154
false
2.96934
false
false
false
public-ink/public-ink
server/appengine-staging/lib/graphene/types/generic.py
1
1233
from __future__ import unicode_literals from graphql.language.ast import (BooleanValue, FloatValue, IntValue, StringValue, ListValue, ObjectValue) from graphene.types.scalars import MIN_INT, MAX_INT from .scalars import Scalar class GenericScalar(Scalar): """ The `GenericScalar` scalar type represents a generic GraphQL scalar value that could be: String, Boolean, Int, Float, List or Object. """ @staticmethod def identity(value): return value serialize = identity parse_value = identity @staticmethod def parse_literal(ast): if isinstance(ast, (StringValue, BooleanValue)): return ast.value elif isinstance(ast, IntValue): num = int(ast.value) if MIN_INT <= num <= MAX_INT: return num elif isinstance(ast, FloatValue): return float(ast.value) elif isinstance(ast, ListValue): return [GenericScalar.parse_literal(value) for value in ast.values] elif isinstance(ast, ObjectValue): return {field.name.value: GenericScalar.parse_literal(field.value) for field in ast.fields} else: return None
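A hedged sketch of what the identity pass-through above means in practice; the payload and the commented schema snippet are illustrative:

# GenericScalar passes values through untouched on both the variable-parsing
# and serialization paths, so it can carry arbitrary JSON-like data.
payload = {"theme": "dark", "retries": 3, "flags": [True, 1.5]}
assert GenericScalar.parse_value(payload) is payload
assert GenericScalar.serialize(payload) is payload
# In a schema it is attached like any other scalar field, e.g.:
#     class Query(graphene.ObjectType):
#         settings = GenericScalar()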
gpl-3.0
3,297,268,475,079,431,000
30.615385
103
0.631792
false
4.419355
false
false
false
melinath/django-graph-api
django_graph_api/graphql/introspection.py
1
7781
from django_graph_api.graphql.types import ( BooleanField, CharField, Enum, ENUM, EnumField, INPUT_OBJECT, INTERFACE, List, LIST, ManyEnumField, ManyRelatedField, NON_NULL, Object, OBJECT, RelatedField, SCALAR, UNION, NonNull, ) class DirectiveLocationEnum(Enum): object_name = '__DirectiveLocation' values = ( { 'name': 'QUERY', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': 'MUTATION', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': 'FIELD', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': 'FRAGMENT_DEFINITION', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': 'FRAGMENT_SPREAD', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': 'INLINE_FRAGMENT', 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, ) class TypeKindEnum(Enum): object_name = '__TypeKind' values = ( { 'name': SCALAR, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': OBJECT, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': INTERFACE, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': UNION, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': ENUM, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': INPUT_OBJECT, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': LIST, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, { 'name': NON_NULL, 'description': None, 'isDeprecated': False, 'deprecationReason': None, }, ) class InputValue(Object): object_name = '__InputValue' name = CharField() description = CharField() type = RelatedField(lambda: Type) defaultValue = CharField() def get_name(self): return self.data[0] def get_type(self): type_ = self.data[1] if not type_.null: return NonNull(type_) elif isinstance(type_, List): return type_ return type_.__class__ class Directive(Object): object_name = '__Directive' name = CharField() description = CharField() locations = ManyEnumField(DirectiveLocationEnum) args = ManyRelatedField(InputValue) class Field(Object): # self.data will be an item from a declared fields dict object_name = '__Field' name = CharField() description = CharField() type = RelatedField(lambda: Type) args = ManyRelatedField(InputValue) isDeprecated = BooleanField() deprecationReason = CharField() def get_name(self): return self.data[0] def get_description(self): return getattr(self.data[1], 'description', None) def get_type(self): field = self.data[1] if isinstance(field, RelatedField): type_ = field.object_type if isinstance(field.type_, List): type_ = List(type_) if not field.null: type_ = NonNull(type_) elif not field.null: type_ = NonNull(field.type_) else: type_ = field.type_ return type_ def get_args(self): return tuple(self.data[1].arguments.items()) class EnumValue(Object): object_name = '__EnumValue' name = CharField() description = CharField() isDeprecated = BooleanField() deprecationReason = CharField() class Type(Object): # self.data will be an object or scalar object_name = '__Type' kind = EnumField(TypeKindEnum) name = CharField() description = CharField() fields = ManyRelatedField(Field) inputFields = ManyRelatedField(InputValue) interfaces = ManyRelatedField('self') possibleTypes = ManyRelatedField('self') enumValues = ManyRelatedField(EnumValue) ofType = RelatedField('self') def 
get_name(self): if self.data.kind in [LIST, NON_NULL]: return None return self.data.object_name def get_fields(self): if self.data.kind != OBJECT: return None return sorted( ( (name, field) for name, field in self.data._declared_fields.items() if name[:2] != '__' ), key=lambda item: item[0], ) def get_inputFields(self): if self.data.kind != INPUT_OBJECT: return None return [] def get_interfaces(self): if self.data.kind != OBJECT: return None return [] def get_possibleTypes(self): return None def get_enumValues(self): if self.data.kind != ENUM: return None return self.data.values def get_ofType(self): if self.data.kind in [NON_NULL, LIST]: type_ = self.data.type_ # Don't return NonNull if self is already NonNull if self.data.kind is not NON_NULL and not getattr(type_, 'null', True): return NonNull(type_) return type_ return None class Schema(Object): # self.data will be the query_root. object_name = '__Schema' types = ManyRelatedField(Type) queryType = RelatedField(Type) mutationType = RelatedField(Type) directives = ManyRelatedField(Directive) def _collect_types(self, object_type, types=None): if types is None: types = set((object_type,)) for field in object_type._declared_fields.values(): if isinstance(field, RelatedField): object_type = field.object_type if object_type in types: continue types.add(object_type) self._collect_types(object_type, types) elif isinstance(field, EnumField): enum_type = field.enum if enum_type in types: continue types.add(enum_type) elif isinstance(field.type_, List): field = field.type_ elif field.type_: types.add(field.type_) return types def _type_key(self, type_): object_name = type_.object_name # Sort: defined types, introspection types, scalars, and then by name. return ( type_.kind == SCALAR, object_name.startswith('__'), object_name, ) def get_types(self): types = self._collect_types(self.data.query_root_class) return sorted(types, key=self._type_key) def get_queryType(self): return self.data.query_root_class def get_mutationType(self): return None def get_directives(self): return []
mit
-8,515,198,416,324,023,000
25.198653
83
0.528081
false
4.356663
false
false
false
fsimkovic/conkit
conkit/core/struct.py
1
2751
# BSD 3-Clause License # # Copyright (c) 2016-18, University of Liverpool # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Internal classes required by ConKit defining some sort of internal structure""" from __future__ import division from __future__ import print_function __author__ = "Felix Simkovic" __date__ = "03 Aug 2016" __version__ = "1.0" class _Struct(object): """A basic class representing a struct residue""" __slots__ = ('res_seq', 'res_altseq', 'res_name', 'res_chain') def __repr__(self): string = "{name}(res_seq='{res_seq}' res_altseq='{res_altseq}' res_name='{res_name}' res_chain='{res_chain}')" return string.format(name=self.__class__.__name__, **{k: getattr(self, k) for k in self.__class__.__slots__}) class Gap(_Struct): """A basic class representing a gap residue""" IDENTIFIER = -999999 def __init__(self): self.res_seq = Gap.IDENTIFIER self.res_altseq = Gap.IDENTIFIER self.res_name = 'X' self.res_chain = '' class Residue(_Struct): """A basic class representing a residue""" def __init__(self, res_seq, res_altseq, res_name, res_chain): self.res_seq = res_seq self.res_altseq = res_altseq self.res_name = res_name self.res_chain = res_chain
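A minimal usage sketch for the structs above; residue numbers and names are illustrative:

# Residue stores sequence numbering plus name and chain; __repr__ comes from _Struct.
res = Residue(res_seq=21, res_altseq=21, res_name='ALA', res_chain='A')
print(res)  # Residue(res_seq='21' res_altseq='21' res_name='ALA' res_chain='A')

gap = Gap()
print(gap.res_seq == Gap.IDENTIFIER)  # True -- gaps carry the -999999 sentinel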
bsd-3-clause
3,594,218,753,008,442,400
39.455882
118
0.708833
false
4.010204
false
false
false
bravomikekilo/mxconsole
mxconsole/platform/flags.py
1
4765
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of the flags interface.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse as _argparse from mxconsole.util.all_util import remove_undocumented _global_parser = _argparse.ArgumentParser() # pylint: disable=invalid-name class _FlagValues(object): """Global container and accessor for flags and their values.""" def __init__(self): self.__dict__['__flags'] = {} self.__dict__['__parsed'] = False def _parse_flags(self, args=None): result, unparsed = _global_parser.parse_known_args(args=args) for flag_name, val in vars(result).items(): self.__dict__['__flags'][flag_name] = val self.__dict__['__parsed'] = True return unparsed def __getattr__(self, name): """Retrieves the 'value' attribute of the flag --name.""" if not self.__dict__['__parsed']: self._parse_flags() if name not in self.__dict__['__flags']: raise AttributeError(name) return self.__dict__['__flags'][name] def __setattr__(self, name, value): """Sets the 'value' attribute of the flag --name.""" if not self.__dict__['__parsed']: self._parse_flags() self.__dict__['__flags'][name] = value def _define_helper(flag_name, default_value, docstring, flagtype): """Registers 'flag_name' with 'default_value' and 'docstring'.""" _global_parser.add_argument('--' + flag_name, default=default_value, help=docstring, type=flagtype) # Provides the global object that can be used to access flags. FLAGS = _FlagValues() def DEFINE_string(flag_name, default_value, docstring): """Defines a flag of type 'string'. Args: flag_name: The name of the flag as a string. default_value: The default value the flag should take as a string. docstring: A helpful message explaining the use of the flag. """ _define_helper(flag_name, default_value, docstring, str) def DEFINE_integer(flag_name, default_value, docstring): """Defines a flag of type 'int'. Args: flag_name: The name of the flag as a string. default_value: The default value the flag should take as an int. docstring: A helpful message explaining the use of the flag. """ _define_helper(flag_name, default_value, docstring, int) def DEFINE_boolean(flag_name, default_value, docstring): """Defines a flag of type 'boolean'. Args: flag_name: The name of the flag as a string. default_value: The default value the flag should take as a boolean. docstring: A helpful message explaining the use of the flag. """ # Register a custom function for 'bool' so --flag=True works. def str2bool(v): return v.lower() in ('true', 't', '1') _global_parser.add_argument('--' + flag_name, nargs='?', const=True, help=docstring, default=default_value, type=str2bool) # Add negated version, stay consistent with argparse with regard to # dashes in flag names. 
_global_parser.add_argument('--no' + flag_name, action='store_false', dest=flag_name.replace('-', '_')) # The internal google library defines the following alias, so we match # the API for consistency. DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name def DEFINE_float(flag_name, default_value, docstring): """Defines a flag of type 'float'. Args: flag_name: The name of the flag as a string. default_value: The default value the flag should take as a float. docstring: A helpful message explaining the use of the flag. """ _define_helper(flag_name, default_value, docstring, float) _allowed_symbols = [ # We rely on gflags documentation. 'DEFINE_bool', 'DEFINE_boolean', 'DEFINE_float', 'DEFINE_integer', 'DEFINE_string', 'FLAGS', ] remove_undocumented(__name__, _allowed_symbols)
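A hedged usage sketch mirroring the TensorFlow-style flags API defined above; flag names and defaults are illustrative:

# Flags are registered up front; parsing of sys.argv happens lazily on the
# first FLAGS attribute access (unknown arguments are left unparsed).
DEFINE_string('log_dir', '/tmp/logs', 'Directory for event files.')
DEFINE_integer('port', 6006, 'Port to serve on.')
DEFINE_boolean('debug', False, 'Enable verbose logging.')

print(FLAGS.port)   # 6006 unless overridden with --port=...
print(FLAGS.debug)  # --debug, --debug=true and --nodebug all work via str2bool
                    # plus the generated negated flag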
apache-2.0
3,211,316,646,001,435,600
32.321678
80
0.633998
false
4.062234
false
false
false
bundlewrap/bundlewrap
bundlewrap/utils/ui.py
1
14061
from contextlib import contextmanager, suppress from datetime import datetime from functools import wraps from os import _exit, environ, getpid, kill from os.path import join from select import select from shutil import get_terminal_size from signal import signal, SIG_DFL, SIGINT, SIGQUIT, SIGTERM from subprocess import PIPE, Popen import sys import termios from time import time from threading import Event, Lock, Thread from . import STDERR_WRITER, STDOUT_WRITER from .table import render_table, ROW_SEPARATOR from .text import ( HIDE_CURSOR, SHOW_CURSOR, ansi_clean, blue, bold, format_duration, mark_for_translation as _, ) INFO_EVENT = Event() QUIT_EVENT = Event() SHUTDOWN_EVENT_HARD = Event() SHUTDOWN_EVENT_SOFT = Event() TTY = STDOUT_WRITER.isatty() def add_debug_indicator(f): @wraps(f) def wrapped(self, msg, **kwargs): return f(self, "[DEBUG] " + msg, **kwargs) return wrapped def add_debug_timestamp(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_mode: msg = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg return f(self, msg, **kwargs) return wrapped def capture_for_debug_logfile(f): @wraps(f) def wrapped(self, msg, **kwargs): if self.debug_log_file and self._active: with self.lock: self.debug_log_file.write( datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + ansi_clean(msg).rstrip("\n") + "\n" ) return f(self, msg, **kwargs) return wrapped def clear_formatting(f): """ Makes sure formatting from cut-off lines can't bleed into next one """ @wraps(f) def wrapped(self, msg, **kwargs): if TTY and environ.get("BW_COLORS", "1") != "0": msg = "\033[0m" + msg return f(self, msg, **kwargs) return wrapped def sigint_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ if not SHUTDOWN_EVENT_SOFT.is_set(): SHUTDOWN_EVENT_SOFT.set() else: SHUTDOWN_EVENT_HARD.set() def sigquit_handler(*args, **kwargs): """ This handler is kept short since it interrupts execution of the main thread. It's safer to handle these events in their own thread because the main thread might be holding the IO lock while it is interrupted. """ INFO_EVENT.set() def spinner(): while True: for c in "⠁⠈⠐⠠⢀⡀⠄⠂": yield c def page_lines(lines): """ View the given list of Unicode lines in a pager (e.g. `less`). """ lines = list(lines) line_width = max([len(ansi_clean(line)) for line in lines]) if ( TTY and ( line_width > get_terminal_size().columns or len(lines) > get_terminal_size().lines ) ): write_to_stream(STDOUT_WRITER, SHOW_CURSOR) env = environ.copy() env["LESS"] = env.get("LESS", "") + " -R" pager = Popen( [environ.get("PAGER", "/usr/bin/less")], env=env, stdin=PIPE, ) with suppress(BrokenPipeError): pager.stdin.write("\n".join(lines).encode('utf-8')) pager.stdin.close() pager.communicate() write_to_stream(STDOUT_WRITER, HIDE_CURSOR) else: for line in lines: io.stdout(line) def write_to_stream(stream, msg): with suppress(BrokenPipeError): if TTY: stream.write(msg) else: stream.write(ansi_clean(msg)) stream.flush() class DrainableStdin: def get_input(self): while True: if QUIT_EVENT.is_set(): return None if select([sys.stdin], [], [], 0.1)[0]: return sys.stdin.readline().strip() def drain(self): if sys.stdin.isatty(): termios.tcflush(sys.stdin, termios.TCIFLUSH) class IOManager: """ Threadsafe singleton class that handles all IO. 
""" def __init__(self): self._active = False self.debug_log_file = None self.debug_mode = False self.jobs = [] self.lock = Lock() self.progress = 0 self.progress_start = None self.progress_total = 0 self._spinner = spinner() self._last_spinner_character = next(self._spinner) self._last_spinner_update = 0 self._signal_handler_thread = None self._child_pids = [] self._status_line_present = False self._waiting_for_input = False def activate(self): self._active = True if 'BW_DEBUG_LOG_DIR' in environ: self.debug_log_file = open(join( environ['BW_DEBUG_LOG_DIR'], "{}_{}.log".format( datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), getpid(), ), ), 'a') self._signal_handler_thread = Thread( target=self._signal_handler_thread_body, ) # daemon mode is required because we need to keep the thread # around until the end of a soft shutdown to wait for a hard # shutdown signal, but don't have a feasible way of stopping # the thread once the soft shutdown has completed self._signal_handler_thread.daemon = True self._signal_handler_thread.start() signal(SIGINT, sigint_handler) signal(SIGQUIT, sigquit_handler) if TTY: write_to_stream(STDOUT_WRITER, HIDE_CURSOR) def ask(self, question, default, epilogue=None, input_handler=DrainableStdin()): assert self._active answers = _("[Y/n]") if default else _("[y/N]") question = question + " " + answers + " " self._waiting_for_input = True with self.lock: if QUIT_EVENT.is_set(): sys.exit(0) self._clear_last_job() while True: write_to_stream(STDOUT_WRITER, "\a" + question + SHOW_CURSOR) input_handler.drain() answer = input_handler.get_input() if answer is None: if epilogue: write_to_stream(STDOUT_WRITER, "\n" + epilogue + "\n") QUIT_EVENT.set() sys.exit(0) elif answer.lower() in (_("y"), _("yes")) or ( not answer and default ): answer = True break elif answer.lower() in (_("n"), _("no")) or ( not answer and not default ): answer = False break write_to_stream( STDOUT_WRITER, _("Please answer with 'y(es)' or 'n(o)'.\n"), ) if epilogue: write_to_stream(STDOUT_WRITER, epilogue + "\n") write_to_stream(STDOUT_WRITER, HIDE_CURSOR) self._waiting_for_input = False return answer def deactivate(self): self._active = False if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) signal(SIGINT, SIG_DFL) signal(SIGQUIT, SIG_DFL) self._signal_handler_thread.join() if self.debug_log_file: self.debug_log_file.close() @clear_formatting @add_debug_indicator @capture_for_debug_logfile @add_debug_timestamp def debug(self, msg, append_newline=True): if self.debug_mode: with self.lock: self._write(msg, append_newline=append_newline) def job_add(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.append(msg) self._write_current_job() def job_del(self, msg): if not self._active: return with self.lock: self._clear_last_job() self.jobs.remove(msg) self._write_current_job() def progress_advance(self, increment=1): with self.lock: self.progress += increment def progress_increase_total(self, increment=1): with self.lock: self.progress_total += increment def progress_set_total(self, total): self.progress = 0 self.progress_start = datetime.utcnow() self.progress_total = total def progress_show(self): if INFO_EVENT.is_set(): INFO_EVENT.clear() table = [] if self.jobs: table.append([bold(_("Running jobs")), self.jobs[0].strip()]) for job in self.jobs[1:]: table.append(["", job.strip()]) try: progress = (self.progress / float(self.progress_total)) elapsed = datetime.utcnow() - self.progress_start remaining = elapsed / progress - elapsed except ZeroDivisionError: pass else: 
if table: table.append(ROW_SEPARATOR) table.extend([ [bold(_("Progress")), "{:.1f}%".format(progress * 100)], ROW_SEPARATOR, [bold(_("Elapsed")), format_duration(elapsed)], ROW_SEPARATOR, [ bold(_("Remaining")), _("{} (estimate based on progress)").format(format_duration(remaining)) ], ]) output = blue("i") + "\n" if table: for line in render_table(table): output += ("{x} {line}\n".format(x=blue("i"), line=line)) else: output += _("{x} No progress info available at this time.\n").format(x=blue("i")) io.stderr(output + blue("i")) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stderr(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline, err=True) @clear_formatting @capture_for_debug_logfile @add_debug_timestamp def stdout(self, msg, append_newline=True): with self.lock: self._write(msg, append_newline=append_newline) @contextmanager def job(self, job_text): self.job_add(job_text) try: yield finally: self.job_del(job_text) def job_wrapper(self, job_text): def outer_wrapper(wrapped_function): @wraps(wrapped_function) def inner_wrapper(*args, **kwargs): with self.job(job_text.format(*args, **kwargs)): return wrapped_function(*args, **kwargs) return inner_wrapper return outer_wrapper def _clear_last_job(self): if self._status_line_present and TTY: write_to_stream(STDOUT_WRITER, "\r\033[K") self._status_line_present = False def _signal_handler_thread_body(self): while self._active: self.progress_show() if not self._waiting_for_input: # do not block and ignore SIGINT while .ask()ing with self.lock: self._clear_last_job() self._write_current_job() if QUIT_EVENT.is_set(): if SHUTDOWN_EVENT_HARD.wait(0.1): self.stderr(_("{x} {signal} cleanup interrupted, exiting...").format( signal=bold(_("SIGINT")), x=blue("i"), )) for ssh_pid in self._child_pids: self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid)) with suppress(ProcessLookupError): kill(ssh_pid, SIGTERM) self._clear_last_job() if TTY: write_to_stream(STDOUT_WRITER, SHOW_CURSOR) _exit(1) else: if SHUTDOWN_EVENT_SOFT.wait(0.1): QUIT_EVENT.set() self.stderr(_( "{x} {signal} canceling pending tasks... " "(hit CTRL+C again for immediate dirty exit)" ).format( signal=bold(_("SIGINT")), x=blue("i"), )) def _spinner_character(self): if time() - self._last_spinner_update > 0.2: self._last_spinner_update = time() self._last_spinner_character = next(self._spinner) return self._last_spinner_character def _write(self, msg, append_newline=True, err=False): if not self._active: return self._clear_last_job() if msg is not None: if append_newline: msg += "\n" write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg) self._write_current_job() def _write_current_job(self): if self.jobs and TTY: line = "{} ".format(blue(self._spinner_character())) # must track line length manually as len() will count ANSI escape codes visible_length = 2 try: progress = (self.progress / float(self.progress_total)) except ZeroDivisionError: pass else: progress_text = "{:.1f}% ".format(progress * 100) line += bold(progress_text) visible_length += len(progress_text) line += self.jobs[-1][:get_terminal_size().columns - 1 - visible_length] write_to_stream(STDOUT_WRITER, line) self._status_line_present = True io = IOManager()
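A hedged sketch of the job API exposed by the module-level io singleton above; the job texts and messages are illustrative:

# Sketch only: activate() installs signal handlers and the status-line machinery,
# so it should be called once per process, and deactivate() undoes it.
io.activate()
try:
    with io.job("  downloading item list"):
        io.stdout("fetched 42 items")          # illustrative output line

    @io.job_wrapper("  processing {0}")
    def process(name):
        io.debug("working on " + name)         # only shown when debug_mode is set

    process("node1")
finally:
    io.deactivate()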
gpl-3.0
-4,341,101,949,010,468,400
31.969484
98
0.527376
false
4.03476
false
false
false
susahe/sis
sis/schedule/models.py
1
1278
from __future__ import unicode_literals

from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.db import models
from course.models import Activity, Course, CourseGroup
from datetime import datetime


# TheorySession: a scheduled theory class for a CourseGroup, linked to an
# Activity, with an is_present attendance flag.
class TheorySession(models.Model):
    coursegroup = models.ForeignKey(CourseGroup)
    name = models.CharField(max_length=120)
    start_time = models.DateTimeField(default=datetime.now, blank=True)
    end_time = models.DateTimeField(default=datetime.now, blank=True)
    activity = models.ForeignKey(Activity)
    is_present = models.BooleanField()


# LabSession: a scheduled lab class linked to an Activity.
class LabSession(models.Model):
    name = models.CharField(max_length=120)
    start_time = models.DateTimeField(default=datetime.now, blank=True)
    end_time = models.DateTimeField(default=datetime.now, blank=True)
    activity = models.ForeignKey(Activity)
    is_present = models.BooleanField()


# PracticalSession: a scheduled practical class linked to a User and an Activity.
class PracticalSession(models.Model):
    name = models.CharField(max_length=120)
    user = models.ForeignKey(User)
    start_time = models.DateTimeField(default=datetime.now, blank=True)
    end_time = models.DateTimeField(default=datetime.now, blank=True)
    activity = models.ForeignKey(Activity)
    is_present = models.BooleanField()
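A hedged ORM sketch for the models above, assuming suitable CourseGroup and Activity rows already exist in a configured Django project; all literal values are illustrative:

# Create one theory session (values are illustrative).
from datetime import datetime

group = CourseGroup.objects.first()      # assumes at least one group exists
activity = Activity.objects.first()      # assumes at least one activity exists
TheorySession.objects.create(
    coursegroup=group,
    name='Week 1 lecture',
    start_time=datetime(2016, 3, 1, 9, 0),
    end_time=datetime(2016, 3, 1, 11, 0),
    activity=activity,
    is_present=False,
)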
gpl-2.0
7,980,110,490,229,708,000
35.514286
68
0.79734
false
3.651429
false
false
false
Cinntax/home-assistant
homeassistant/components/plex/media_player.py
1
28269
"""Support to interface with the Plex API.""" from datetime import timedelta import json import logging import plexapi.exceptions import plexapi.playlist import plexapi.playqueue import requests.exceptions from homeassistant.components.media_player import MediaPlayerDevice from homeassistant.components.media_player.const import ( MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, ) from homeassistant.const import ( DEVICE_DEFAULT_NAME, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, ) from homeassistant.helpers.event import track_time_interval from homeassistant.util import dt as dt_util from .const import ( CONF_SERVER_IDENTIFIER, DOMAIN as PLEX_DOMAIN, NAME_FORMAT, REFRESH_LISTENERS, SERVERS, ) _LOGGER = logging.getLogger(__name__) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the Plex media_player platform. Deprecated. """ pass async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Plex media_player from a config entry.""" def add_entities(entities, update_before_add=False): """Sync version of async add entities.""" hass.add_job(async_add_entities, entities, update_before_add) hass.async_add_executor_job(_setup_platform, hass, config_entry, add_entities) def _setup_platform(hass, config_entry, add_entities_callback): """Set up the Plex media_player platform.""" server_id = config_entry.data[CONF_SERVER_IDENTIFIER] plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id] plex_clients = {} plex_sessions = {} hass.data[PLEX_DOMAIN][REFRESH_LISTENERS][server_id] = track_time_interval( hass, lambda now: update_devices(), timedelta(seconds=10) ) def update_devices(): """Update the devices objects.""" try: devices = plexserver.clients() except plexapi.exceptions.BadRequest: _LOGGER.exception("Error listing plex devices") return except requests.exceptions.RequestException as ex: _LOGGER.warning( "Could not connect to Plex server: %s (%s)", plexserver.friendly_name, ex, ) return new_plex_clients = [] available_client_ids = [] for device in devices: # For now, let's allow all deviceClass types if device.deviceClass in ["badClient"]: continue available_client_ids.append(device.machineIdentifier) if device.machineIdentifier not in plex_clients: new_client = PlexClient( plexserver, device, None, plex_sessions, update_devices ) plex_clients[device.machineIdentifier] = new_client _LOGGER.debug("New device: %s", device.machineIdentifier) new_plex_clients.append(new_client) else: _LOGGER.debug("Refreshing device: %s", device.machineIdentifier) plex_clients[device.machineIdentifier].refresh(device, None) # add devices with a session and no client (ex. PlexConnect Apple TV's) try: sessions = plexserver.sessions() except plexapi.exceptions.BadRequest: _LOGGER.exception("Error listing plex sessions") return except requests.exceptions.RequestException as ex: _LOGGER.warning( "Could not connect to Plex server: %s (%s)", plexserver.friendly_name, ex, ) return plex_sessions.clear() for session in sessions: for player in session.players: plex_sessions[player.machineIdentifier] = session, player for machine_identifier, (session, player) in plex_sessions.items(): if machine_identifier in available_client_ids: # Avoid using session if already added as a device. 
_LOGGER.debug("Skipping session, device exists: %s", machine_identifier) continue if ( machine_identifier not in plex_clients and machine_identifier is not None ): new_client = PlexClient( plexserver, player, session, plex_sessions, update_devices ) plex_clients[machine_identifier] = new_client _LOGGER.debug("New session: %s", machine_identifier) new_plex_clients.append(new_client) else: _LOGGER.debug("Refreshing session: %s", machine_identifier) plex_clients[machine_identifier].refresh(None, session) for client in plex_clients.values(): # force devices to idle that do not have a valid session if client.session is None: client.force_idle() client.set_availability( client.machine_identifier in available_client_ids or client.machine_identifier in plex_sessions ) if client not in new_plex_clients: client.schedule_update_ha_state() if new_plex_clients: add_entities_callback(new_plex_clients) class PlexClient(MediaPlayerDevice): """Representation of a Plex device.""" def __init__(self, plex_server, device, session, plex_sessions, update_devices): """Initialize the Plex device.""" self._app_name = "" self._device = None self._available = False self._marked_unavailable = None self._device_protocol_capabilities = None self._is_player_active = False self._is_player_available = False self._player = None self._machine_identifier = None self._make = "" self._name = None self._player_state = "idle" self._previous_volume_level = 1 # Used in fake muting self._session = None self._session_type = None self._session_username = None self._state = STATE_IDLE self._volume_level = 1 # since we can't retrieve remotely self._volume_muted = False # since we can't retrieve remotely self.plex_server = plex_server self.plex_sessions = plex_sessions self.update_devices = update_devices # General self._media_content_id = None self._media_content_rating = None self._media_content_type = None self._media_duration = None self._media_image_url = None self._media_title = None self._media_position = None self._media_position_updated_at = None # Music self._media_album_artist = None self._media_album_name = None self._media_artist = None self._media_track = None # TV Show self._media_episode = None self._media_season = None self._media_series_title = None self.refresh(device, session) def _clear_media_details(self): """Set all Media Items to None.""" # General self._media_content_id = None self._media_content_rating = None self._media_content_type = None self._media_duration = None self._media_image_url = None self._media_title = None # Music self._media_album_artist = None self._media_album_name = None self._media_artist = None self._media_track = None # TV Show self._media_episode = None self._media_season = None self._media_series_title = None # Clear library Name self._app_name = "" def refresh(self, device, session): """Refresh key device data.""" self._clear_media_details() if session: # Not being triggered by Chrome or FireTablet Plex App self._session = session if device: self._device = device try: device_url = self._device.url("/") except plexapi.exceptions.BadRequest: device_url = "127.0.0.1" if "127.0.0.1" in device_url: self._device.proxyThroughServer() self._session = None self._machine_identifier = self._device.machineIdentifier self._name = NAME_FORMAT.format(self._device.title or DEVICE_DEFAULT_NAME) self._device_protocol_capabilities = self._device.protocolCapabilities # set valid session, preferring device session if self._device.machineIdentifier in self.plex_sessions: self._session = 
self.plex_sessions.get( self._device.machineIdentifier, [None, None] )[0] if self._session: if ( self._device is not None and self._device.machineIdentifier is not None and self._session.players ): self._is_player_available = True self._player = [ p for p in self._session.players if p.machineIdentifier == self._device.machineIdentifier ][0] self._name = NAME_FORMAT.format(self._player.title) self._player_state = self._player.state self._session_username = self._session.usernames[0] self._make = self._player.device else: self._is_player_available = False # Calculate throttled position for proper progress display. position = int(self._session.viewOffset / 1000) now = dt_util.utcnow() if self._media_position is not None: pos_diff = position - self._media_position time_diff = now - self._media_position_updated_at if pos_diff != 0 and abs(time_diff.total_seconds() - pos_diff) > 5: self._media_position_updated_at = now self._media_position = position else: self._media_position_updated_at = now self._media_position = position self._media_content_id = self._session.ratingKey self._media_content_rating = getattr(self._session, "contentRating", None) self._set_player_state() if self._is_player_active and self._session is not None: self._session_type = self._session.type self._media_duration = int(self._session.duration / 1000) # title (movie name, tv episode name, music song name) self._media_title = self._session.title # media type self._set_media_type() self._app_name = ( self._session.section().title if self._session.section() is not None else "" ) self._set_media_image() else: self._session_type = None def _set_media_image(self): thumb_url = self._session.thumbUrl if ( self.media_content_type is MEDIA_TYPE_TVSHOW and not self.plex_server.use_episode_art ): thumb_url = self._session.url(self._session.grandparentThumb) if thumb_url is None: _LOGGER.debug( "Using media art because media thumb " "was not found: %s", self.entity_id, ) thumb_url = self.session.url(self._session.art) self._media_image_url = thumb_url def set_availability(self, available): """Set the device as available/unavailable noting time.""" if not available: self._clear_media_details() if self._marked_unavailable is None: self._marked_unavailable = dt_util.utcnow() else: self._marked_unavailable = None self._available = available def _set_player_state(self): if self._player_state == "playing": self._is_player_active = True self._state = STATE_PLAYING elif self._player_state == "paused": self._is_player_active = True self._state = STATE_PAUSED elif self.device: self._is_player_active = False self._state = STATE_IDLE else: self._is_player_active = False self._state = STATE_OFF def _set_media_type(self): if self._session_type in ["clip", "episode"]: self._media_content_type = MEDIA_TYPE_TVSHOW # season number (00) if callable(self._session.season): self._media_season = str((self._session.season()).index).zfill(2) elif self._session.parentIndex is not None: self._media_season = self._session.parentIndex.zfill(2) else: self._media_season = None # show name self._media_series_title = self._session.grandparentTitle # episode number (00) if self._session.index is not None: self._media_episode = str(self._session.index).zfill(2) elif self._session_type == "movie": self._media_content_type = MEDIA_TYPE_MOVIE if self._session.year is not None and self._media_title is not None: self._media_title += " (" + str(self._session.year) + ")" elif self._session_type == "track": self._media_content_type = MEDIA_TYPE_MUSIC self._media_album_name = 
self._session.parentTitle self._media_album_artist = self._session.grandparentTitle self._media_track = self._session.index self._media_artist = self._session.originalTitle # use album artist if track artist is missing if self._media_artist is None: _LOGGER.debug( "Using album artist because track artist " "was not found: %s", self.entity_id, ) self._media_artist = self._media_album_artist def force_idle(self): """Force client to idle.""" self._state = STATE_IDLE self._session = None self._clear_media_details() @property def should_poll(self): """Return True if entity has to be polled for state.""" return False @property def unique_id(self): """Return the id of this plex client.""" return self.machine_identifier @property def available(self): """Return the availability of the client.""" return self._available @property def name(self): """Return the name of the device.""" return self._name @property def machine_identifier(self): """Return the machine identifier of the device.""" return self._machine_identifier @property def app_name(self): """Return the library name of playing media.""" return self._app_name @property def device(self): """Return the device, if any.""" return self._device @property def marked_unavailable(self): """Return time device was marked unavailable.""" return self._marked_unavailable @property def session(self): """Return the session, if any.""" return self._session @property def state(self): """Return the state of the device.""" return self._state @property def _active_media_plexapi_type(self): """Get the active media type required by PlexAPI commands.""" if self.media_content_type is MEDIA_TYPE_MUSIC: return "music" return "video" @property def media_content_id(self): """Return the content ID of current playing media.""" return self._media_content_id @property def media_content_type(self): """Return the content type of current playing media.""" if self._session_type == "clip": _LOGGER.debug( "Clip content type detected, " "compatibility may vary: %s", self.entity_id, ) return MEDIA_TYPE_TVSHOW if self._session_type == "episode": return MEDIA_TYPE_TVSHOW if self._session_type == "movie": return MEDIA_TYPE_MOVIE if self._session_type == "track": return MEDIA_TYPE_MUSIC return None @property def media_artist(self): """Return the artist of current playing media, music track only.""" return self._media_artist @property def media_album_name(self): """Return the album name of current playing media, music track only.""" return self._media_album_name @property def media_album_artist(self): """Return the album artist of current playing media, music only.""" return self._media_album_artist @property def media_track(self): """Return the track number of current playing media, music only.""" return self._media_track @property def media_duration(self): """Return the duration of current playing media in seconds.""" return self._media_duration @property def media_position(self): """Return the duration of current playing media in seconds.""" return self._media_position @property def media_position_updated_at(self): """When was the position of the current playing media valid.""" return self._media_position_updated_at @property def media_image_url(self): """Return the image URL of current playing media.""" return self._media_image_url @property def media_title(self): """Return the title of current playing media.""" return self._media_title @property def media_season(self): """Return the season of current playing media (TV Show only).""" return self._media_season @property def 
media_series_title(self): """Return the title of the series of current playing media.""" return self._media_series_title @property def media_episode(self): """Return the episode of current playing media (TV Show only).""" return self._media_episode @property def make(self): """Return the make of the device (ex. SHIELD Android TV).""" return self._make @property def supported_features(self): """Flag media player features that are supported.""" if not self._is_player_active: return 0 # force show all controls if self.plex_server.show_all_controls: return ( SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_STOP | SUPPORT_VOLUME_SET | SUPPORT_PLAY | SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE ) # only show controls when we know what device is connecting if not self._make: return 0 # no mute support if self.make.lower() == "shield android tv": _LOGGER.debug( "Shield Android TV client detected, disabling mute " "controls: %s", self.entity_id, ) return ( SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_STOP | SUPPORT_VOLUME_SET | SUPPORT_PLAY | SUPPORT_TURN_OFF ) # Only supports play,pause,stop (and off which really is stop) if self.make.lower().startswith("tivo"): _LOGGER.debug( "Tivo client detected, only enabling pause, play, " "stop, and off controls: %s", self.entity_id, ) return SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP | SUPPORT_TURN_OFF # Not all devices support playback functionality # Playback includes volume, stop/play/pause, etc. if self.device and "playback" in self._device_protocol_capabilities: return ( SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_STOP | SUPPORT_VOLUME_SET | SUPPORT_PLAY | SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE ) return 0 def set_volume_level(self, volume): """Set volume level, range 0..1.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.setVolume(int(volume * 100), self._active_media_plexapi_type) self._volume_level = volume # store since we can't retrieve self.update_devices() @property def volume_level(self): """Return the volume level of the client (0..1).""" if ( self._is_player_active and self.device and "playback" in self._device_protocol_capabilities ): return self._volume_level @property def is_volume_muted(self): """Return boolean if volume is currently muted.""" if self._is_player_active and self.device: return self._volume_muted def mute_volume(self, mute): """Mute the volume. 
Since we can't actually mute, we'll: - On mute, store volume and set volume to 0 - On unmute, set volume to previously stored volume """ if not (self.device and "playback" in self._device_protocol_capabilities): return self._volume_muted = mute if mute: self._previous_volume_level = self._volume_level self.set_volume_level(0) else: self.set_volume_level(self._previous_volume_level) def media_play(self): """Send play command.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.play(self._active_media_plexapi_type) self.update_devices() def media_pause(self): """Send pause command.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.pause(self._active_media_plexapi_type) self.update_devices() def media_stop(self): """Send stop command.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.stop(self._active_media_plexapi_type) self.update_devices() def turn_off(self): """Turn the client off.""" # Fake it since we can't turn the client off self.media_stop() def media_next_track(self): """Send next track command.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.skipNext(self._active_media_plexapi_type) self.update_devices() def media_previous_track(self): """Send previous track command.""" if self.device and "playback" in self._device_protocol_capabilities: self.device.skipPrevious(self._active_media_plexapi_type) self.update_devices() def play_media(self, media_type, media_id, **kwargs): """Play a piece of media.""" if not (self.device and "playback" in self._device_protocol_capabilities): return src = json.loads(media_id) media = None if media_type == "MUSIC": media = ( self.device.server.library.section(src["library_name"]) .get(src["artist_name"]) .album(src["album_name"]) .get(src["track_name"]) ) elif media_type == "EPISODE": media = self._get_tv_media( src["library_name"], src["show_name"], src["season_number"], src["episode_number"], ) elif media_type == "PLAYLIST": media = self.device.server.playlist(src["playlist_name"]) elif media_type == "VIDEO": media = self.device.server.library.section(src["library_name"]).get( src["video_name"] ) if ( media and media_type == "EPISODE" and isinstance(media, plexapi.playlist.Playlist) ): # delete episode playlist after being loaded into a play queue self._client_play_media(media=media, delete=True, shuffle=src["shuffle"]) elif media: self._client_play_media(media=media, shuffle=src["shuffle"]) def _get_tv_media(self, library_name, show_name, season_number, episode_number): """Find TV media and return a Plex media object.""" target_season = None target_episode = None show = self.device.server.library.section(library_name).get(show_name) if not season_number: playlist_name = f"{self.entity_id} - {show_name} Episodes" return self.device.server.createPlaylist(playlist_name, show.episodes()) for season in show.seasons(): if int(season.seasonNumber) == int(season_number): target_season = season break if target_season is None: _LOGGER.error( "Season not found: %s\\%s - S%sE%s", library_name, show_name, str(season_number).zfill(2), str(episode_number).zfill(2), ) else: if not episode_number: playlist_name = "{} - {} Season {} Episodes".format( self.entity_id, show_name, str(season_number) ) return self.device.server.createPlaylist( playlist_name, target_season.episodes() ) for episode in target_season.episodes(): if int(episode.index) == int(episode_number): target_episode = episode break if target_episode is None: 
_LOGGER.error( "Episode not found: %s\\%s - S%sE%s", library_name, show_name, str(season_number).zfill(2), str(episode_number).zfill(2), ) return target_episode def _client_play_media(self, media, delete=False, **params): """Instruct Plex client to play a piece of media.""" if not (self.device and "playback" in self._device_protocol_capabilities): _LOGGER.error("Client cannot play media: %s", self.entity_id) return playqueue = plexapi.playqueue.PlayQueue.create( self.device.server, media, **params ) # Delete dynamic playlists used to build playqueue (ex. play tv season) if delete: media.delete() server_url = self.device.server.baseurl.split(":") self.device.sendCommand( "playback/playMedia", **dict( { "machineIdentifier": self.device.server.machineIdentifier, "address": server_url[1].strip("/"), "port": server_url[-1], "key": media.key, "containerKey": "/playQueues/{}?window=100&own=1".format( playqueue.playQueueID ), }, **params, ), ) self.update_devices() @property def device_state_attributes(self): """Return the scene state attributes.""" attr = { "media_content_rating": self._media_content_rating, "session_username": self._session_username, "media_library_name": self._app_name, } return attr
apache-2.0
1,241,736,605,791,758,600
33.986386
88
0.564894
false
4.393007
false
false
false
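The Plex client above fakes muting because the remote player exposes no native mute command: mute_volume() caches the current level, sets the volume to zero, and restores the cached level on unmute. A minimal standalone sketch of that store-and-restore pattern (class and attribute names are illustrative, not part of the Home Assistant integration):

class VolumeMuteEmulator:
    """Illustrative stand-in for a player that lacks a native mute command."""

    def __init__(self):
        self.volume_level = 0.5       # current volume, range 0..1
        self._previous_volume = 0.5   # cached level, restored on unmute
        self.is_volume_muted = False

    def set_volume_level(self, volume):
        self.volume_level = volume

    def mute_volume(self, mute):
        # Emulate mute: remember the level and drop to zero, or restore it.
        self.is_volume_muted = mute
        if mute:
            self._previous_volume = self.volume_level
            self.set_volume_level(0)
        else:
            self.set_volume_level(self._previous_volume)


player = VolumeMuteEmulator()
player.mute_volume(True)
assert player.volume_level == 0
player.mute_volume(False)
assert player.volume_level == 0.5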
djaodjin/djaodjin-pages
pages/api/sources.py
1
5950
# Copyright (c) 2021, Djaodjin Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #pylint: disable=no-member import logging, os, tempfile from django.template import TemplateSyntaxError from django.template.loader import _engine_list from django.utils._os import safe_join from rest_framework import status, generics, serializers from rest_framework.response import Response from ..mixins import ThemePackageMixin from ..themes import check_template, get_theme_dir, get_template_path LOGGER = logging.getLogger(__name__) def write_template(template_path, template_source): check_template(template_source) base_dir = os.path.dirname(template_path) if not os.path.isdir(base_dir): os.makedirs(base_dir) temp_file = tempfile.NamedTemporaryFile( mode='w+t', dir=base_dir, delete=False) temp_file.write(template_source) temp_file.close() os.rename(temp_file.name, template_path) LOGGER.info("pid %d wrote to %s", os.getpid(), template_path) class SourceCodeSerializer(serializers.Serializer): path = serializers.CharField(required=False, max_length=255) text = serializers.CharField(required=False, max_length=100000) def update(self, instance, validated_data): pass def create(self, validated_data): pass class SourceDetailAPIView(ThemePackageMixin, generics.RetrieveUpdateAPIView, generics.CreateAPIView): """ Retrieves a template source file **Examples .. code-block:: http GET /api/themes/sources/index.html HTTP/1.1 responds .. code-block:: json { "text": "..." } """ serializer_class = SourceCodeSerializer def post(self, request, *args, **kwargs): """ Creates a template source file **Examples .. code-block:: http POST /api/themes/sources/index.html HTTP/1.1 .. code-block:: json { "text": "..." } responds .. code-block:: json { "text": "..." } """ #pylint:disable=useless-super-delegation return super(SourceDetailAPIView, self).post(request, *args, **kwargs) def put(self, request, *args, **kwargs): """ Updates a template source file **Examples .. code-block:: http PUT /api/themes/sources/index.html HTTP/1.1 .. code-block:: json { "text": "..." } responds .. code-block:: json { "text": "..." 
} """ #pylint:disable=useless-super-delegation return super(SourceDetailAPIView, self).put(request, *args, **kwargs) def retrieve(self, request, *args, **kwargs): relative_path = self.kwargs.get('page') with open(get_template_path( relative_path=relative_path)) as source_file: source_content = source_file.read() return Response({'path': relative_path, 'text': source_content}) def update(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) relative_path = self.kwargs.get('page') template_path = get_template_path(relative_path=relative_path) theme_base = get_theme_dir(self.theme) if not template_path.startswith(theme_base): resp_status = status.HTTP_201_CREATED template_path = safe_join(theme_base, 'templates', relative_path) else: resp_status = status.HTTP_200_OK # We only write the file if the template syntax is correct. try: write_template(template_path, serializer.validated_data['text']) # clear template loaders caches engines = _engine_list(using=None) for engine in engines: try: engine.env.cache.clear() except AttributeError: pass except TemplateSyntaxError as err: LOGGER.debug("%s", err, extra={'request': request}) return self.retrieve(request, *args, **kwargs) return Response(serializer.data, status=resp_status) def perform_create(self, serializer): #pylint:disable=unused-argument relative_path = self.kwargs.get('page') theme_base = get_theme_dir(self.theme) template_path = safe_join(theme_base, 'templates', relative_path) write_template(template_path, '''{% extends "base.html" %} {% block content %} <h1>Lorem Ipsum</h1> {% endblock %} ''')
bsd-2-clause
5,008,858,569,694,676,000
30.989247
78
0.64084
false
4.231863
false
false
false
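write_template() in pages/api/sources.py above uses the write-to-temp-then-rename idiom so concurrent readers never observe a half-written template. A self-contained sketch of that idiom, independent of Django (the function name and the example path are illustrative):

import os
import tempfile


def atomic_write(path, text):
    """Write text to path so readers only ever see the old or the new file."""
    base_dir = os.path.dirname(path) or "."
    os.makedirs(base_dir, exist_ok=True)
    # Write to a temporary file in the same directory so the final rename
    # stays on one filesystem and is therefore atomic on POSIX systems.
    temp_file = tempfile.NamedTemporaryFile(mode="w+t", dir=base_dir, delete=False)
    try:
        temp_file.write(text)
    finally:
        temp_file.close()
    os.replace(temp_file.name, path)  # os.replace also overwrites an existing target


atomic_write("example_template.html", "<h1>Lorem Ipsum</h1>\n")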
Zlash65/erpnext
erpnext/assets/doctype/asset/asset.py
1
23671
# -*- coding: utf-8 -*- # Copyright (c) 2016, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe, erpnext, math, json from frappe import _ from six import string_types from frappe.utils import flt, add_months, cint, nowdate, getdate, today, date_diff from frappe.model.document import Document from erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account from erpnext.assets.doctype.asset.depreciation \ import get_disposal_account_and_cost_center, get_depreciation_accounts from erpnext.accounts.general_ledger import make_gl_entries, delete_gl_entries from erpnext.accounts.utils import get_account_currency from erpnext.controllers.accounts_controller import AccountsController class Asset(AccountsController): def validate(self): self.validate_asset_values() self.validate_item() self.set_missing_values() if self.calculate_depreciation: self.set_depreciation_rate() self.make_depreciation_schedule() self.set_accumulated_depreciation() else: self.finance_books = [] if self.get("schedules"): self.validate_expected_value_after_useful_life() self.status = self.get_status() def on_submit(self): self.validate_in_use_date() self.set_status() self.update_stock_movement() if not self.booked_fixed_asset and not is_cwip_accounting_disabled(): self.make_gl_entries() def on_cancel(self): self.validate_cancellation() self.delete_depreciation_entries() self.set_status() delete_gl_entries(voucher_type='Asset', voucher_no=self.name) self.db_set('booked_fixed_asset', 0) def validate_item(self): item = frappe.get_cached_value("Item", self.item_code, ["is_fixed_asset", "is_stock_item", "disabled"], as_dict=1) if not item: frappe.throw(_("Item {0} does not exist").format(self.item_code)) elif item.disabled: frappe.throw(_("Item {0} has been disabled").format(self.item_code)) elif not item.is_fixed_asset: frappe.throw(_("Item {0} must be a Fixed Asset Item").format(self.item_code)) elif item.is_stock_item: frappe.throw(_("Item {0} must be a non-stock item").format(self.item_code)) def validate_in_use_date(self): if not self.available_for_use_date: frappe.throw(_("Available for use date is required")) def set_missing_values(self): if not self.asset_category: self.asset_category = frappe.get_cached_value("Item", self.item_code, "asset_category") if self.item_code and not self.get('finance_books'): finance_books = get_item_details(self.item_code, self.asset_category) self.set('finance_books', finance_books) def validate_asset_values(self): if not flt(self.gross_purchase_amount): frappe.throw(_("Gross Purchase Amount is mandatory"), frappe.MandatoryError) if not is_cwip_accounting_disabled(): if not self.is_existing_asset and not (self.purchase_receipt or self.purchase_invoice): frappe.throw(_("Please create purchase receipt or purchase invoice for the item {0}"). format(self.item_code)) if (not self.purchase_receipt and self.purchase_invoice and not frappe.db.get_value('Purchase Invoice', self.purchase_invoice, 'update_stock')): frappe.throw(_("Update stock must be enable for the purchase invoice {0}"). 
format(self.purchase_invoice)) if not self.calculate_depreciation: return elif not self.finance_books: frappe.throw(_("Enter depreciation details")) if self.is_existing_asset: return docname = self.purchase_receipt or self.purchase_invoice if docname: doctype = 'Purchase Receipt' if self.purchase_receipt else 'Purchase Invoice' date = frappe.db.get_value(doctype, docname, 'posting_date') if self.available_for_use_date and getdate(self.available_for_use_date) < getdate(self.purchase_date): frappe.throw(_("Available-for-use Date should be after purchase date")) def set_depreciation_rate(self): for d in self.get("finance_books"): d.rate_of_depreciation = self.get_depreciation_rate(d, on_validate=True) def make_depreciation_schedule(self): depreciation_method = [d.depreciation_method for d in self.finance_books] if 'Manual' not in depreciation_method: self.schedules = [] if not self.get("schedules") and self.available_for_use_date: total_depreciations = sum([d.total_number_of_depreciations for d in self.get('finance_books')]) for d in self.get('finance_books'): self.validate_asset_finance_books(d) value_after_depreciation = (flt(self.gross_purchase_amount) - flt(self.opening_accumulated_depreciation)) d.value_after_depreciation = value_after_depreciation no_of_depreciations = cint(d.total_number_of_depreciations - 1) - cint(self.number_of_depreciations_booked) end_date = add_months(d.depreciation_start_date, no_of_depreciations * cint(d.frequency_of_depreciation)) total_days = date_diff(end_date, self.available_for_use_date) rate_per_day = (value_after_depreciation - d.get("expected_value_after_useful_life")) / total_days number_of_pending_depreciations = cint(d.total_number_of_depreciations) - \ cint(self.number_of_depreciations_booked) from_date = self.available_for_use_date if number_of_pending_depreciations: next_depr_date = getdate(add_months(self.available_for_use_date, number_of_pending_depreciations * 12)) if (cint(frappe.db.get_value("Asset Settings", None, "schedule_based_on_fiscal_year")) == 1 and getdate(d.depreciation_start_date) < next_depr_date): number_of_pending_depreciations += 1 for n in range(number_of_pending_depreciations): if n == list(range(number_of_pending_depreciations))[-1]: schedule_date = add_months(self.available_for_use_date, n * 12) previous_scheduled_date = add_months(d.depreciation_start_date, (n-1) * 12) depreciation_amount = \ self.get_depreciation_amount_prorata_temporis(value_after_depreciation, d, previous_scheduled_date, schedule_date) elif n == list(range(number_of_pending_depreciations))[0]: schedule_date = d.depreciation_start_date depreciation_amount = \ self.get_depreciation_amount_prorata_temporis(value_after_depreciation, d, self.available_for_use_date, schedule_date) else: schedule_date = add_months(d.depreciation_start_date, n * 12) depreciation_amount = \ self.get_depreciation_amount_prorata_temporis(value_after_depreciation, d) if value_after_depreciation != 0: value_after_depreciation -= flt(depreciation_amount) self.append("schedules", { "schedule_date": schedule_date, "depreciation_amount": depreciation_amount, "depreciation_method": d.depreciation_method, "finance_book": d.finance_book, "finance_book_id": d.idx }) else: for n in range(number_of_pending_depreciations): schedule_date = add_months(d.depreciation_start_date, n * cint(d.frequency_of_depreciation)) if d.depreciation_method in ("Straight Line", "Manual"): days = date_diff(schedule_date, from_date) if n == 0: days += 1 depreciation_amount = days * rate_per_day from_date = 
schedule_date else: depreciation_amount = self.get_depreciation_amount(value_after_depreciation, d.total_number_of_depreciations, d) if depreciation_amount: value_after_depreciation -= flt(depreciation_amount) self.append("schedules", { "schedule_date": schedule_date, "depreciation_amount": depreciation_amount, "depreciation_method": d.depreciation_method, "finance_book": d.finance_book, "finance_book_id": d.idx }) def validate_asset_finance_books(self, row): if flt(row.expected_value_after_useful_life) >= flt(self.gross_purchase_amount): frappe.throw(_("Row {0}: Expected Value After Useful Life must be less than Gross Purchase Amount") .format(row.idx)) if not row.depreciation_start_date: frappe.throw(_("Row {0}: Depreciation Start Date is required").format(row.idx)) if not self.is_existing_asset: self.opening_accumulated_depreciation = 0 self.number_of_depreciations_booked = 0 else: depreciable_amount = flt(self.gross_purchase_amount) - flt(row.expected_value_after_useful_life) if flt(self.opening_accumulated_depreciation) > depreciable_amount: frappe.throw(_("Opening Accumulated Depreciation must be less than equal to {0}") .format(depreciable_amount)) if self.opening_accumulated_depreciation: if not self.number_of_depreciations_booked: frappe.throw(_("Please set Number of Depreciations Booked")) else: self.number_of_depreciations_booked = 0 if cint(self.number_of_depreciations_booked) > cint(row.total_number_of_depreciations): frappe.throw(_("Number of Depreciations Booked cannot be greater than Total Number of Depreciations")) if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(nowdate()): frappe.msgprint(_("Depreciation Row {0}: Depreciation Start Date is entered as past date") .format(row.idx), title=_('Warning'), indicator='red') if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(self.purchase_date): frappe.throw(_("Depreciation Row {0}: Next Depreciation Date cannot be before Purchase Date") .format(row.idx)) if row.depreciation_start_date and getdate(row.depreciation_start_date) < getdate(self.available_for_use_date): frappe.throw(_("Depreciation Row {0}: Next Depreciation Date cannot be before Available-for-use Date") .format(row.idx)) def set_accumulated_depreciation(self, ignore_booked_entry = False): straight_line_idx = [d.idx for d in self.get("schedules") if d.depreciation_method == 'Straight Line'] finance_books = [] for i, d in enumerate(self.get("schedules")): if ignore_booked_entry and d.journal_entry: continue if d.finance_book_id not in finance_books: accumulated_depreciation = flt(self.opening_accumulated_depreciation) value_after_depreciation = flt(self.get_value_after_depreciation(d.finance_book_id)) finance_books.append(d.finance_book_id) depreciation_amount = flt(d.depreciation_amount, d.precision("depreciation_amount")) value_after_depreciation -= flt(depreciation_amount) if straight_line_idx and i == max(straight_line_idx) - 1: book = self.get('finance_books')[cint(d.finance_book_id) - 1] depreciation_amount += flt(value_after_depreciation - flt(book.expected_value_after_useful_life), d.precision("depreciation_amount")) d.depreciation_amount = depreciation_amount accumulated_depreciation += d.depreciation_amount d.accumulated_depreciation_amount = flt(accumulated_depreciation, d.precision("accumulated_depreciation_amount")) def get_value_after_depreciation(self, idx): return flt(self.get('finance_books')[cint(idx)-1].value_after_depreciation) def get_depreciation_amount(self, depreciable_value, 
total_number_of_depreciations, row): if row.depreciation_method in ["Straight Line", "Manual"]: amt = (flt(self.gross_purchase_amount) - flt(row.expected_value_after_useful_life) - flt(self.opening_accumulated_depreciation)) depreciation_amount = amt * row.rate_of_depreciation else: depreciation_amount = flt(depreciable_value) * (flt(row.rate_of_depreciation) / 100) value_after_depreciation = flt(depreciable_value) - depreciation_amount if value_after_depreciation < flt(row.expected_value_after_useful_life): depreciation_amount = flt(depreciable_value) - flt(row.expected_value_after_useful_life) return depreciation_amount def get_depreciation_amount_prorata_temporis(self, depreciable_value, row, start_date=None, end_date=None): if start_date and end_date: prorata_temporis = min(abs(flt(date_diff(str(end_date), str(start_date)))) / flt(frappe.db.get_value("Asset Settings", None, "number_of_days_in_fiscal_year")), 1) else: prorata_temporis = 1 if row.depreciation_method in ("Straight Line", "Manual"): depreciation_amount = (flt(row.value_after_depreciation) - flt(row.expected_value_after_useful_life)) / (cint(row.total_number_of_depreciations) - cint(self.number_of_depreciations_booked)) * prorata_temporis else: depreciation_amount = self.get_depreciation_amount(depreciable_value, row.total_number_of_depreciations, row) return depreciation_amount def validate_expected_value_after_useful_life(self): for row in self.get('finance_books'): accumulated_depreciation_after_full_schedule = [d.accumulated_depreciation_amount for d in self.get("schedules") if cint(d.finance_book_id) == row.idx] if accumulated_depreciation_after_full_schedule: accumulated_depreciation_after_full_schedule = max(accumulated_depreciation_after_full_schedule) asset_value_after_full_schedule = flt(flt(self.gross_purchase_amount) - flt(accumulated_depreciation_after_full_schedule), self.precision('gross_purchase_amount')) if row.expected_value_after_useful_life < asset_value_after_full_schedule: frappe.throw(_("Depreciation Row {0}: Expected value after useful life must be greater than or equal to {1}") .format(row.idx, asset_value_after_full_schedule)) def validate_cancellation(self): if self.status not in ("Submitted", "Partially Depreciated", "Fully Depreciated"): frappe.throw(_("Asset cannot be cancelled, as it is already {0}").format(self.status)) if self.purchase_invoice: frappe.throw(_("Please cancel Purchase Invoice {0} first").format(self.purchase_invoice)) if self.purchase_receipt: frappe.throw(_("Please cancel Purchase Receipt {0} first").format(self.purchase_receipt)) def delete_depreciation_entries(self): for d in self.get("schedules"): if d.journal_entry: frappe.get_doc("Journal Entry", d.journal_entry).cancel() d.db_set("journal_entry", None) self.db_set("value_after_depreciation", (flt(self.gross_purchase_amount) - flt(self.opening_accumulated_depreciation))) def set_status(self, status=None): '''Get and update status''' if not status: status = self.get_status() self.db_set("status", status) def get_status(self): '''Returns status based on whether it is draft, submitted, scrapped or depreciated''' if self.docstatus == 0: status = "Draft" elif self.docstatus == 1: status = "Submitted" if self.journal_entry_for_scrap: status = "Scrapped" elif self.finance_books: idx = self.get_default_finance_book_idx() or 0 expected_value_after_useful_life = self.finance_books[idx].expected_value_after_useful_life value_after_depreciation = self.finance_books[idx].value_after_depreciation if flt(value_after_depreciation) <= 
expected_value_after_useful_life: status = "Fully Depreciated" elif flt(value_after_depreciation) < flt(self.gross_purchase_amount): status = 'Partially Depreciated' elif self.docstatus == 2: status = "Cancelled" return status def get_default_finance_book_idx(self): if not self.get('default_finance_book') and self.company: self.default_finance_book = erpnext.get_default_finance_book(self.company) if self.get('default_finance_book'): for d in self.get('finance_books'): if d.finance_book == self.default_finance_book: return cint(d.idx) - 1 def update_stock_movement(self): asset_movement = frappe.db.get_value('Asset Movement', {'asset': self.name, 'reference_name': self.purchase_receipt, 'docstatus': 0}, 'name') if asset_movement: doc = frappe.get_doc('Asset Movement', asset_movement) doc.submit() def make_gl_entries(self): gl_entries = [] if ((self.purchase_receipt or (self.purchase_invoice and frappe.db.get_value('Purchase Invoice', self.purchase_invoice, 'update_stock'))) and self.purchase_receipt_amount and self.available_for_use_date <= nowdate()): fixed_aseet_account = get_asset_category_account(self.name, 'fixed_asset_account', asset_category = self.asset_category, company = self.company) cwip_account = get_asset_account("capital_work_in_progress_account", self.name, self.asset_category, self.company) gl_entries.append(self.get_gl_dict({ "account": cwip_account, "against": fixed_aseet_account, "remarks": self.get("remarks") or _("Accounting Entry for Asset"), "posting_date": self.available_for_use_date, "credit": self.purchase_receipt_amount, "credit_in_account_currency": self.purchase_receipt_amount, "cost_center": self.cost_center })) gl_entries.append(self.get_gl_dict({ "account": fixed_aseet_account, "against": cwip_account, "remarks": self.get("remarks") or _("Accounting Entry for Asset"), "posting_date": self.available_for_use_date, "debit": self.purchase_receipt_amount, "debit_in_account_currency": self.purchase_receipt_amount, "cost_center": self.cost_center })) if gl_entries: from erpnext.accounts.general_ledger import make_gl_entries make_gl_entries(gl_entries) self.db_set('booked_fixed_asset', 1) def get_depreciation_rate(self, args, on_validate=False): if isinstance(args, string_types): args = json.loads(args) number_of_depreciations_booked = 0 if self.is_existing_asset: number_of_depreciations_booked = self.number_of_depreciations_booked float_precision = cint(frappe.db.get_default("float_precision")) or 2 tot_no_of_depreciation = flt(args.get("total_number_of_depreciations")) - flt(number_of_depreciations_booked) if args.get("depreciation_method") in ["Straight Line", "Manual"]: return 1.0 / tot_no_of_depreciation if args.get("depreciation_method") == 'Double Declining Balance': return 200.0 / args.get("total_number_of_depreciations") if args.get("depreciation_method") == "Written Down Value": if args.get("rate_of_depreciation") and on_validate: return args.get("rate_of_depreciation") no_of_years = flt(args.get("total_number_of_depreciations") * flt(args.get("frequency_of_depreciation"))) / 12 value = flt(args.get("expected_value_after_useful_life")) / flt(self.gross_purchase_amount) # square root of flt(salvage_value) / flt(asset_cost) depreciation_rate = math.pow(value, 1.0/flt(no_of_years, 2)) return 100 * (1 - flt(depreciation_rate, float_precision)) def update_maintenance_status(): assets = frappe.get_all('Asset', filters = {'docstatus': 1, 'maintenance_required': 1}) for asset in assets: asset = frappe.get_doc("Asset", asset.name) if frappe.db.exists('Asset 
Maintenance Task', {'parent': asset.name, 'next_due_date': today()}): asset.set_status('In Maintenance') if frappe.db.exists('Asset Repair', {'asset_name': asset.name, 'repair_status': 'Pending'}): asset.set_status('Out of Order') def make_post_gl_entry(): if is_cwip_accounting_disabled(): return assets = frappe.db.sql_list(""" select name from `tabAsset` where ifnull(booked_fixed_asset, 0) = 0 and available_for_use_date = %s""", nowdate()) for asset in assets: doc = frappe.get_doc('Asset', asset) doc.make_gl_entries() def get_asset_naming_series(): meta = frappe.get_meta('Asset') return meta.get_field("naming_series").options @frappe.whitelist() def make_purchase_invoice(asset, item_code, gross_purchase_amount, company, posting_date): pi = frappe.new_doc("Purchase Invoice") pi.company = company pi.currency = frappe.get_cached_value('Company', company, "default_currency") pi.set_posting_time = 1 pi.posting_date = posting_date pi.append("items", { "item_code": item_code, "is_fixed_asset": 1, "asset": asset, "expense_account": get_asset_category_account(asset, 'fixed_asset_account'), "qty": 1, "price_list_rate": gross_purchase_amount, "rate": gross_purchase_amount }) pi.set_missing_values() return pi @frappe.whitelist() def make_sales_invoice(asset, item_code, company, serial_no=None): si = frappe.new_doc("Sales Invoice") si.company = company si.currency = frappe.get_cached_value('Company', company, "default_currency") disposal_account, depreciation_cost_center = get_disposal_account_and_cost_center(company) si.append("items", { "item_code": item_code, "is_fixed_asset": 1, "asset": asset, "income_account": disposal_account, "serial_no": serial_no, "cost_center": depreciation_cost_center, "qty": 1 }) si.set_missing_values() return si @frappe.whitelist() def create_asset_maintenance(asset, item_code, item_name, asset_category, company): asset_maintenance = frappe.new_doc("Asset Maintenance") asset_maintenance.update({ "asset_name": asset, "company": company, "item_code": item_code, "item_name": item_name, "asset_category": asset_category }) return asset_maintenance @frappe.whitelist() def create_asset_adjustment(asset, asset_category, company): asset_maintenance = frappe.new_doc("Asset Value Adjustment") asset_maintenance.update({ "asset": asset, "company": company, "asset_category": asset_category }) return asset_maintenance @frappe.whitelist() def transfer_asset(args): args = json.loads(args) if args.get('serial_no'): args['quantity'] = len(args.get('serial_no').split('\n')) movement_entry = frappe.new_doc("Asset Movement") movement_entry.update(args) movement_entry.insert() movement_entry.submit() frappe.db.commit() frappe.msgprint(_("Asset Movement record {0} created").format("<a href='#Form/Asset Movement/{0}'>{0}</a>".format(movement_entry.name))) @frappe.whitelist() def get_item_details(item_code, asset_category): asset_category_doc = frappe.get_doc('Asset Category', asset_category) books = [] for d in asset_category_doc.finance_books: books.append({ 'finance_book': d.finance_book, 'depreciation_method': d.depreciation_method, 'total_number_of_depreciations': d.total_number_of_depreciations, 'frequency_of_depreciation': d.frequency_of_depreciation, 'start_date': nowdate() }) return books def get_asset_account(account_name, asset=None, asset_category=None, company=None): account = None if asset: account = get_asset_category_account(asset, account_name, asset_category = asset_category, company = company) if not account: account = frappe.get_cached_value('Company', company, account_name) 
if not account: frappe.throw(_("Set {0} in asset category {1} or company {2}") .format(account_name.replace('_', ' ').title(), asset_category, company)) return account @frappe.whitelist() def make_journal_entry(asset_name): asset = frappe.get_doc("Asset", asset_name) fixed_asset_account, accumulated_depreciation_account, depreciation_expense_account = \ get_depreciation_accounts(asset) depreciation_cost_center, depreciation_series = frappe.db.get_value("Company", asset.company, ["depreciation_cost_center", "series_for_depreciation_entry"]) depreciation_cost_center = asset.cost_center or depreciation_cost_center je = frappe.new_doc("Journal Entry") je.voucher_type = "Depreciation Entry" je.naming_series = depreciation_series je.company = asset.company je.remark = "Depreciation Entry against asset {0}".format(asset_name) je.append("accounts", { "account": depreciation_expense_account, "reference_type": "Asset", "reference_name": asset.name, "cost_center": depreciation_cost_center }) je.append("accounts", { "account": accumulated_depreciation_account, "reference_type": "Asset", "reference_name": asset.name }) return je def is_cwip_accounting_disabled(): return cint(frappe.db.get_single_value("Asset Settings", "disable_cwip_accounting"))
gpl-3.0
-5,258,987,987,461,458,000
38.320598
166
0.715137
false
3.121588
false
false
false
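Asset.get_depreciation_rate() above derives the written-down-value rate from the salvage-to-cost ratio and the asset's useful life in years. A minimal standalone version of that formula (the function name and the sample figures are illustrative):

import math


def written_down_value_rate(gross_amount, salvage_value, total_depreciations,
                            frequency_months=12):
    """Annual written-down-value depreciation rate, as a percentage.

    rate = 100 * (1 - (salvage / cost) ** (1 / years)), with
    years = total_depreciations * frequency_months / 12.
    """
    years = total_depreciations * frequency_months / 12.0
    ratio = salvage_value / gross_amount
    return 100.0 * (1.0 - math.pow(ratio, 1.0 / years))


# A 100,000 asset written down to 10,000 over five yearly depreciations.
print(round(written_down_value_rate(100000.0, 10000.0, 5), 2))  # roughly 36.9% per year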
AISpace2/AISpace2
aispace2/jupyter/csp/csp.py
1
23307
import threading from functools import partial from time import sleep from ipywidgets import register from traitlets import Bool, Dict, Float, Instance, Unicode from aipython.cspProblem import CSP from ... import __version__ from ..stepdomwidget import ReturnableThread, StepDOMWidget from .cspjsonbridge import (csp_to_json, generate_csp_graph_mappings, json_to_csp) @register class Displayable(StepDOMWidget): """A Jupyter widget for visualizing constraint satisfaction problems (CSPs). Handles arc consistency, domain splitting, and stochastic local search (SLS). See the accompanying frontend file: `js/src/csp/CSPVisualizer.ts` """ _view_name = Unicode('CSPViewer').tag(sync=True) _model_name = Unicode('CSPViewerModel').tag(sync=True) _view_module = Unicode('aispace2').tag(sync=True) _model_module = Unicode('aispace2').tag(sync=True) _view_module_version = Unicode(__version__).tag(sync=True) _model_module_version = Unicode(__version__).tag(sync=True) # The CSP that is synced as a graph to the frontend. graph = Instance(klass=CSP, allow_none=True).tag( sync=True, to_json=csp_to_json, from_json=json_to_csp) # Constrols whether the auto arc consistency button will show up in the widget (will not in SLS) need_AC_button = Bool(True).tag(sync=True) # Tracks if the visualization has been rendered at least once in the front-end. See the @visualize decorator. _previously_rendered = Bool(False).tag(sync=True) wait_for_render = Bool(True).tag(sync=True) def __init__(self): super().__init__() self.visualizer = self ############################## ### SLS-specific variables ### ############################## # Tracks if this is the first conflict reported. # If so, will also compute non-conflicts to highlight green the first time around. self._sls_first_conflict = True ########################################## ### Arc consistency-specific variables ### ########################################## # A reference to the arc the user has selected for arc consistency. A tuple of (variable name, Constraint instance). self._selected_arc = None # True if the user has selected an arc to perform arc-consistency on. Otherwise, an arc is automatically chosen. self._has_user_selected_arc = False # True if the algorithm is at a point where an arc is waiting to be chosen. Used to filter out extraneous clicks otherwise. self._is_waiting_for_arc_selection = False ########################################### ### Domain splitting-specific variables ### ########################################## # A reference to the variable the user has selected for domain splitting. self._selected_var = None # True if the user has selected a var to perform domain splitting on. Otherwise, a variable is automatically chosen. self._has_user_selected_var = False # True if the algorithm is at a point where a var is waiting to be chosen. Used to filter out extraneous clicks otherwise. self._is_waiting_for_var_selection = False # The domain the user has chosen as their first split for `_selected_var`. self._domain_split = None # self.graph = self.csp self.graph = CSP(self.csp.domains, self.csp.constraints, self.csp.positions) (self._domain_map, self._edge_map) = generate_csp_graph_mappings(self.csp) self._initialize_controls() def wait_for_arc_selection(self, to_do): """Pauses execution until an arc has been selected and returned. If the algorithm is running in auto mode, an arc is returned immediately. Otherwise, this function blocks until an arc is selected by the user. Args: to_do (set): A set of arcs to choose from. This set will be modified. 
Returns: (string, Constraint): A tuple (var_name, constraint) that represents an arc from `to_do`. """ # Running in Auto mode. Don't block! if self.max_display_level == 1 or self.max_display_level == 0: return to_do.pop() self._is_waiting_for_arc_selection = True self._block_for_user_input.wait() if self._has_user_selected_arc: self._has_user_selected_arc = False to_do.discard(self._selected_arc) return self._selected_arc # User did not select. Return random arc. return to_do.pop() def wait_for_var_selection(self, iter_var): """Pauses execution until a variable has been selected and returned. If the user steps instead of clicking on a variable, a random variable is returned. Otherwise, the variable clicked by the user is returned, but only if it is a variable that can be split on. Otherwise, this function continues waiting. Args: iter_var (iter): Variables that the user is allowed to split on. Returns: (string): The variable to split on. """ # Running in Auto mode. Split in half! if self.max_display_level == 1: return list(iter_var)[0] # Running in Auto Arc Consistency mode. Change to normal! if self.max_display_level == 0: self.max_display_level = 2 iter_var = list(iter_var) self._send_highlight_splittable_nodes_action(iter_var) self._is_waiting_for_var_selection = True self._block_for_user_input.wait() while (self.max_display_level != 1 and not self._has_user_selected_var): self._block_for_user_input.wait() if self._has_user_selected_var: self._has_user_selected_var = False if self._selected_var in iter_var: return self._selected_var else: return self.wait_for_var_selection(iter_var) self._is_waiting_for_var_selection = False return iter_var[0] def choose_domain_partition(self, domain, var): """Pauses execution until a domain has been split on. If the user chooses to not select a domain (clicks 'Cancel'), splits the domain in half. Otherwise, the subset of the domain chosen by the user is used as the initial split. Args: domain (set): Domain of the variable being split on. Returns: (set): A subset of the domain to be split on first. """ # Running in Auto mode. Split in half! if self.max_display_level == 1: split = len(domain) // 2 dom1 = set(list(domain)[:split]) dom2 = domain - dom1 return dom1, dom2 if self._domain_split is None: # Split in half split = len(domain) // 2 dom1 = set(list(domain)[:split]) dom2 = domain - dom1 return dom1, dom2 # make sure type of chosen domain matches original domain if all(isinstance(n, int) for n in domain): number_domain = set() for n in self._domain_split: number_domain.add(int(n)) self._domain_split = number_domain split1 = set(self._domain_split) split2 = set(domain) - split1 return split1, split2 def handle_custom_msgs(self, _, content, buffers=None): super().handle_custom_msgs(None, content, buffers) event = content.get('event', '') if event == 'arc:click': """ Expects a dictionary containing: varName (string): The name of the variable connected to this arc. constId (string): The id of the constraint connected to this arc. """ if self._is_waiting_for_arc_selection: var_name = content.get('varName') const = self.csp.constraints[content.get('constId')] self.max_display_level = 2 self._selected_arc = (var_name, const) self._has_user_selected_arc = True self._block_for_user_input.set() self._block_for_user_input.clear() self._is_waiting_for_arc_selection = False elif event == 'var:click': """ Expects a dictionary containing: varName (string): The name of the variable to split on. 
""" if not self._is_waiting_for_var_selection and content.get('varType') == 'csp:variable': self.send({'action': 'chooseDomainSplitBeforeAC'}) elif event == 'domain_split': """ Expects a dictionary containing: domain (string[]|None): An array of the elements in the domain to first split on, or None if no choice is made. In this case, splits the domain in half as a default. """ domain = content.get('domain') var_name = content.get('var') self._selected_var = var_name self._domain_split = domain self._has_user_selected_var = True self._block_for_user_input.set() self._block_for_user_input.clear() self._is_waiting_for_var_selection = False elif event == 'reset': """ Reset the algorithm and graph """ # Before resetting backend, freeze the execution of queued function to avoid undetermined state self._pause() # Wait until freezeing completed sleep(0.2) # Reset algorithm related variables user_sleep_time = getattr(self, 'sleep_time', None) super().__init__() self.sleep_time = user_sleep_time self.visualizer = self self._sls_first_conflict = True self._selected_arc = None self._has_user_selected_arc = False self._is_waiting_for_arc_selection = False self._selected_var = None self._has_user_selected_var = False self._is_waiting_for_var_selection = False self._domain_split = None self.graph = CSP(self.csp.domains, self.csp.constraints, self.csp.positions) (self._domain_map, self._edge_map) = generate_csp_graph_mappings(self.csp) # Tell frontend that it is ready to reset frontend graph and able to restart algorithm self.send({'action': 'frontReset'}) # Terminate current running thread if self._thread: self.stop_thread(self._thread) elif event == 'initial_render': queued_func = getattr(self, '_queued_func', None) # Run queued function after we know the frontend view exists if queued_func: func = queued_func['func'] args = queued_func['args'] kwargs = queued_func['kwargs'] self._previously_rendered = True self._thread = ReturnableThread( target=func, args=args, kwargs=kwargs) self._thread.start() elif event == 'update_sleep_time': self.sleep_time = content.get('sleepTime') def display(self, level, *args, **kwargs): if self.wait_for_render is False: return if self._request_backtrack is True: return should_wait = True if args[0] == 'Performing AC with domains': should_wait = False domains = args[1] vars_to_change = [] domains_to_change = [] for var, domain in domains.items(): vars_to_change.append(var) domains_to_change.append(domain) self._send_set_domains_action(vars_to_change, domains_to_change) elif args[0] == 'Domain pruned': variable = args[2] domain = args[4] constraint = args[6] self._send_set_domains_action(variable, domain) self._send_highlight_arcs_action( (variable, constraint), style='bold', colour='green') elif args[0] == "Processing arc (": variable = args[1] constraint = args[3] self._send_highlight_arcs_action( (variable, constraint), style='bold', colour=None) elif args[0] == "Arc: (" and args[4] == ") is inconsistent": variable = args[1] constraint = args[3] self._send_highlight_arcs_action( (variable, constraint), style='bold', colour='red') elif args[0] == "Arc: (" and args[4] == ") now consistent": variable = args[1] constraint = args[3] self._send_highlight_arcs_action( (variable, constraint), style='normal', colour='green') should_wait = False elif (args[0] == "Adding" or args[0] == "New domain. 
Adding") and args[2] == "to to_do.": if args[1] != "nothing": arcs = list(args[1]) arcs_to_highlight = [] for arc in arcs: arcs_to_highlight.append((arc[0], arc[1])) self._send_highlight_arcs_action( arcs_to_highlight, style='normal', colour='blue') elif args[0] == "You can now split domain. Click on a variable whose domain has more than 1 value.": self.send({'action': 'chooseDomainSplit'}) elif args[0] == "... splitting": self.send( {'action': 'setOrder', 'var': args[1], 'domain': args[3], 'other': args[5]}) elif args[0] == "Solution found:": if self.max_display_level == 0: self.max_display_level = 2 solString = "" for var in args[1]: solString += var + "=" + str(args[1][var]) + ", " solString = solString[:-2] self.send({'action': 'setPreSolution', 'solution': solString}) args += ("\nClick Fine Step, Step, Auto Arc Consistency, Auto Solve to find solutions in other domains.", ) elif args[0] == "Solving new domain with": self.send( {'action': 'setSplit', 'domain': args[2], 'var': args[1]}) elif args[0] == "Click Fine Step, Step, Auto Arc Consistency, Auto Solve to find solutions in other domains.": if self.max_display_level == 0: self.max_display_level = 2 self.send({'action': 'noSolution'}) ############################# ### SLS-specific displays ### ############################# elif args[0] == "Initial assignment": assignment = args[1] for (key, val) in assignment.items(): self._send_set_domains_action(key, [val]) elif args[0] == "Assigning" and args[2] == "=": var = args[1] domain = args[3] self._send_set_domains_action(var, [domain]) self._send_highlight_nodes_action(var, "blue") elif args[0] == "Checking": node = args[1] self._send_highlight_nodes_action(node, "blue") elif args[0] == "Still inconsistent": const = args[1] nodes_to_highlight = {const} arcs_to_highlight = [] for var in const.scope: nodes_to_highlight.add(var) arcs_to_highlight.append((var, const)) self._send_highlight_nodes_action(nodes_to_highlight, "red") self._send_highlight_arcs_action(arcs_to_highlight, "bold", "red") elif args[0] == "Still consistent": const = args[1] nodes_to_highlight = {const} arcs_to_highlight = [] for var in const.scope: nodes_to_highlight.add(var) arcs_to_highlight.append((var, const)) self._send_highlight_nodes_action(nodes_to_highlight, "green") self._send_highlight_arcs_action( arcs_to_highlight, "bold", "green") elif args[0] == "Became consistent": const = args[1] nodes_to_highlight = {const} arcs_to_highlight = [] for var in const.scope: nodes_to_highlight.add(var) arcs_to_highlight.append((var, const)) self._send_highlight_nodes_action(nodes_to_highlight, "green") self._send_highlight_arcs_action( arcs_to_highlight, "bold", "green") elif args[0] == "Became inconsistent": const = args[1] nodes_to_highlight = {const} arcs_to_highlight = [] for var in const.scope: nodes_to_highlight.add(var) arcs_to_highlight.append((var, const)) self._send_highlight_nodes_action(nodes_to_highlight, "red") self._send_highlight_arcs_action(arcs_to_highlight, "bold", "red") elif args[0] == "AC done. 
Reduced domains": should_wait = False elif args[0] == "Conflicts:": conflicts = args[1] conflict_nodes_to_highlight = set() conflict_arcs_to_highlight = [] non_conflict_nodes_to_highlight = set() non_conflict_arcs_to_highlight = [] if self._sls_first_conflict: # Highlight all non-conflicts green self._sls_first_conflict = False not_conflicts = set(self.csp.constraints) - conflicts for not_conflict in not_conflicts: non_conflict_nodes_to_highlight.add(not_conflict) for node in not_conflict.scope: non_conflict_nodes_to_highlight.add(node) non_conflict_arcs_to_highlight.append( (node, not_conflict)) self._send_highlight_nodes_action( non_conflict_nodes_to_highlight, "green") self._send_highlight_arcs_action( non_conflict_arcs_to_highlight, "bold", "green") # Highlight all conflicts red for conflict in conflicts: conflict_nodes_to_highlight.add(conflict) for node in conflict.scope: conflict_nodes_to_highlight.add(node) conflict_arcs_to_highlight.append((node, conflict)) self._send_highlight_nodes_action( conflict_nodes_to_highlight, "red") self._send_highlight_arcs_action( conflict_arcs_to_highlight, "bold", "red") super().display(level, *args, **dict(kwargs, should_wait=should_wait)) def _send_highlight_nodes_action(self, vars, colour): """Sends a message to the front-end visualization to highlight nodes. Args: vars (string|string[]): The name(s) of the variables to highlight. colour (string|None): A HTML colour string for the stroke of the node. Passing in None will keep the existing stroke of the node. """ # We don't want to check if it is iterable because a string is iterable if not isinstance(vars, list) and not isinstance(vars, set): vars = [vars] nodeIds = [] for var in vars: nodeIds.append(self._domain_map[var]) self.send({ 'action': 'highlightNodes', 'nodeIds': nodeIds, 'colour': colour }) def _send_highlight_splittable_nodes_action(self, vars): """Sends a message to the front-end visualization to highlight Splittable nodes when users can split domain. Args: vars (string|string[]): The name(s) of the splittable variables to highlight. """ # We don't want to check if it is iterable because a string is iterable if not isinstance(vars, list) and not isinstance(vars, set): vars = [vars] nodeIds = [] for var in vars: nodeIds.append(self._domain_map[var]) self.send({ 'action': 'highlightSplittableNodes', 'nodeIds': nodeIds, }) def _send_highlight_arcs_action(self, arcs, style='normal', colour=None): """Sends a message to the front-end visualization to highlight arcs. Args: arcs ((string, Constraint)|(string, Constraint)[]): Tuples of (variable name, Constraint instance) that form an arc. For convenience, you do not need to pass a list of tuples of you only have one to highlight. style ('normal'|'bold'): Style of the highlight. Applied to every arc passed in. colour (string|None): A HTML colour string for the colour of the line. Passing in None will keep the existing colour of the arcs. """ if not isinstance(arcs, list): arcs = [arcs] arc_ids = [] for arc in arcs: arc_ids.append(self._edge_map[arc]) self.send({ 'action': 'highlightArcs', 'arcIds': arc_ids, 'style': style, 'colour': colour }) def _send_set_domains_action(self, vars, domains): """Sends a message to the front-end visualization to set the domains of variables. Args: vars (string|string[]): The name of the variable(s) whose domain should be changed. domains (List[int|string]|List[List[int|string]]): The updated domain of the variable(s). If vars is an array, then domain is an array of domains, in the same order. 
""" is_single_var = False if not isinstance(vars, list): vars = [vars] is_single_var = True self.send({ 'action': 'setDomains', 'nodeIds': [self._domain_map[var] for var in vars], 'domains': [list(domain) for domain in domains] if not is_single_var else [domains] }) def visualize(func_to_delay): """Enqueues a function that does not run until the Jupyter widget has rendered. Once the Jupyter widget has rendered once, further invocation of the wrapped function behave as if unwrapped. Necessary because otherwise, the function runs (and blocks when display is called) immediately, before the view has a chance to render (and so there is no way to unblock using the step buttons!) Args: func_to_delay (function): The function to delay. Returns: The original function, wrapped such that it will automatically run when the Jupyter widget is rendered. """ def wrapper(self, *args, **kwargs): if self._previously_rendered is False and self.wait_for_render: self._queued_func = { 'func': partial(func_to_delay, self), 'args': args, 'kwargs': kwargs } else: return func_to_delay(self, *args, **kwargs) return wrapper
gpl-3.0
-6,535,950,557,674,089,000
38.105705
131
0.562449
false
4.249225
false
false
false
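choose_domain_partition() in the CSP widget above falls back to splitting a variable's domain in half whenever the user does not pick a subset. A tiny standalone sketch of that default split (the function name is illustrative; the widget itself also syncs the result to the frontend graph):

def split_domain_in_half(domain):
    """Return two subsets of `domain`: the first half and the remainder."""
    values = list(domain)
    half = len(values) // 2
    first = set(values[:half])
    second = set(domain) - first
    return first, second


dom1, dom2 = split_domain_in_half({1, 2, 3, 4, 5})
print(dom1, dom2)  # two disjoint halves; exact membership follows set iteration order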
lukefrasera/cs775Homework
hw_002/scripts/gaussian_classify.py
1
9070
#!/usr/bin/env python import numpy as np from numpy import linalg as la import matplotlib.pyplot as plt import argparse import os import pdb from scipy import spatial import time import operator ''' Python Program demonstrating the use of a gaussian classifier. ''' #KNNCLassifier returns a tuple of the K closest feature vectors def KNNSearch(k, features, test_data): test_data_classification = [] for test_index, test_element in enumerate(test_data): if test_element == []: continue neighborDistance = [] for feature_index,feature in enumerate(features): try: distance = la.norm(feature-test_element) except ValueError: pdb.set_trace() neighborDistance.append([distance, feature_index]) neighborDistance = sorted(neighborDistance, key=lambda row: row[0], reverse=True) #pdb.set_trace() test_data_classification.append(np.matrix(neighborDistance[0:k][1])) pdb.set_trace() return test_data_classification def KNNSearchFast(k, features, test_data): t0 = time.time() tree = spatial.KDTree(features) t1 = time.time() result = tree.query(test_data, k) t2 = time.time() print "Build time: %f, query time: %f" % (t1-t0, t2-t1) return result def KNNClassify(train_classification, test_neighbors): test_classification = [] for sample in test_neighbors[1]: votes = [0 for x in xrange(10)] try: for neighbor in sample: sample_class = int(train_classification[neighbor]) votes[sample_class] += 1 except TypeError: #catch the case where K=1 sample_class = int(train_classification[sample]) votes[sample_class] = 1 classification = max(enumerate(votes), key=operator.itemgetter(1))[0] test_classification.append(classification) return test_classification def LSESearch(features,classification, test_data): features = np.matrix(features) classification = np.matrix(classification).T test_data = np.matrix(test_data) filter = la.inv(features.T * features) * features.T * classification test_data_classification = [] classification = (test_data * filter) classification[classification < 0] = -1 classification[classification >=0] = 1 return classification def ParseData(raw_data, class1, class2): raw_data = raw_data.rstrip('\n') raw_data_list = raw_data.split('\n') data_list = list() for raw_data_point in raw_data_list: raw_data_point = raw_data_point.rstrip() point = raw_data_point.split(' ') data_list.append([float(x) for x in point]) data_list.pop() data_list_np = np.array(data_list) mask = (data_list_np[:,0] == class1) + (data_list_np[:,0] == class2) data_list_np = data_list_np[mask] return data_list_np def GaussianBuild(features, classification, classa, classb): pdb.set_trace() classaFeaturesMask = (classification == classa) classbFeaturesMask = (classification == classb) aFeatures = np.array(features)[classaFeaturesMask].T bFeatures = np.array(features)[classbFeaturesMask].T print 'Of ',features.shape,'Elements, ',aFeatures.shape,' are of class A, ',bFeatures.shape,' are of class B' aCovMat = np.cov(aFeatures) aMeanMat = np.mean(aFeatures,1) bCovMat = np.cov(bFeatures) bMeanMat = np.mean(bFeatures,1) return [aCovMat,aMeanMat,bCovMat,bMeanMat] def ComputeGaussianProbability(covMat, meanMat, sample): meanMat = np.matrix(meanMat).T sample = sample.T #sample = meanMat nonInvertible = True eyeScale = 0.0 while nonInvertible: nonInvertible = False try: covMatInverse = la.inv(covMat + np.eye(covMat.shape[0])*eyeScale) except la.linalg.LinAlgError: nonInvertible = True eyeScale = eyeScale + 0.0001 if eyeScale > 0.002: print 'Set lambda to ',eyeScale,' to make covMat invertible' probability = 1.0/(np.sqrt(la.norm(2*np.pi*covMat))) probability 
*= np.exp(-0.5*(sample-meanMat).T*covMatInverse*(sample-meanMat)) return probability def GaussianClassify(aCovMat, aMeanMat, bCovMat, bMeanMat, test_data): #for each sample, compute the probability of it belonging to each class for sample in test_data: #pdb.set_trace() probability_a = ComputeGaussianProbability(aCovMat, aMeanMat, sample) probability_b = ComputeGaussianProbability(bCovMat, bMeanMat, sample) print 'Sample P(A)=',probability_a,'Sample P(B)=',probability_b def main(): parser = argparse.ArgumentParser(description='Process input') parser.add_argument('-t', '--training_file', type=str, help='submit data to train against') parser.add_argument('-f', '--testing_file', type=str, help='submit data to test the trained model against') parser.add_argument('-s', '--save_model', type=str, help='save out trained model') parser.add_argument('-r', '--read_model', type=str, help='read in trained model') parser.add_argument('-k', '--k_neighbors', type=int, help='number of neighbors to find') parser.add_argument('-a', '--classa', type=int, help='class to test/train on') parser.add_argument('-b', '--classb', type=int, help='class to test/train on') parser.add_argument('-m', '--method', type=int, help='0=KNN,1=LSE,2=Gauss') args = parser.parse_args() # Check if Arguments allow execution if (not args.training_file) and (not args.read_model): print "Error: No training Data or model present!" return -1 if args.training_file and args.read_model: print "Error: cannot read model and traing data at the same time!" return -1 if args.training_file: # trainagainst training file if not os.path.isfile(args.training_file): print "Error: Training file doesn't exist!" return -1 # train with open(args.training_file) as file: # read file contents raw_data = file.read() # parse data data = ParseData(raw_data, args.classa, args.classb) # train on data classification = data[:,0] features = np.matrix(data[:,1:]) if args.testing_file: with open(args.testing_file) as test_file: raw_test_data = test_file.read() test_data = ParseData(raw_test_data, args.classa, args.classb) test_data_truth = test_data[:,0] test_data = np.matrix(test_data[:,1:]) if args.method == 0: #Do KNN classification nearest_neighbors = KNNSearchFast(args.k_neighbors, features, test_data) print "Num training samples: %d, num test samples: %d" % (len(classification), len(test_data_truth)) classification = KNNClassify(classification, nearest_neighbors) #Compute the error rate errors = test_data_truth - classification misclassification_a = errors[errors == args.classa - args.classb] misclassification_b = errors[errors == args.classb - args.classa] mask = errors != 0 num_errors = sum(mask) print "Error rate: %f%%" % (float(num_errors)/len(test_data_truth)*100) print "Percentage of %d's misclassified: %f" % (args.classa, float(misclassification_a.size)/test_data_truth[test_data_truth == args.classa].size*100) print "Percentage of %d's misclassified: %f" % (args.classb, float(misclassification_b.size)/test_data_truth[test_data_truth == args.classb].size*100) if args.method == 1: #Do LSE classification #make classification binary classification[classification == args.classa] = -1 classification[classification == args.classb] = 1 #Perform the classficiation on the test data test_data_classification = LSESearch(features, classification, test_data) test_data_truth[test_data_truth == args.classa] = -1 test_data_truth[test_data_truth == args.classb] = 1 #Compute the error rate errors = test_data_classification.T - np.matrix(test_data_truth) misclassification_a = 
errors[errors == 2] misclassification_b = errors[errors == -2] num_errors = np.sum(np.absolute(errors)) print "Num training samples: %d, num test samples: %d" % (len(classification), len(test_data_truth)) print "Error rate: %f%%" % (float(num_errors)/len(test_data_truth)*100) print "Percentage of %d's misclassified: %f" % (args.classa, float(misclassification_a.size)/test_data_truth[test_data_truth == -1].size*100) print "Percentage of %d's misclassified: %f" % (args.classb, float(misclassification_b.size)/test_data_truth[test_data_truth == 1].size*100) if args.method == 2: #build the gaussian model [aCovMat, aMeanMat, bCovMat, bMeanMat] = GaussianBuild(features, classification, args.classa, args.classb) GaussianClassify(aCovMat, aMeanMat, bCovMat, bMeanMat, features) if __name__ == '__main__': main()
lgpl-3.0
-3,786,826,768,799,533,600
42.605769
159
0.653032
false
3.544353
true
false
false
srowe/xen-api
scripts/examples/smapiv2.py
8
9466
#!/usr/bin/env python import os, sys, time, socket, traceback log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw") pid = None def reopenlog(log_file): global log_f if log_f: log_f.close() if log_file: log_f = open(log_file, "aw") else: log_f = os.fdopen(os.dup(sys.stdout.fileno()), "aw") def log(txt): global log_f, pid if not pid: pid = os.getpid() t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime()) print >>log_f, "%s [%d] %s" % (t, pid, txt) log_f.flush() # Functions to construct SMAPI return types ################################# unit = [ "Success", "Unit" ] # Throw this to return an SR_BACKEND_FAILURE to the caller ################## class BackendError(Exception): def __init__(self, code, params): self.code = code self.params = params def __str__(self): return "BackendError(%s, %s)" % (self.code, ", ".join(self.params)) class Vdi_does_not_exist(Exception): def __init__(self, vdi): self.vdi = vdi def __str__(self): return "Vdi_does_not_exist(%s)" % self.vdi def vdi(vdi_info): # return ['Success', ['Vdi', {'vdi': location, 'virtual_size': str(virtual_size) }]] return ['Success', ['Vdi', vdi_info]] def vdis(vis): return ['Success', ['Vdis', vis]] def params(params): return ['Success', ['Params', params ]] def value(result): return { "Status": "Success", "Value": result } def backend_error(code, params): return [ "Failure", [ "Backend_error", code, params ] ] def internal_error(txt): return [ "Failure", "Internal_error", txt ] def vdi_does_not_exist(): return [ "Failure", "Vdi_does_not_exist" ] # Type-checking helper functions ############################################ vdi_info_types = { "vdi": type(""), "name_label": type(""), "name_description": type(""), "ty": type(""), "metadata_of_pool": type(""), "is_a_snapshot": type(True), "snapshot_time": type(""), "snapshot_of": type(""), "read_only": type(True), "cbt_enabled": type(True), "virtual_size": type(""), "physical_utilisation": type("") } def make_vdi_info(v): global vdi_info_types for k in vdi_info_types.keys(): t = vdi_info_types[k] if t == type(""): v[k] = str(v[k]) elif t == type(True): v[k] = str(v[k]).lower() == "true" else: raise (BackendError("make_vdi_info unknown type", [ str(t) ])) return v def vdi_info(v): global vdi_info_types for k in vdi_info_types.keys(): if k not in v: raise (BackendError("vdi_info missing key", [ k, repr(v) ])) t = vdi_info_types[k] if type(v[k]) <> t: raise (BackendError("vdi_info key has wrong type", [ k, str(t), str(type(v[k])) ])) return v def expect_none(x): if x <> None: raise (BackendError("type error", [ "None", repr(x) ])) def expect_long(x): if type(x) <> type(0L): raise (BackendError("type error", [ "long int", repr(x) ])) def expect_string(x): if type(x) <> type(""): raise (BackendError("type error", [ "string", repr(x) ])) # Well-known feature flags understood by xapi ############################## feature_sr_probe = "SR_PROBE" feature_sr_update = "SR_UPDATE" feature_sr_supports_local_caching = "SR_SUPPORTS_LOCAL_CACHING" feature_vdi_create = "VDI_CREATE" feature_vdi_destroy = "VDI_DESTROY" feature_vdi_attach = "VDI_ATTACH" feature_vdi_detach = "VDI_DETACH" feature_vdi_resize = "VDI_RESIZE" feature_vdi_resize_online = "VDI_RESIZE_ONLINE" feature_vdi_clone = "VDI_CLONE" feature_vdi_snapshot = "VDI_SNAPSHOT" feature_vdi_activate = "VDI_ACTIVATE" feature_vdi_deactivate = "VDI_DEACTIVATE" feature_vdi_update = "VDI_UPDATE" feature_vdi_introduce = "VDI_INTRODUCE" feature_vdi_generate_config = "VDI_GENERATE_CONFIG" feature_vdi_reset_on_boot = "VDI_RESET_ON_BOOT" # Unmarshals arguments and 
marshals results (including exceptions) ########## class Marshall: def __init__(self, x): self.x = x def query(self, args): result = self.x.query() return value(result) def sr_attach(self, args): result = self.x.sr_attach(args["task"], args["sr"], args["device_config"]) expect_none(result) return value(unit) def sr_detach(self, args): result = self.x.sr_detach(args["task"], args["sr"]) expect_none(result) return value(unit) def sr_destroy(self, args): result = self.x.sr_destroy(args["task"], args["sr"]) expect_none(result) return value(unit) def sr_scan(self, args): vis = self.x.sr_scan(args["task"], args["sr"]) result = map(lambda vi: vdi_info(vi), vis) return value(vdis(result)) def vdi_create(self, args): vi = self.x.vdi_create(args["task"], args["sr"], vdi_info(args["vdi_info"]), args["params"]) return value(vdi(vdi_info(vi))) def vdi_destroy(self, args): result = self.x.vdi_destroy(args["task"], args["sr"], args["vdi"]) expect_none(result) return value(unit) def vdi_attach(self, args): result = self.x.vdi_attach(args["task"], args["dp"], args["sr"], args["vdi"], args["read_write"]) expect_string(result) return value(params(result)) def vdi_activate(self, args): result = self.x.vdi_activate(args["task"], args["dp"], args["sr"], args["vdi"]) expect_none(result) return value(unit) def vdi_deactivate(self, args): result = self.x.vdi_deactivate(args["task"], args["dp"], args["sr"], args["vdi"]) expect_none(result) return value(unit) def vdi_detach(self, args): result = self.x.vdi_detach(args["task"], args["dp"], args["sr"], args["vdi"]) expect_none(result) return value(unit) def _dispatch(self, method, params): try: log("method = %s params = %s" % (method, repr(params))) args = params[0] if method == "query": return self.query(args) elif method == "SR.attach": return self.sr_attach(args) elif method == "SR.detach": return self.sr_detach(args) elif method == "SR.scan": return self.sr_scan(args) elif method == "VDI.create": return self.vdi_create(args) elif method == "VDI.destroy": return self.vdi_destroy(args) elif method == "VDI.attach": return self.vdi_attach(args) elif method == "VDI.activate": return self.vdi_activate(args) elif method == "VDI.deactivate": return self.vdi_deactivate(args) elif method == "VDI.detach": return self.vdi_detach(args) except BackendError, e: log("caught %s" % e) traceback.print_exc() return value(backend_error(e.code, e.params)) except Vdi_does_not_exist, e: log("caught %s" %e) return value(vdi_does_not_exist()) except Exception, e: log("caught %s" % e) traceback.print_exc() return value(internal_error(str(e))) # Helper function to daemonise ############################################## def daemonize(): def fork(): try: if os.fork() > 0: # parent sys.exit(0) except Exception, e: print >>sys.stderr, "fork() failed: %s" % e traceback.print_exc() raise fork() os.umask(0) os.chdir("/") os.setsid() fork() devnull = open("/dev/null", "r") os.dup2(devnull.fileno(), sys.stdin.fileno()) devnull = open("/dev/null", "aw") os.dup2(devnull.fileno(), sys.stdout.fileno()) os.dup2(devnull.fileno(), sys.stderr.fileno()) from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler # Server XMLRPC from any HTTP POST path ##################################### class RequestHandler(SimpleXMLRPCRequestHandler): rpc_paths = [] # SimpleXMLRPCServer with SO_REUSEADDR ###################################### class Server(SimpleXMLRPCServer): def __init__(self, ip, port): SimpleXMLRPCServer.__init__(self, (ip, port), requestHandler=RequestHandler) def server_bind(self): 
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) SimpleXMLRPCServer.server_bind(self) # This is a hack to patch slow socket.getfqdn calls that # BaseHTTPServer (and its subclasses) make. # See: http://bugs.python.org/issue6085 # See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/ import BaseHTTPServer def _bare_address_string(self): host, port = self.client_address[:2] return '%s' % host BaseHTTPServer.BaseHTTPRequestHandler.address_string = \ _bare_address_string # Given an implementation, serve requests forever ########################### def start(impl, ip, port, daemon): if daemon: log("daemonising") daemonize() log("will listen on %s:%d" % (ip, port)) server = Server(ip, port) log("server registered on %s:%d" % (ip, port)) server.register_introspection_functions() # for debugging server.register_instance(Marshall(impl)) log("serving requests forever") server.serve_forever()
lgpl-2.1
-7,150,863,785,080,457,000
31.754325
105
0.570251
false
3.457268
false
false
false
Oksisane/RSS-Bot
Trolly-master/trolly/board.py
1
5227
""" Created on 8 Nov 2012 @author: plish """ from trolly.trelloobject import TrelloObject class Board(TrelloObject): """ Class representing a Trello Board """ def __init__(self, trello_client, board_id, name=''): super(Board, self).__init__(trello_client) self.id = board_id self.name = name self.base_uri = '/boards/' + self.id def get_board_information(self, query_params=None): """ Get all information for this board. Returns a dictionary of values. """ return self.fetch_json( uri_path='/boards/' + self.id, query_params=query_params or {} ) def get_lists(self): """ Get the lists attached to this board. Returns a list of List objects. """ lists = self.get_lists_json(self.base_uri) lists_list = [] for list_json in lists: lists_list.append(self.create_list(list_json)) return lists_list def get_cards(self): """ Get the cards for this board. Returns a list of Card objects. """ cards = self.get_cards_json(self.base_uri) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list def get_card(self, card_id): """ Get a Card for a given card id. Returns a Card object. """ card_json = self.fetch_json( uri_path=self.base_uri + '/cards/' + card_id ) return self.create_card(card_json) def get_members(self): """ Get Members attached to this board. Returns a list of Member objects. """ members = self.get_members_json(self.base_uri) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list def get_organisation(self): """ Get the Organisation for this board. Returns Organisation object. """ organisation_json = self.get_organisations_json(self.base_uri) return self.create_organisation(organisation_json) def update_board(self, query_params=None): """ Update this board's information. Returns a new board. """ board_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_board(board_json) def add_list(self, query_params=None): """ Create a list for a board. Returns a new List object. """ list_json = self.fetchJson( uri_path=self.base_uri + '/lists', http_method='POST', query_params=query_params or {} ) return self.create_list(list_json) def add_member_by_id(self, member_id, membership_type='normal'): """ Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. """ return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='PUT', query_params={ 'type': membership_type } ) def add_member(self, email, fullname, membership_type='normal'): """ Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. """ return self.fetch_json( uri_path=self.base_uri + '/members', http_method='PUT', query_params={ 'email': email, 'fullName': fullname, 'type': membership_type } ) def remove_member(self, member_id): """ Remove a member from the organisation.Returns JSON of all members if successful or raises an Unauthorised exception if not. 
""" return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='DELETE' ) # Deprecated def getBoardInformation(self, query_params=None): return self.get_board_information(query_params) def getLists(self): return self.get_lists() def getCards(self): return self.get_cards() def getCard(self, card_id): return self.get_card(card_id) def getMembers(self): return self.get_members() def getOrganisation(self): return self.get_organisation() def updateBoard(self, query_params=None): return self.update_board(query_params) def addList(self, query_params=None): return self.add_list(query_params) def addMemberById(self, member_id, membership_type='normal'): return self.add_member_by_id(member_id, membership_type) def addMember(self, email, fullname, membership_type='normal'): return self.add_member(email, fullname, membership_type) def removeMember(self, member_id): return self.remove_member(member_id)
gpl-3.0
-6,671,452,477,042,218,000
27.71978
79
0.579682
false
3.999235
false
false
false
zevanzhao/TCCL-Code
ADF/ADFDFTB2xyz.py
1
1334
#!/usr/bin/env python
#Time-stamp: <Last updated: Zhao,Yafan [email protected] 2013-11-25 20:20:08>
"""
A script to get the optimized geometry from ADF DFTB calculation out file.
"""
import sys, re

if (len(sys.argv) < 2):
    print "Usage: ADFDFTB2xyz.py [adf.out]"
    exit(0)
ADFOUT = sys.argv[1]
inp = open(ADFOUT, "r")
outlines = inp.readlines()
#Search for the geometry section
start = 0
end = 0
i = 0
for line in outlines:
    if (re.match(ur"^Geometry$", line)):
        #print "Find start at line %d" %(i)
        start = i
    elif (re.match(ur"^Total Energy \(hartree\)", line)):
        #print "Find end at line %d" %(i)
        end = i+1
    i += 1
i = 0
geolines = outlines[start:end]
#print "%s" % (geolines)
mid = 0
#Search for the geometry section in angstrom
for line in geolines:
    if (re.search(ur"angstrom", line)):
        mid = i+1
        break
    i += 1
angstromgeo = geolines[mid:]
#print "%s" % (angstromgeo)
#print the geometry
j = 0
xyzlines = ""
energy = 0
for line in angstromgeo:
    array = line.split()
    if ( len(array) == 5):
        j += 1
        xyzlines += "%s %s %s %s\n" % (array[1], array[2], array[3], array[4])
    elif (re.match(ur"^Total Energy", line)):
        energy = array[3]
movielines = ""
movielines += "%d\n%s\n%s" % (j, energy, xyzlines)
print "%s" % (movielines),
gpl-3.0
2,415,750,034,143,794,000
25.68
85
0.60045
false
2.700405
false
false
false
calee0219/Course
ML/lab2/lab2_vedio.py
1
1755
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos

dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
        'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')

feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]

start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
        algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))

start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
    train_feature, test_feature = feature[train_index], feature[test_index]
    train_target, test_target = target[train_index], target[test_index]
    nbr = KNN(n_neighbors=11, weights='distance', \
            algorithm='brute', metric='manhattan')
    nbr.fit(train_feature, train_target)
    pred = nbr.predict(test_feature)
    mx = confusion_matrix(test_target, pred)
    total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))

# Cos re
def cosDist(a, b):
    return Cos(a, b)

start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
mit
4,068,913,304,894,074,000
30.339286
75
0.679202
false
3.128342
true
false
false
mgramsay/PlasMarkov
tweet.py
1
2137
# -*- coding: utf-8 -*-

# Copyright (c) 2016 Martin Ramsay
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
Handles the linking to Twitter.
"""

import codecs
import os
from time import gmtime, strftime

import tweepy

from secrets import C_KEY, C_SECRET, A_TOKEN, A_TOKEN_SECRET

def send(text):
    """
    Post a message to Twitter.
    """
    auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
    auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
    api = tweepy.API(auth)

    print 'Sending: ' + text
    try:
        api.update_status(text)
    except tweepy.error.TweepError as err:
        print err.message
        return err.message
    else:
        return 'Tweeted: ' + text

def log(message, logfile_name):
    """
    Update the log file.
    """
    path = os.path.realpath(os.path.join(os.getcwd(),
                                         os.path.dirname(__file__)))
    with codecs.open(os.path.join(path, logfile_name),
                     mode='a+', encoding='utf-8') as logfile:
        logtime = strftime('%d %b %Y %H:%M:%S', gmtime())
        logfile.write(logtime + (' ' + message + '\n').decode('utf-8'))
mit
4,053,133,167,695,728,600
35.220339
79
0.680861
false
3.878403
false
false
false
dataforimpact/veliquest
scrapers/v1-plus-local-storage/jcdecaux-scraper.py
1
3422
import os
import sys
import requests
import json
import datetime as dt

from boto.s3.connection import S3Connection, Location
from boto.s3.key import Key


def unsafe_getenviron(k):
    v = os.environ.get(k)
    if(v):
        return v
    else:
        raise Exception('environment variable %s not set' % k)

JC_DECAUX_API_KEY = unsafe_getenviron('JC_DECAUX_API_KEY')
AWS_SECRET_KEY = unsafe_getenviron('AWS_SECRET_KEY')
AWS_ACCESS_KEY = unsafe_getenviron('AWS_ACCESS_KEY')
VELIQUEST_BUCKET = unsafe_getenviron('VELIQUEST_BUCKET')

# initiate S3 connection
s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
dfibucket = s3conn.get_bucket(VELIQUEST_BUCKET)#, location=Location.EU) # attempts to create a bucket


def getjcdecaux_data_as_json():
    try:
        all_stations_r = requests.get('https://api.jcdecaux.com/vls/v1/stations', params={'apiKey': JC_DECAUX_API_KEY})
        status = all_stations_r.status_code
        if (status == 200):
            json_data = all_stations_r.json()
            return status, json_data
        elif (status == 403):
            raise Exception("%s apiKey for JCDecaux is not valid" % JC_DECAUX_API_KEY)
        elif (status == 500):
            raise Exception("JCDecaux Server Error")
        else:
            raise Exception("JCDecaux Server Error")
    except Exception as e:
        raise e


def parse_station(s):
    """Outputs a single line with (comma serpated) values of
    [contract_name, number, status, bike_stands, available_bike_stands, available_bikes, last_update]
    Note : status is 1 when "OPEN" and 0 when "CLOSED" to reduce bytes # per station
    """
    keys = ['contract_name', 'number', 'status', 'bike_stands',
            'available_bike_stands', 'available_bikes', 'last_update']
    line_vals = [str(s[k]) if (k!='status') else ("1" if (s[k]=='OPEN') else "0") for k in keys]
    return ",".join(line_vals)


def parse_stations(stations_json):
    lines_arr = [parse_station(s) for s in stations_json]
    return '\n'.join(lines_arr)


def filename_from_date(dte):
    return dte.strftime("%Hh%Mm%S_%f.csv")


def dirpath_from_date(dte):
    return dte.strftime("%Y/%m/%d/")


def s3_key(dirpath, filename):
    return "/veliquest/jcdecaux/prod/v1/" + dirpath + filename


def store_stations_in_s3(dirpath, filename, content):
    k = Key(dfibucket)
    k.key = s3_key(dirpath, filename)
    k.set_contents_from_string(content)
    return k.key


def ensure_dir(f):
    d = os.path.dirname(f)
    if not os.path.exists(d):
        os.makedirs(d)


def store_stations_locally(absdir, filename, content):
    ensure_dir(absdir)
    fpath = absdir + filename
    with open(fpath, 'w') as f:
        f.write(content)
    return fpath


if not len(sys.argv)==2:
    print "Pass abs path of directory in sync with S3"
    exit()

if not sys.argv[1][-1] == "/":
    print "arg must be a directory (does not end with /)"
    exit()

if not sys.argv[1][0] == "/":
    print "arg must be abs directory (does not start with /)"
    exit()

print "Executing Request..."
status, json_data = getjcdecaux_data_as_json()

if (status==200):
    print "Done (200)"
    print "Parsing stations data..."
    csv_lines = parse_stations(json_data)

    dte = dt.datetime.utcnow()
    dirpath, filename = dirpath_from_date(dte), filename_from_date(dte)

    print "Storing to S3..."
    s3_key = store_stations_in_s3(dirpath, filename, csv_lines)
    print "S3 stored in %s at %s" % (VELIQUEST_BUCKET, s3_key)

    print "Storing locally..."
    base_dir = sys.argv[1]
    abs_dir = base_dir + dirpath
    local_path = store_stations_locally(abs_dir, filename, csv_lines)
    print "Locally stored in %s" % local_path
mit
4,501,067,044,483,126,000
25.944882
119
0.697545
false
2.720191
false
false
false
gavinfish/leetcode-share
python/065 Valid Number.py
1
1907
'''
Validate if a given string is numeric.

Some examples:
"0" => true
" 0.1 " => true
"abc" => false
"1 a" => false
"2e10" => true

Note: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.
'''


class Solution(object):
    def isNumber(self, s):
        """
        :type s: str
        :rtype: bool
        """
        s = s.strip()
        length = len(s)
        index = 0
        # Deal with symbol
        if index < length and (s[index] == '+' or s[index] == '-'):
            index += 1
        is_normal = False
        is_exp = True
        # Deal with digits in the front
        while index < length and s[index].isdigit():
            is_normal = True
            index += 1
        # Deal with dot ant digits behind it
        if index < length and s[index] == '.':
            index += 1
            while index < length and s[index].isdigit():
                is_normal = True
                index += 1
        # Deal with 'e' and number behind it
        if is_normal and index < length and (s[index] == 'e' or s[index] == 'E'):
            index += 1
            is_exp = False
            if index < length and (s[index] == '+' or s[index] == '-'):
                index += 1
            while index < length and s[index].isdigit():
                index += 1
                is_exp = True
        # Return true only deal with all the characters and the part in front of and behind 'e' are all ok
        return is_normal and is_exp and index == length


if __name__ == "__main__":
    assert Solution().isNumber("3.e-23") == True
    assert Solution().isNumber(".2e81") == True
    assert Solution().isNumber("2e10") == True
    assert Solution().isNumber(" 0.1") == True
    assert Solution().isNumber("1 b") == False
    assert Solution().isNumber("3-2") == False
    assert Solution().isNumber("abc") == False
mit
-3,100,469,301,061,550,000
32.473684
132
0.528579
false
3.956432
false
false
false
grizmio/DictQueue
DictQueue/DictQueue.py
1
4887
import asyncio
import time
from collections import OrderedDict


class KeyNotInDictException(Exception):
    pass


class KeyVanishedException(Exception):
    pass


class AlreadyWaitingForItException(Exception):
    pass


class DictContainer:
    def __init__(self, max_size):
        self.__requested_keys = set()
        self.__container = OrderedDict()  # '' => (time.time(), obj)
        self.__added_item = asyncio.Condition()
        self.__size = 0
        self.__max_size = max_size

    async def __wait_for_put(self):
        with await self.__added_item:
            await self.__added_item.wait()
        return True

    def __get(self, key):
        if key in self.__container:
            try:
                x = self.__container.pop(key)
                self.__size -= 1
                return x
            except KeyError:
                raise KeyVanishedException(key)
        else:
            raise KeyNotInDictException

    async def get(self, key):
        if key in self.__requested_keys:
            raise AlreadyWaitingForItException(key)
        self.__requested_keys.add(key)
        x = None
        while 1:
            try:
                x = self.__get(key)
            except KeyVanishedException:
                raise
            except KeyNotInDictException:
                pass

            if isinstance(x, dict):
                break

            with await self.__added_item:
                await self.__added_item.wait()
                if key in self.__container:
                    try:
                        x = self.__get(key)
                    except KeyVanishedException:
                        raise
                    except KeyNotInDictException:
                        pass
                    else:
                        break
        self.__requested_keys.remove(key)
        return x

    async def get_timeout(self, key, timeout):
        if key in self.__requested_keys:
            raise AlreadyWaitingForItException(key)
        self.__requested_keys.add(key)
        x = None
        timeout_total = timeout
        timeout_left = timeout_total
        timeout_end = time.time() + timeout_total
        while timeout_left > 0 and x is None:
            try:
                x = self.__get(key)
            except KeyVanishedException:
                raise
            except KeyNotInDictException:
                pass

            if isinstance(x, dict):
                break

            try:
                # siempre el maximo a esperar es el tiempo que queda de timeout: timeout_left
                await asyncio.wait_for(self.__wait_for_put(), timeout=timeout_left)
            except asyncio.TimeoutError:
                print('Timeout :-(', key)
                break

            timeout_left = timeout_end - time.time()
        self.__requested_keys.remove(key)
        return x

    async def put(self, m_key, item):
        # __size empieza en 1
        if self.__size > self.__max_size:
            to_pop = self.__size - self.__max_size
            self.__size -= to_pop
            for k in list(self.__container)[:to_pop]:
                print('Quitando:', k)
                self.__container.pop(k)
        self.__container[m_key] = item
        self.__size += 1
        with await self.__added_item:
            try:
                self.__added_item.notify_all()
            except Exception as EE:
                print('\n\n:--->>>> put()', EE, '\n\n')


if __name__ == '__main__':
    # http://stackoverflow.com/questions/23864341/equivalent-of-asyncio-queues-with-worker-threads
    # http://stackoverflow.com/questions/35796421/async-def-and-coroutines-whats-the-link
    import random
    q = DictContainer(3)

    async def produce():
        while 1:
            print('Produciendo')
            for c in '1234':
                await q.put('action-'+c, {'muchainfo': [random.randint(0, 999) for r in range(10)]})
            await asyncio.sleep(5.1 + random.random())

    async def consume1():
        while 1:
            print('cosumiendo 1')
            value = None
            while value is None:
                value = await q.get_timeout('action-1', 9)
                print('consume1 value:', value)
                await asyncio.sleep(0.15)
            await asyncio.sleep(0.2 + random.random())

    async def consume2():
        while 1:
            print('cosumiendo 2')
            value = None
            while value is None:
                value = await q.get_timeout('action-2', 3)
                print('consume2 value:', value)
                await asyncio.sleep(0.15)
            print("Consumed2: ", value)
            await asyncio.sleep(0.2 + random.random())

    loop = asyncio.get_event_loop()
    loop.create_task(produce())
    time.sleep(0.2)
    loop.create_task(consume1())
    loop.create_task(consume2())
    loop.run_forever()
mit
1,505,946,286,195,720,000
28.089286
100
0.516268
false
4.298153
false
false
false
ucsd-ccbb/Oncolist
src/server/Louvain/cluster_analysis_module.py
1
30972
import pandas as pd import numpy as np import time, os, shutil, re, community import networkx as nx import matplotlib.pyplot as plt import scipy.spatial.distance as ssd import scipy.cluster.hierarchy as sch # import cluster_connect module import cluster_connect """ ------------------------------------------------------------- Author: Brin Rosenthal ([email protected]) ------------------------------------------------------------- """ from Utils import HypergeomCalculator from GO import GOLocusParser from multiprocessing import Pool from functools import partial # import the authomatic GO annotation tools NOTE: CHANGE THIS PATH!! import sys #sys.path.append('/Users/brin/Google_Drive/UCSD/cluster_code/go_annotation') #from HypergeomCalculator import * def import_TCGA_data(path_file): ''' function to import data and create network- return graph and edge list, input path to file (tsv) ''' D_df = pd.read_csv(path_file, sep='\t', names=['var1', 'var2', 'corr', 'p']) nodes = np.union1d(D_df.var1, D_df.var2) # don't need to make big network- takes a long time edge_list_w = zip(list(D_df['var1']), list(D_df['var2']), list(np.abs(D_df['corr']))) # try using absolute value of correlations return D_df, edge_list_w def find_edges_thresh(edge_list_total, edge_thresh=0, gamma=1, weight_flag='on'): ''' find edges < threshold and corresponding list of nodes find edges with weights less than a given threshold, the corresponding nodes, return edges, nodes, and graph constructed from these weighted edges and nodes NOTE: gamma and edge_thresh were set after analysis of gamma_scan (see cfncluster_gamma_scan.py), to optimize modularity and overlap fraction, while maintaining a large enough number of groups 5 < size < 500 UPDATE 1/27/2016: edge_thresh and gamma defaults set to 0 and 1, respectively--> including clusters from multiple gammas ''' if weight_flag == 'on': elarge = [(u, v, d**gamma) for (u, v, d) in edge_list_total if d > edge_thresh] #esmall=[(u,v,d) for (u,v,d) in edge_list_total if d['weight'] <=edge_thresh] # what are the corresponding nodes? nodetemp = [] [nodetemp.append(u) for (u, v, d) in elarge] [nodetemp.append(v) for (u, v, d) in elarge] else: # if no weights, only return connecting nodes elarge=[(u, v) for (u, v, d) in edge_list_total if d > edge_thresh] # what are the corresponding nodes? nodetemp = [] [nodetemp.append(u) for (u, v) in elarge] [nodetemp.append(v) for (u, v) in elarge] # how many edges in elarge? print('there are ' + str(len(elarge)) + ' edges with weight greater than ' + str(edge_thresh)) nodetemp = pd.Series(nodetemp) nodesmall = list(nodetemp.unique()) print('there are ' + str(len(nodesmall)) + ' corresponding nodes') # make the graph from nodesmall and elarge Gtemp = nx.Graph() Gtemp.add_nodes_from(nodesmall) Gtemp.add_weighted_edges_from(elarge) return elarge, nodesmall, Gtemp def run_lancichinetti_clustering(Gtemp,data_path,code_path,results_folder,algorithm='louvain', num_c_reps = 2,remove_flag=True): ''' This function calculates the clustering algorithm specified by 'algorithm'. The source code must be downloaded and installed from https://sites.google.com/site/andrealancichinetti/software. Note, the code failed out of the box. 
Had to change line 155 of 'wsarray.h' to: 'pair<int, double> * ww = new pair<int, double> [_size_];' See Lancichinetti's ReadMe doc for more info on how algorithms work beware: oslum algorithms are either VERY slow, or don't work at all returns partition ''' # check if Gtemp is bipartite is_G_bipartite = nx.bipartite.is_bipartite(Gtemp) if is_G_bipartite: v1_nodes,v2_nodes = nx.bipartite.sets(Gtemp) v1map = dict(zip(v1_nodes,range(len(v1_nodes)))) v2map = dict(zip(v2_nodes,range(len(v2_nodes)))) v_all_map = v1map.copy() v_all_map.update(v2map) else: v_all_map = dict(zip(Gtemp.nodes(),range(len(Gtemp.nodes())))) Gtemp_mapped = nx.relabel_nodes(Gtemp,v_all_map) edge_list_mapped = nx.to_edgelist(Gtemp_mapped) e1mapped,e2mapped,weight = zip(*edge_list_mapped) weight_list = [x['weight'] for x in weight] # pick the right algorithm if algorithm=='oslom_undirected': # note: oslum is very slow pnum=0 elif algorithm=='oslom_directed': pnum=1 elif algorithm=='infomap_undirected': pnum=2 elif algorithm=='infomap_directed': pnum=3 elif algorithm=='louvain': pnum=4 elif algorithm=='label_propagation': pnum=5 elif algorithm=='hierarchical_infomap_undirected': pnum=6 elif algorithm=='hierarchical_infomap_directed': pnum=7 elif algorithm=='modularity_optimization': pnum=8 edge_list_path = data_path[:-4] + '_edge_list.csv' edge_list_df = pd.DataFrame({'v1':e1mapped,'v2':e2mapped,'weight':weight_list}) edge_list_df.to_csv(edge_list_path,sep=' ',index=False,header=False) if remove_flag: # check if the directory already exists, delete it if it does. Otherwise the code throws an error if os.path.isdir(results_folder): shutil.rmtree(results_folder) command_line = "python " + code_path + " -n " + edge_list_path + " -p " + str(pnum) + " -f " +results_folder + " -c " + str(num_c_reps) os.system(command_line) # parse the results partition = parse_results_lancichinetti(results_folder,algorithm=algorithm) # translate back to correct ids v_all_map_r = {v: k for k, v in v_all_map.items()} # replace keys in partition partition = dict(partition) old_keys = partition.keys() for old_key in old_keys: new_key = v_all_map_r[old_key] partition[new_key] = partition.pop(old_key) partition = pd.Series(partition) return partition def parse_results_lancichinetti(results_folder,algorithm='louvain'): ''' This function parses the results from lancichinetti code (doesn't work for OSLOM algorithm yet... 
have to decide what to do about non-unique community membership) Returns pandas series object 'partition' ''' results_file = results_folder + '/results_consensus/tp' with open(results_file, "r") as ins: group_id_dict = dict() count = -1 for line in ins: if (algorithm=='hierarchical_infomap_undirected') or (algorithm=='hierarchical_infomap_directed'): count = count+1 # inconsistent file for this algorithm line = re.split(r' ', line.rstrip(' ')) elif (algorithm=='oslom_undirected') or (algorithm=='oslom_directed'): is_new_module = (line.find('module')>0) if is_new_module: count = count+1 else: line = re.split(r' ', line.rstrip(' ')) else: count = count+1 line = re.split(r'\t+', line.rstrip('\t')) group_id_dict[count]=line[:-1] # don't keep trailing \n # reverse the group_id_dict partition = dict() for g in group_id_dict.keys(): node_list_temp = group_id_dict[g] for n in node_list_temp: if int(n) in partition.keys(): partition[int(n)].append(g) else: partition[int(n)] = [g] partition = pd.Series(partition) return partition def results_TCGA_cluster(data_path,code_path,results_path, algorithm='louvain',edge_thresh=0,gamma=1,cluster_size_min=5, cluster_size_max=2000, write_file_name='cluster_results.csv', print_flag=True): ''' Function to process and cluster TCGA correlation files Inputs: - data_path: path to the correlation file, including file, example: '/home/ec2-user/data/LIHC/mirna_vs_rnaseq.cor' - code_path: path to location of 'select.py' function, example: '/home/ec2-user/code/clustering_programs_5_2' - results_path: path to storage of results, example: '/home/ec2-user/results' - algorithm: name of clustering algorithm to use. Can be one of: - 'oslom_undirected' - 'infomap_undirected' - 'louvain' - 'label_propagation' - 'hierarchical_infomap_undirected' - 'modularity_optimization' (see https://sites.google.com/site/andrealancichinetti/software for more details) - edge_thresh: edge weight cutoff (default= 0) - gamma: tuning parameter for weights (default = 1--> works with all algorithms) - cluster_size_min: minimum cluster size to include (default = 5) - cluster_size_max: maximum cluster size to include (default = 2000) - write_file_name: path and name to store results (example: '/home/ec2-user/results/louvain_cluster_results.csv') - print_flag: decide whether to print out progress (default = True) ''' # import the data print('importing the data...') D_df, edge_list_total = import_TCGA_data(data_path) # calculate louvain clusters print('thresholding edges...') elarge,nodesmall,Gtemp = find_edges_thresh(edge_list_total, edge_thresh = edge_thresh,gamma=gamma) print('calculating optimal community partitions using modularity maximization...') #partition = community.best_partition(Gtemp) # check if Gtemp is bipartite is_G_bipartite = nx.bipartite.is_bipartite(Gtemp) results_folder = results_path + '/results_'+algorithm+'_temp' code_select = code_path+'/select.py' partition = run_lancichinetti_clustering(Gtemp,data_path,code_select,results_folder,algorithm=algorithm,num_c_reps=5) # calculate the true value counts (flatten the list of lists first) flat_part_values = [item for sublist in partition.values for item in sublist] flat_part_VC = pd.Series(flat_part_values).value_counts() # switch partition values to tuples, so value_counts() works part_values = [tuple(x) for x in partition.values] partition = pd.Series(part_values,list(partition.index)) partition_VC = partition.value_counts() # set low co-occurence nodes to group -1 keylist = partition.keys() allnodes = [] allnodes.extend(D_df['var1']) 
allnodes.extend(D_df['var2']) allnodes = list(np.unique(allnodes)) setdiff_nodes = np.setdiff1d(allnodes,keylist) for s in range(len(setdiff_nodes)): partition[setdiff_nodes[s]]=[-1] # setup data for output- only save within community edges partition = dict(partition) numedges = len(D_df.var1) numnodes = len(partition) node1list, node2list, corrlist, pvallist, groupidlist = [],[],[],[],[] for i in range(numedges): # print out some progress if print_flag True if print_flag: if (i%100000)==0: print('%.2f percent written' % (i/float(numedges))) key1 = D_df.var1[i] key2 = D_df.var2[i] # check how many groups key1 and key2 belong to num_groups_1 = len(partition[key1]) num_groups_2 = len(partition[key2]) groups_both = [] groups_both.extend(partition[key1]) groups_both.extend(partition[key2]) groups_both = list(np.unique(groups_both)) # fill in lists if node 1 and node 2 are in the same group for g in groups_both: if (g in partition[key1]) and (g in partition[key2]) and (g>-1) and (flat_part_VC[g]>=cluster_size_min) and (flat_part_VC[g]<=cluster_size_max): node1list.append(key1) node2list.append(key2) corrlist.append(D_df['corr'][i]) pvallist.append(D_df['p'][i]) groupidlist.append(g) # wrap results in a dataframe D_with_groups = pd.DataFrame({'var1':node1list,'var2':node2list,'corr':corrlist,'p':pvallist,'group_id':groupidlist}) # trim the groups (connect periphery nodes to core nodes) D_trimmed = cluster_connect.trim_cluster_df(D_with_groups,num_edges_to_keep=20000) D_trimmed.index = range(len(D_trimmed)) # sort the groups D_with_groups_sorted = sort_clusters(D_trimmed,partition,is_bipartite=is_G_bipartite,print_flag=print_flag) # write results to file D_with_groups_sorted.to_csv(write_file_name,sep='\t',index=False) def local_modularity(G,node_list,weighted_tf=False): ''' Calculate the local modularity of a group of nodes. Sum of all partition Lmods = total modularity''' # is graph weighted? if weighted_tf: degree_G = G.degree(G.nodes(),weight='weight') else: degree_G = G.degree(G.nodes()) sub_G = G.subgraph(node_list) m2 = np.sum(degree_G.values()) # total number of edges in subgraph L_mod = 0 for i in range(len(node_list)): for j in range(len(node_list)): nodei = node_list[i] nodej = node_list[j] # does the edge exist? if sub_G.has_edge(nodei,nodej): edge_data = sub_G.get_edge_data(nodei,nodej) if weighted_tf: weight = edge_data['weight'] else: weight = 1 else: weight = 0 L_mod = L_mod + weight - degree_G[nodei]*degree_G[nodej]/float(m2) L_mod = L_mod/m2 # normalize it return L_mod def sort_clusters(D_with_groups,partition,is_bipartite=False,print_flag=True,plot_flag=False): # input D_with_groups and partition from results_TCGA_cluster # is the network symmetric or bipartite? --> import this from Gtemp in 'results_TCGA_cluster' # return sorted dataframe # how many groups are there? 
groups = D_with_groups['group_id'].unique() num_groups = len(groups) v1temp = D_with_groups['var1'] v2temp = D_with_groups['var2'] v1temp = np.unique(v1temp) v2temp = np.unique(v2temp) num_overlap = np.intersect1d(v1temp,v2temp) # sort group_ids by corr, re-order dataframe corr_sorted_total,p_sorted_total = [],[] v1total,v2total = [],[] group_total = [] group_count = 0 for focal_group in groups: group_count += 1 if print_flag: print('sorting group ' + str(group_count) + ' out of ' + str(num_groups)) c_idx = list(D_with_groups[D_with_groups['group_id']==focal_group].index) vrow = D_with_groups['var1'][c_idx] vrow = np.unique(vrow) num_nodes_r = len(vrow) vcol = D_with_groups['var2'][c_idx] vcol = np.unique(vcol) num_nodes_c = len(vcol) vtot = [] vtot.extend(vrow) vtot.extend(vcol) v_unique = np.unique(vtot) num_nodes_t = len(v_unique) v_map_tot = dict(zip(v_unique,range(len(v_unique)))) v_map_tot_r = dict(zip(range(len(v_unique)),v_unique)) v_map_row = dict(zip(vrow,range(num_nodes_r))) v_map_row_r = dict(zip(range(num_nodes_r),vrow)) v_map_col = dict(zip(vcol,range(num_nodes_c))) v_map_col_r = dict(zip(range(num_nodes_c),vcol)) # make corr_mat and p_mat symmetric if there is overlap between vrow and vcol if is_bipartite: corr_mat = np.zeros((num_nodes_r,num_nodes_c)) p_mat = np.ones((num_nodes_r,num_nodes_c)) else: corr_mat = np.zeros((num_nodes_t,num_nodes_t)) p_mat = np.ones((num_nodes_t, num_nodes_t)) for i in c_idx: v1 = D_with_groups['var1'][i] v2 = D_with_groups['var2'][i] # make it symmetric if there is overlap between vrow and vcol if is_bipartite: corr_mat[v_map_row[v1],v_map_col[v2]] = D_with_groups['corr'][i] p_mat[v_map_row[v1],v_map_col[v2]] = D_with_groups['p'][i] else: corr_mat[v_map_tot[v1],v_map_tot[v2]] = D_with_groups['corr'][i] p_mat[v_map_tot[v1],v_map_tot[v2]] = D_with_groups['p'][i] corr_mat[v_map_tot[v2],v_map_tot[v1]] = D_with_groups['corr'][i] # make it symmetric p_mat[v_map_tot[v2],v_map_tot[v1]] = D_with_groups['p'][i] # make it symmetric if (not is_bipartite) and len(v_map_tot)>1: #DRmat = ssd.squareform(ssd.pdist(np.abs(corr_mat))) DRmat = slow_dist_mat(np.abs(corr_mat)) # replaced dist mat calc because indices were wrong row_Z = sch.linkage(DRmat) row_idx = sch.leaves_list(row_Z) elif is_bipartite and len(v_map_row)>1: #DRmat = ssd.squareform(ssd.pdist(np.abs(corr_mat))) DRmat = slow_dist_mat(np.abs(corr_mat)) row_Z = sch.linkage(DRmat) row_idx = sch.leaves_list(row_Z) else: # don't sort if there is only one row row_idx=0 if (not is_bipartite) and len(v_map_tot)>1: #DCmat = ssd.squareform(ssd.pdist(np.abs(np.transpose(corr_mat)))) DCmat = slow_dist_mat(np.transpose(np.abs(corr_mat))) col_Z = sch.linkage(DCmat) col_idx = sch.leaves_list(col_Z) elif is_bipartite and len(v_map_col)>1: #DCmat = ssd.squareform(ssd.pdist(np.abs(np.transpose(corr_mat)))) DCmat = slow_dist_mat(np.transpose(np.abs(corr_mat))) col_Z = sch.linkage(DCmat) col_idx = sch.leaves_list(col_Z) else: # don't sort if there is only one column col_idx = 0 corr_shape = np.shape(corr_mat) print(corr_shape) numrows = corr_shape[0] numcols = corr_shape[1] corr_mat_sorted = corr_mat p_mat_sorted = p_mat if (numrows>1) and (numcols>1): # only need to sort if corr_mat has more than one row/col corr_mat_sorted = corr_mat_sorted[row_idx,:] corr_mat_sorted = corr_mat_sorted[:,col_idx] p_mat_sorted = p_mat_sorted[row_idx,:] p_mat_sorted = p_mat_sorted[:,col_idx] # reshape sorted corr_mat, save to new df? 
corr_mat_sorted_flat = np.ravel(corr_mat_sorted) p_mat_sorted_flat = np.ravel(p_mat_sorted) if plot_flag: plt.matshow(corr_mat_sorted,cmap='bwr',vmin=-1,vmax=1) # also save row/col gene ids mgrid_test = np.mgrid[0:numrows,0:numcols] mgrid_rows = mgrid_test[0] mgrid_cols = mgrid_test[1] row_flat = np.ravel(mgrid_rows) col_flat = np.ravel(mgrid_cols) # then translate to gene ids v1list = [] v2list = [] # handle symmetry if is_bipartite: if numrows>1: v1list = [v_map_row_r[row_idx[r]] for r in row_flat] else: v1list = [v_map_row_r[r] for r in row_flat] if numcols>1: v2list = [v_map_col_r[col_idx[c]] for c in col_flat] else: v2list = [v_map_col_r[c] for c in col_flat] else: v1list = [v_map_tot_r[row_idx[r]] for r in row_flat] v2list = [v_map_tot_r[col_idx[c]] for c in col_flat] # also save group ids group_list = (np.ones((1,len(v1list)))*focal_group) group_list = list(group_list[0]) corr_sorted_total.extend(corr_mat_sorted_flat) p_sorted_total.extend(p_mat_sorted_flat) v1total.extend(v1list) v2total.extend(v2list) group_total.extend(group_list) D_with_groups_sorted = pd.DataFrame({'corr':corr_sorted_total,'p':p_sorted_total, 'var1':v1total,'var2':v2total,'group_id':group_total}) return D_with_groups_sorted def slow_dist_mat(C): ''' Helper function to calculate the distance matrix (using squareform and pdist resulted in re-ordering indices) ''' dist = np.zeros((len(C),len(C))) for i in range(len(C)-1): p1 = C[i,:] for j in range(i+1,len(C)): p2 = C[j,:] dist[i,j] = ssd.cdist([p1],[p2])[0][0] dist[j,i] = dist[i,j] return dist def cal_mirna_enrichment(Gtemp, GO_ID_list, total_unique_gene, GO_Term_list, focal_node): enrichment_mirna = dict() # find neighbors of focal_node if focal_node in Gtemp.nodes(): f_neighbors = Gtemp.neighbors(focal_node) if len(f_neighbors)>20: print(focal_node + ' has ' + str(len(f_neighbors)) + ' neighbors') # annotate this list enriched_list = HypergeomCalculator.calc_enrichment(f_neighbors, GO_ID_list, total_unique_gene, GO_Term_list) GO_temp = dict() for enriched_item in enriched_list: if enriched_item['qvalue'] > 10: GO_temp[enriched_item['go_id']] = enriched_item['qvalue'] if True: print(enriched_item['name'] + ': q-value = ' + str(enriched_item['qvalue'])) # only create a key for focal node if it has some significant entries if len(GO_temp) > 0: enrichment_mirna[focal_node] = GO_temp return enrichment_mirna def save_ivanovska_clusters(data_path,edge_thresh=.5,gamma=1,qthresh=10, cluster_size_min=5, print_flag=True,plot_flag=False,write_file_name='GO_clusters_temp.csv'): ''' This is a function that implements the Ivanovska clustering method of annotating var2 terms which are highly associated with var1 terms, annotating against the gene ontology, then clustering this matrix. Saves an edge list which contains var1 terms with significant annotations, the terms they annotate to, their q-value, and the group they belong to. The edge list has been sorted so that the top annotating terms/genes appear highest in each cluster. arguments: - data_path: path to correlation edge list (example: data_path = '/Users/brin/Documents/TCGA_data/LIHC/mirna_vs_rnaseq.cor') - edge_thresh: cutoff for how highly associated var2 genes must be to each var1 (default = .5) - gamma: parameter to scale correlations (default = 1.. probably don't want to change this) - qthresh: cutoff for significance of enriched GO terms (default = 10) - cluster_size_min: minimum cluster size to save - print_flag: print some diagnostics? (default = True) - plot_flag: plot the total heatmap? 
(default = False) - write_file_name: where should we write the final file? (default = 'GO_clusters_temp.csv') returns: None ''' #data_path = '/Users/brin/Documents/TCGA_data/LIHC/mirna_vs_rnaseq.cor' #edge_thresh = .5 #gamma = 1 #qthresh = 10 # minimum enrichment significance to record #print_flag = True #plot_flag = False #write_file_name = 'GO_clusters_temp.csv' #cluster_size_min = 5 OV_df, edge_list = import_TCGA_data(data_path) # import the data elarge, nodesmall, Gtemp = find_edges_thresh(edge_list,edge_thresh=edge_thresh,gamma=gamma) # build the graph # import GO annotation tools (this takes a little time) NOTE: CHANGE THESE PATHS go_gene_file = '/shared/workspace/SearchEngineProject/GO/GO2all_locus.txt' gene_info_file = '/shared/workspace/SearchEngineProject/GO/Homo_sapiens.gene_info' go_term_file = '/shared/workspace/SearchEngineProject/GO/go.obo' GO_ID_list, total_unique_gene, GO_Term_list = GOLocusParser.parse(go_gene_file, gene_info_file, go_term_file) # write a function to annotate genes which correlate highly with any mirna (e.g. neighbors in the graph) #nodes_A,nodes_B = nx.bipartite.sets(Gtemp) nodes_A = list(OV_df['var1'].unique()) nodes_B = list(OV_df['var2'].unique()) test_nodes = nodes_A[-5:] func = partial(cal_mirna_enrichment, Gtemp, GO_ID_list, total_unique_gene, GO_Term_list) pool = Pool(processes=2) enrichment_list = pool.map(func, test_nodes) pool.close() pool.join() enrichment_mirna = {} for result in enrichment_list: for key in result: enrichment_mirna.update({key:result.get(key)}) if len(enrichment_mirna)>2: GO_unique = [enrichment_mirna[n].keys() for n in enrichment_mirna.keys()] # flatten the list GO_unique = [item for sublist in GO_unique for item in sublist] GO_unique = np.unique(GO_unique) print(len(GO_unique)) # make a dictionary to map from GO_unique to index, and mirna to index GO_map = dict(zip(GO_unique,range(len(GO_unique)))) GO_map_r = dict(zip(range(len(GO_unique)),GO_unique)) mirna_map = dict(zip(enrichment_mirna.keys(),range(len(enrichment_mirna.keys())))) mirna_map_r = dict(zip(range(len(enrichment_mirna.keys())),enrichment_mirna.keys())) # now make the correlation matrix: GO_mirna GO_mirna = np.zeros((len(GO_map),len(mirna_map))) # loop over mirnas for n in enrichment_mirna.keys(): mirna_idx = mirna_map[n] # loop over GO terms in each mirna for g in enrichment_mirna[n].keys(): GO_idx = GO_map[g] qtemp = enrichment_mirna[n][g] # fill in the matrix GO_mirna[GO_idx,mirna_idx] = qtemp # now try clustering using louvain- what do we get? 
go_mirna_for_graph = dict() qvec = [] for n in enrichment_mirna.keys(): # loop over GO terms in each mirna dict_temp = dict() for g in enrichment_mirna[n].keys(): qtemp = enrichment_mirna[n][g] qvec.append(qtemp) #qtemp = np.exp(-qtemp**2) #qtemp = round(qtemp*5) qtemp = qtemp**gamma # fill in the dict dict_temp[g]={'weight':qtemp} go_mirna_for_graph[n] = dict_temp G_go_mirna = nx.from_dict_of_dicts(go_mirna_for_graph) #partition = community.best_partition(G_go_mirna) dendo = community.generate_dendrogram(G_go_mirna) partition = community.partition_at_level(dendo, 0) partition = pd.Series(partition) partition_sort = partition.sort(axis=0,inplace=False) idx_sort = list(partition_sort.index) idx_mirna = np.array([m for m in idx_sort if (m in mirna_map.keys())]) # np.intersect1d(idx_sort,mirna_map.keys()) grp_mirna = np.array([partition_sort[m] for m in idx_sort if (m in mirna_map.keys())]) idx_GO = np.array([g for g in idx_sort if (g in GO_map.keys())]) grp_GO = np.array([partition[g] for g in idx_sort if (g in GO_map.keys())]) group_ids = list(np.unique(partition_sort)) col_idx = [] row_idx = [] corr_sorted_total, gene_list_total,GO_term_list_total,group_total = [],[],[],[] for g in group_ids: # sort individual groups by mean GO value in each row/column idx_mirna_focal = idx_mirna[grp_mirna==g] col_temp = np.array([mirna_map[i] for i in idx_mirna_focal]) mean_mirna_focal = np.mean(GO_mirna[:,col_temp],0) mean_sort = np.argsort(mean_mirna_focal) mean_sort = mean_sort[::-1] # sort descending col_temp = col_temp[mean_sort] # append to col_idx col_idx.extend(col_temp) idx_GO_focal = idx_GO[grp_GO==g] row_temp = np.array([GO_map[i] for i in idx_GO_focal]) print "break point!!!!" print idx_mirna_focal if len(row_temp)>0: # check that row_temp isn't empty mean_GO_focal = np.mean(GO_mirna[row_temp,:],1) mean_sort = np.argsort(mean_GO_focal) mean_sort = mean_sort[::-1] # sort descending row_temp = row_temp[mean_sort] # append to col_idx row_idx.extend(row_temp) # save out flattened sections of correlation matrix as clusters # only save if there are more than cluster_size_min items in cluster cluster_size = np.sum(partition==g) if cluster_size>cluster_size_min: corr_mat_focal = GO_mirna corr_mat_focal = corr_mat_focal[row_temp,:] corr_mat_focal = corr_mat_focal[:,col_temp] corr_mat_focal_flat = np.ravel(corr_mat_focal) corr_shape = np.shape(corr_mat_focal) print(corr_shape) numrows = corr_shape[0] numcols = corr_shape[1] mgrid_test = np.mgrid[0:numrows,0:numcols] mgrid_rows = mgrid_test[0] mgrid_cols = mgrid_test[1] row_flat = np.ravel(mgrid_rows) col_flat = np.ravel(mgrid_cols) # then translate to gene ids/ GO term names gene_list = [] gene_list = [mirna_map_r[col_temp[i]] for i in col_flat] GO_term_list = [GO_map_r[row_temp[i]] for i in row_flat] # also save the group list group_list = (np.ones((1,len(gene_list)))*g) group_list = list(group_list[0]) corr_sorted_total.extend(corr_mat_focal_flat) gene_list_total.extend(gene_list) GO_term_list_total.extend(GO_term_list) group_total.extend(group_list) GO_name_list_total=[GO_Term_list[x][0] for x in GO_term_list_total] D_with_groups_sorted = pd.DataFrame({'qvalue':corr_sorted_total,'gene_name':gene_list_total, 'GO_term':GO_term_list_total,'GO_name':GO_name_list_total, 'group_id':group_total}) else: # save out dummy dataframe if there are not enough enriched terms D_with_groups_sorted = pd.DataFrame({'qvalue':np.nan,'gene_name':np.nan, 'GO_term':np.nan, 'GO_name':np.nan, 'group_id':np.nan},index=[0]) # write results to file 
D_with_groups_sorted.to_csv(write_file_name,sep='\t',index=False) go_mirna_L = GO_mirna go_mirna_L = go_mirna_L[row_idx,:] go_mirna_L = go_mirna_L[:,col_idx] if plot_flag: plt.figure(figsize=(20,50)) plt.matshow(go_mirna_L,fignum=False,cmap='jet',aspect='auto',vmin=0,vmax=30) xtick_labels = [mirna_map_r[i] for i in col_idx] ytick_labels = [GO_map_r[i] for i in row_idx] plt.xticks(range(len(xtick_labels)),xtick_labels,rotation=90) plt.yticks(range(len(ytick_labels)),ytick_labels,fontsize=6) plt.grid('off') #plt.savefig('/Users/brin/Google_Drive/UCSD/update_16_01/LIHC_go_mirna_louvain.png',dpi=150)
mit
-3,302,082,777,373,428,000
38.912371
211
0.599348
false
3.270884
false
false
false